# =============================================================================
# scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/domain/maze/maze.py | maze.py
# =============================================================================
from __future__ import annotations
from copy import deepcopy
from enum import Enum
from typing import Any, NamedTuple, Optional
import matplotlib.pyplot as plt
from skdecide import DeterministicPlanningDomain, Space, Value
from skdecide.builders.domain import Renderable, UnrestrictedActions
from skdecide.hub.space.gym import EnumSpace, ListSpace, MultiDiscreteSpace
DEFAULT_MAZE = """
+-+-+-+-+o+-+-+-+-+-+
| | | |
+ + + +-+-+-+ +-+ + +
| | | | | | | |
+ +-+-+ +-+ + + + +-+
| | | | | | |
+ + + + + + + +-+ +-+
| | | | | |
+-+-+-+-+-+-+-+ +-+ +
| | | |
+ +-+-+-+-+ + +-+-+ +
| | | |
+ + + +-+ +-+ +-+-+-+
| | | | | |
+ +-+-+ + +-+ + +-+ +
| | | | | | | |
+-+ +-+ + + + +-+ + +
| | | | | | |
+ +-+ +-+-+-+-+ + + +
| | | | |
+-+-+-+-+-+x+-+-+-+-+
"""
class State(NamedTuple):
x: int
y: int
class Action(Enum):
up = 0
down = 1
left = 2
right = 3
class D(DeterministicPlanningDomain, UnrestrictedActions, Renderable):
T_state = State # Type of states
T_observation = T_state # Type of observations
T_event = Action # Type of events
T_value = float # Type of transition values (rewards or costs)
T_predicate = bool # Type of logical checks
T_info = (
None # Type of additional information given as part of an environment outcome
)
class Maze(D):
def __init__(self, maze_str: str = DEFAULT_MAZE):
maze = []
for y, line in enumerate(maze_str.strip().split("\n")):
line = line.rstrip()
row = []
for x, c in enumerate(line):
if c in {" ", "o", "x"}:
row.append(1) # spaces are 1s
if c == "o":
self._start = State(x, y)
if c == "x":
self._goal = State(x, y)
else:
row.append(0) # walls are 0s
maze.append(row)
# self._render_maze = deepcopy(self._maze)
self._maze = maze
self._num_cols = len(maze[0])
self._num_rows = len(maze)
self._ax = None
self._image = None
def _get_next_state(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
) -> D.T_state:
if action == Action.left:
next_state = State(memory.x - 1, memory.y)
if action == Action.right:
next_state = State(memory.x + 1, memory.y)
if action == Action.up:
next_state = State(memory.x, memory.y - 1)
if action == Action.down:
next_state = State(memory.x, memory.y + 1)
# If candidate next state is valid
if (
0 <= next_state.x < self._num_cols
and 0 <= next_state.y < self._num_rows
and self._maze[next_state.y][next_state.x] == 1
):
return next_state
else:
return memory
def _get_transition_value(
self,
memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]],
next_state: Optional[D.T_state] = None,
) -> D.T_agent[Value[D.T_value]]:
if next_state.x == memory.x and next_state.y == memory.y:
cost = 2 # big penalty when hitting a wall
else:
cost = abs(next_state.x - memory.x) + abs(
next_state.y - memory.y
) # every move costs 1
return Value(cost=cost)
def _is_terminal(self, state: D.T_state) -> D.T_agent[D.T_predicate]:
return self._is_goal(state)
def _get_action_space_(self) -> D.T_agent[Space[D.T_event]]:
return EnumSpace(Action)
def _get_goals_(self) -> D.T_agent[Space[D.T_observation]]:
return ListSpace([self._goal])
def _get_initial_state_(self) -> D.T_state:
return self._start
def _get_observation_space_(self) -> D.T_agent[Space[D.T_observation]]:
return MultiDiscreteSpace([self._num_cols, self._num_rows])
def _render_from(self, memory: D.T_memory[D.T_state], **kwargs: Any) -> Any:
if self._ax is None:
# fig = plt.gcf()
fig, ax = plt.subplots(1)
# ax = plt.axes()
ax.set_aspect("equal") # set the x and y axes to the same scale
plt.xticks([]) # remove the tick marks by setting to an empty list
plt.yticks([]) # remove the tick marks by setting to an empty list
ax.invert_yaxis() # invert the y-axis so the first row of data is at the top
self._ax = ax
plt.ion()
maze = deepcopy(self._maze)
maze[self._goal.y][self._goal.x] = 0.7
maze[memory.y][memory.x] = 0.3
if self._image is None:
self._image = self._ax.imshow(maze)
else:
self._image.set_data(maze)
# self._ax.pcolormesh(maze)
# plt.draw()
plt.pause(0.001)
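# -----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). It drives the Maze
# domain through the public autocast wrappers that scikit-decide derives from
# the underscored methods above; the wrapper names (get_initial_state,
# get_next_state, get_transition_value, is_goal) are assumed here, and the
# small maze string is made up for the example.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    import random

    SMALL_MAZE = """
+-+o+-+
| |   |
+ + + +
|     |
+-+-+x+
"""
    domain = Maze(SMALL_MAZE)
    state = domain.get_initial_state()
    total_cost = 0.0
    for _ in range(100):  # random walk capped at 100 steps, just to show the API flow
        if domain.is_goal(state):
            break
        action = random.choice(list(Action))
        next_state = domain.get_next_state(state, action)
        total_cost += domain.get_transition_value(state, action, next_state).cost
        state = next_state
    print("stopped at", state, "accumulated cost:", total_cost)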

# =============================================================================
# scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/astar/astar.py | astar.py
# =============================================================================
from __future__ import annotations
import os
import sys
from typing import Callable, Optional
from skdecide import Domain, Solver, hub
from skdecide.builders.domain import (
Actions,
DeterministicTransitions,
FullyObservable,
Goals,
Markovian,
PositiveCosts,
Sequential,
SingleAgent,
)
from skdecide.builders.solver import DeterministicPolicies, ParallelSolver, Utilities
from skdecide.core import Value
record_sys_path = sys.path
skdecide_cpp_extension_lib_path = os.path.abspath(hub.__path__[0])
if skdecide_cpp_extension_lib_path not in sys.path:
sys.path.append(skdecide_cpp_extension_lib_path)
try:
from __skdecide_hub_cpp import _AStarSolver_ as astar_solver
# TODO: remove Markovian req?
class D(
Domain,
SingleAgent,
Sequential,
DeterministicTransitions,
Actions,
Goals,
Markovian,
FullyObservable,
PositiveCosts,
):
pass
class Astar(ParallelSolver, Solver, DeterministicPolicies, Utilities):
T_domain = D
def __init__(
self,
domain_factory: Callable[[], Domain] = None,
heuristic: Optional[
Callable[[Domain, D.T_state], D.T_agent[Value[D.T_value]]]
] = None,
parallel: bool = False,
shared_memory_proxy=None,
debug_logs: bool = False,
) -> None:
ParallelSolver.__init__(
self,
domain_factory=domain_factory,
parallel=parallel,
shared_memory_proxy=shared_memory_proxy,
)
self._solver = None
self._debug_logs = debug_logs
if heuristic is None:
self._heuristic = lambda d, s: Value(cost=0)
else:
self._heuristic = heuristic
self._lambdas = [self._heuristic]
self._ipc_notify = True
def close(self):
"""Joins the parallel domains' processes.
Not calling this method (or not using the 'with' context statement)
results in the solver forever waiting for the domain processes to exit.
"""
if self._parallel:
self._solver.close()
ParallelSolver.close(self)
def _init_solve(self, domain_factory: Callable[[], Domain]) -> None:
self._domain_factory = domain_factory
self._solver = astar_solver(
domain=self.get_domain(),
goal_checker=lambda d, s: d.is_goal(s),
heuristic=lambda d, s: self._heuristic(d, s)
if not self._parallel
else d.call(None, 0, s),
parallel=self._parallel,
debug_logs=self._debug_logs,
)
self._solver.clear()
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
self._init_solve(domain_factory)
def _solve_from(self, memory: D.T_memory[D.T_state]) -> None:
self._solver.solve(memory)
def _is_solution_defined_for(
self, observation: D.T_agent[D.T_observation]
) -> bool:
return self._solver.is_solution_defined_for(observation)
def _get_next_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
if not self._is_solution_defined_for(observation):
self._solve_from(observation)
return self._solver.get_next_action(observation)
def _get_utility(self, observation: D.T_agent[D.T_observation]) -> D.T_value:
return self._solver.get_utility(observation)
except ImportError:
sys.path = record_sys_path
print(
'Scikit-decide C++ hub library not found. Please check it is installed in "skdecide/hub".'
)
raise
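# -----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): wiring the Astar
# wrapper to the Maze domain from the hub. The import path and the public
# wrapper names (solve_with, get_next_action, get_next_state, is_goal,
# get_utility) are assumed, the toy maze string is made up, and the sketch
# requires the C++ hub extension to be installed.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    from skdecide.hub.domain.maze import Maze  # assumed import path

    TOY_MAZE = """
+-+o+-+
| |   |
+ + + +
|     |
+-+-+x+
"""
    domain_factory = lambda: Maze(TOY_MAZE)
    domain = domain_factory()
    with Astar(domain_factory=domain_factory) as solver:
        Maze.solve_with(solver, domain_factory=domain_factory)
        state = domain.get_initial_state()
        steps = 0
        while not domain.is_goal(state) and steps < 100:
            action = solver.get_next_action(state)
            state = domain.get_next_state(state, action)
            steps += 1
        print("stopped after", steps, "steps;",
              "estimated cost-to-go at start:",
              solver.get_utility(domain.get_initial_state()))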

# =============================================================================
# scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/cgp/pycgp/cgp.py | cgp.py
# =============================================================================
import random as rnd
import sys
import numpy as np
class CGP:
class CGPFunc:
def __init__(self, f, name, arity):
self.function = f
self.name = name
self.arity = arity
class CGPNode:
def __init__(self, args, f):
self.args = args
self.function = f
def __init__(
self,
genome,
num_inputs,
num_outputs,
num_cols,
num_rows,
library,
recurrency_distance=1.0,
):
self.genome = genome.copy()
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.num_cols = num_cols
self.num_rows = num_rows
self.max_graph_length = num_cols * num_rows
self.library = library
self.max_arity = 0
self.recurrency_distance = recurrency_distance
for f in self.library:
self.max_arity = np.maximum(self.max_arity, f.arity)
self.graph_created = False
def create_graph(self):
self.to_evaluate = np.zeros(self.max_graph_length, dtype=bool)
self.node_output = np.zeros(
self.max_graph_length + self.num_inputs, dtype=np.float64
)
self.nodes_used = []
self.output_genes = np.zeros(self.num_outputs, dtype=int)  # np.int was removed from NumPy; plain int is equivalent
self.nodes = np.empty(0, dtype=object)  # object array holding CGPNode instances
for i in range(0, self.num_outputs):
self.output_genes[i] = self.genome[len(self.genome) - self.num_outputs + i]
i = 0
# building node list
while i < len(self.genome) - self.num_outputs:
f = self.genome[i]
args = np.empty(0, dtype=int)
for j in range(self.max_arity):
args = np.append(args, self.genome[i + j + 1])
i += self.max_arity + 1
self.nodes = np.append(self.nodes, self.CGPNode(args, f))
self.node_to_evaluate()
self.graph_created = True
def node_to_evaluate(self):
p = 0
while p < self.num_outputs:
if self.output_genes[p] - self.num_inputs >= 0:
self.to_evaluate[self.output_genes[p] - self.num_inputs] = True
p = p + 1
p = self.max_graph_length - 1
while p >= 0:
if self.to_evaluate[p]:
for i in range(0, len(self.nodes[p].args)):
arg = self.nodes[p].args[i]
if arg - self.num_inputs >= 0:
self.to_evaluate[arg - self.num_inputs] = True
self.nodes_used.append(p)
p = p - 1
self.nodes_used = np.array(self.nodes_used)
def load_input_data(self, input_data):
for p in range(self.num_inputs):
self.node_output[p] = input_data[p]
def compute_graph(self):
self.node_output_old = self.node_output.copy()
p = len(self.nodes_used) - 1
while p >= 0:
args = np.zeros(self.max_arity)
for i in range(0, self.max_arity):
args[i] = self.node_output_old[self.nodes[self.nodes_used[p]].args[i]]
f = self.library[self.nodes[self.nodes_used[p]].function].function
self.node_output[self.nodes_used[p] + self.num_inputs] = f(args)
if (
self.node_output[self.nodes_used[p] + self.num_inputs]
!= self.node_output[self.nodes_used[p] + self.num_inputs]
):
print(
self.library[self.nodes[self.nodes_used[p]].function].name,
" returned NaN with ",
args,
)
if (
self.node_output[self.nodes_used[p] + self.num_inputs] < -1.0
or self.node_output[self.nodes_used[p] + self.num_inputs] > 1.0
):
print(
self.library[self.nodes[self.nodes_used[p]].function].name,
" returned ",
self.node_output[self.nodes_used[p] + self.num_inputs],
" with ",
args,
)
p = p - 1
def run(self, inputData):
if not self.graph_created:
self.create_graph()
self.load_input_data(inputData)
self.compute_graph()
return self.read_output()
def read_output(self):
output = np.zeros(self.num_outputs)
for p in range(0, self.num_outputs):
output[p] = self.node_output[self.output_genes[p]]
return output
def clone(self):
return CGP(
self.genome,
self.num_inputs,
self.num_outputs,
self.num_cols,
self.num_rows,
self.library,
)
def mutate(self, num_mutationss):
for i in range(0, num_mutationss):
index = rnd.randint(0, len(self.genome) - 1)
if index < self.num_cols * self.num_rows * (self.max_arity + 1):
# this is an internal node
if index % (self.max_arity + 1) == 0:
# mutate function
self.genome[index] = rnd.randint(0, len(self.library) - 1)
else:
# mutate connection
self.genome[index] = rnd.randint(
0,
self.num_inputs
+ (int(index / (self.max_arity + 1)) - 1) * self.num_rows,
)
else:
# this is an output node
self.genome[index] = rnd.randint(
0, self.num_inputs + self.num_cols * self.num_rows - 1
)
def mutate_per_gene(self, mutation_rate_nodes, mutation_rate_outputs):
for index in range(0, len(self.genome)):
if index < self.num_cols * self.num_rows * (self.max_arity + 1):
# this is an internal node
if rnd.random() < mutation_rate_nodes:
if index % (self.max_arity + 1) == 0:
# mutate function
self.genome[index] = rnd.randint(0, len(self.library) - 1)
else:
# mutate connection
self.genome[index] = rnd.randint(
0,
# recurrency_distance is a float, so cast the bound back to an int for randint
int(
min(
self.max_graph_length + self.num_inputs - 1,
(
self.num_inputs
+ (int(index / (self.max_arity + 1)) - 1)
* self.num_rows
)
* self.recurrency_distance,
)
),
)
# self.genome[index] = rnd.randint(0, self.num_inputs + (int(index / (self.max_arity + 1)) - 1) * self.num_rows)
else:
# this is an output node
if rnd.random() < mutation_rate_outputs:
# this is an output node
self.genome[index] = rnd.randint(
0, self.num_inputs + self.num_cols * self.num_rows - 1
)
def to_dot(self, file_name, input_names, output_names):
if not self.graph_created:
self.create_graph()
out = open(file_name, "w")
out.write("digraph cgp {\n")
out.write('\tsize = "4,4";\n')
self.dot_rec_visited_nodes = np.empty(1)
for i in range(self.num_outputs):
out.write("\t" + output_names[i] + " [shape=oval];\n")
self._write_dot_from_gene(
output_names[i], self.output_genes[i], out, 0, input_names, output_names
)
out.write("}")
out.close()
def _write_dot_from_gene(self, to_name, pos, out, a, input_names, output_names):
if pos < self.num_inputs:
out.write("\t" + input_names[pos] + " [shape=polygon,sides=6];\n")
out.write(
"\t"
+ input_names[pos]
+ " -> "
+ to_name
+ ' [label="'
+ str(a)
+ '"];\n'
)
self.dot_rec_visited_nodes = np.append(self.dot_rec_visited_nodes, [pos])
else:
pos -= self.num_inputs
out.write(
"\t"
+ self.library[self.nodes[pos].function].name
+ "_"
+ str(pos)
+ " -> "
+ to_name
+ ' [label="'
+ str(a)
+ '"];\n'
)
if pos + self.num_inputs not in self.dot_rec_visited_nodes:
out.write(
"\t"
+ self.library[self.nodes[pos].function].name
+ "_"
+ str(pos)
+ " [shape=none];\n"
)
for a in range(self.library[self.nodes[pos].function].arity):
self._write_dot_from_gene(
self.library[self.nodes[pos].function].name + "_" + str(pos),
self.nodes[pos].args[a],
out,
a,
input_names,
output_names,
)
self.dot_rec_visited_nodes = np.append(
self.dot_rec_visited_nodes, [pos + self.num_inputs]
)
def to_function_string(self, input_names, output_names):
if not self.graph_created:
self.create_graph()
for o in range(self.num_outputs):
print(output_names[o] + " = ", end="")
self._write_from_gene(self.output_genes[o], input_names, output_names)
print(";")
print("")
def _write_from_gene(self, pos, input_names, output_names):
if pos < self.num_inputs:
print(input_names[pos], end="")
else:
pos -= self.num_inputs
print(self.library[self.nodes[pos].function].name + "(", end="")
for a in range(self.library[self.nodes[pos].function].arity):
# print(' ', end='')
self._write_from_gene(
self.nodes[pos].args[a], input_names, output_names
)
if a != self.library[self.nodes[pos].function].arity - 1:
print(", ", end="")
# else:
# print(')', end='')
print(")", end="")
@classmethod
def random(
cls, num_inputs, num_outputs, num_cols, num_rows, library, recurrency_distance
):
max_arity = 0
for f in library:
max_arity = np.maximum(max_arity, f.arity)
genome = np.zeros(
num_cols * num_rows * (max_arity + 1) + num_outputs, dtype=int
)
gPos = 0
for c in range(0, num_cols):
for r in range(0, num_rows):
genome[gPos] = rnd.randint(0, len(library) - 1)
for a in range(max_arity):
genome[gPos + a + 1] = rnd.randint(0, num_inputs + c * num_rows - 1)
gPos = gPos + max_arity + 1
for o in range(0, num_outputs):
genome[gPos] = rnd.randint(0, num_inputs + num_cols * num_rows - 1)
gPos = gPos + 1
return CGP(
genome,
num_inputs,
num_outputs,
num_cols,
num_rows,
library,
recurrency_distance,
)
def save(self, file_name):
out = open(file_name, "w")
out.write(str(self.num_inputs) + " ")
out.write(str(self.num_outputs) + " ")
out.write(str(self.num_cols) + " ")
out.write(str(self.num_rows) + "\n")
for g in self.genome:
out.write(str(g) + " ")
out.write("\n")
for f in self.library:
out.write(f.name + " ")
out.close()
@classmethod
def load_from_file(cls, file_name, library):
inp = open(file_name, "r")
pams = inp.readline().split()
genes = inp.readline().split()
funcs = inp.readline().split()
inp.close()
params = np.empty(0, dtype=int)
for p in pams:
params = np.append(params, int(p))
genome = np.empty(0, dtype=int)
for g in genes:
genome = np.append(genome, int(g))
return CGP(genome, params[0], params[1], params[2], params[3], library)
@classmethod
def test(cls, num, library):
# library: a list of CGP.CGPFunc operators to sample random graphs from
c = CGP.random(2, 1, 4, 1, library, 1.0)
for i in range(0, num):
c.mutate(1)
print(c.genome)
print(c.run([1, 2]))
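# -----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): build a tiny
# operator library out of CGP.CGPFunc, sample a random genome and evaluate it.
# The operators below are made up for the example (tanh keeps node outputs in
# [-1, 1], which the range check in compute_graph expects); a single row of
# nodes keeps mutate()'s connection-index arithmetic within bounds.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    library = [
        CGP.CGPFunc(lambda args: np.tanh(args[0] + args[1]), "add", 2),
        CGP.CGPFunc(lambda args: np.tanh(args[0] * args[1]), "mult", 2),
        CGP.CGPFunc(lambda args: -args[0], "neg", 1),
    ]
    individual = CGP.random(
        num_inputs=2, num_outputs=1, num_cols=10, num_rows=1,
        library=library, recurrency_distance=1.0,
    )
    print(individual.run([0.5, -0.25]))  # one output value per output gene
    individual.mutate(3)                 # in-place point mutations of the genome
    print(individual.run([0.5, -0.25]))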

# =============================================================================
# scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/cgp/pycgp/cgpes.py | cgpes.py
# =============================================================================
import os
import numpy as np
from joblib import Parallel, delayed
from .cgp import CGP
from .evaluator import Evaluator
class CGPES:
def __init__(
self,
num_offsprings,
mutation_rate_nodes,
mutation_rate_outputs,
father,
evaluator,
folder="genomes",
num_cpus=1,
):
self.num_offsprings = num_offsprings
self.mutation_rate_nodes = mutation_rate_nodes
self.mutation_rate_outputs = mutation_rate_outputs
self.father = father
# self.num_mutations = int(len(self.father.genome) * self.mutation_rate)
self.evaluator = evaluator
self.num_cpus = num_cpus
self.folder = folder
if self.num_cpus > 1:
self.evaluator_pool = []
for i in range(self.num_offsprings):
self.evaluator_pool.append(self.evaluator.clone())
def run(self, num_iteration):
if not os.path.isdir(self.folder):
os.mkdir(self.folder)
self.logfile = open(self.folder + "/out.txt", "w")
self.current_fitness = self.evaluator.evaluate(self.father, 0)
self.father.save(
self.folder + "/cgp_genome_0_" + str(self.current_fitness) + ".txt"
)
self.offsprings = np.empty(self.num_offsprings, dtype=object)  # object array of CGP individuals
self.offspring_fitnesses = np.zeros(self.num_offsprings, dtype=float)
for self.it in range(1, num_iteration + 1):
# generate offsprings
if self.num_cpus == 1:
for i in range(0, self.num_offsprings):
self.offsprings[i] = self.father.clone()
# self.offsprings[i].mutate(self.num_mutations)
self.offsprings[i].mutate_per_gene(
self.mutation_rate_nodes, self.mutation_rate_outputs
)
self.offspring_fitnesses[i] = self.evaluator.evaluate(
self.offsprings[i], self.it
)
else:
for i in range(self.num_offsprings):
self.offsprings[i] = self.father.clone()
# self.offsprings[i].mutate(self.num_mutations)
self.offsprings[i].mutate_per_gene(
self.mutation_rate_nodes, self.mutation_rate_outputs
)
def offspring_eval_task(offspring_id):
return self.evaluator_pool[offspring_id].evaluate(
self.offsprings[offspring_id], self.it
)
self.offspring_fitnesses = Parallel(n_jobs=self.num_cpus)(
delayed(offspring_eval_task)(i) for i in range(self.num_offsprings)
)
# get the best fitness
best_offspring = np.argmax(self.offspring_fitnesses)
# compare to father
self.father_was_updated = False
if self.offspring_fitnesses[best_offspring] >= self.current_fitness:
self.current_fitness = self.offspring_fitnesses[best_offspring]
self.father = self.offsprings[best_offspring]
self.father_was_updated = True
# display stats
print(
self.it,
"\t",
self.current_fitness,
"\t",
self.father_was_updated,
"\t",
self.offspring_fitnesses,
)
self.logfile.write(
str(self.it)
+ "\t"
+ str(self.current_fitness)
+ "\t"
+ str(self.father_was_updated)
+ "\t"
+ str(self.offspring_fitnesses)
+ "\n"
)
self.logfile.flush()
print("====================================================")
if self.father_was_updated:
# print(self.father.genome)
self.father.save(
self.folder
+ "/cgp_genome_"
+ str(self.it)
+ "_"
+ str(self.current_fitness)
+ ".txt"
)
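# -----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): a small 1+lambda
# run on a toy symbolic-regression fitness. CGPES only calls evaluate(cgp, it)
# and clone() on its evaluator, so a duck-typed evaluator is enough here; the
# operator library and the fitness function are made up for the example.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    class ToyEvaluator:
        def evaluate(self, individual, iteration):
            xs = np.linspace(-1.0, 1.0, 20)
            error = sum(
                (individual.run([x, 0.0])[0] - 0.5 * x) ** 2 for x in xs
            )
            return -error  # CGPES keeps the offspring with the highest fitness

        def clone(self):
            return ToyEvaluator()

    library = [
        CGP.CGPFunc(lambda args: np.tanh(args[0] + args[1]), "add", 2),
        CGP.CGPFunc(lambda args: np.tanh(args[0] * args[1]), "mult", 2),
    ]
    father = CGP.random(2, 1, 10, 1, library, 1.0)
    es = CGPES(
        num_offsprings=4,
        mutation_rate_nodes=0.1,
        mutation_rate_outputs=0.3,
        father=father,
        evaluator=ToyEvaluator(),
        folder="genomes_toy",
        num_cpus=1,
    )
    es.run(5)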

# =============================================================================
# scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/mahd/mahd.py | mahd.py
# =============================================================================
from __future__ import annotations
from typing import Any, Callable, Set, Tuple
from skdecide import Domain, Solver
from skdecide.builders.domain import MultiAgent, Sequential, SingleAgent
from skdecide.builders.solver import DeterministicPolicies, Utilities
from skdecide.core import Value
# TODO: remove Markovian req?
class D(Domain, MultiAgent, Sequential):
pass
class MAHD(Solver, DeterministicPolicies, Utilities):
T_domain = D
def __init__(
self,
multiagent_solver_class,
singleagent_solver_class,
multiagent_domain_class,
singleagent_domain_class,
multiagent_domain_factory: Callable[[], Domain] = None,
singleagent_domain_factory: Callable[[Domain, Any], Domain] = None,
multiagent_solver_kwargs=None,
singleagent_solver_kwargs=None,
) -> None:
if multiagent_solver_kwargs is None:
multiagent_solver_kwargs = {}
if "heuristic" in multiagent_solver_kwargs:
print(
"\x1b[3;33;40m"
+ "Multi-agent solver heuristic will be overwritten by MAHD!"
+ "\x1b[0m"
)
multiagent_solver_kwargs["heuristic"] = lambda d, o: self._multiagent_heuristic(
o
)
self._multiagent_solver = multiagent_solver_class(**multiagent_solver_kwargs)
self._multiagent_domain_class = multiagent_domain_class
self._multiagent_domain_factory = multiagent_domain_factory
self._multiagent_domain = self._multiagent_domain_factory()
self._singleagent_solver_class = singleagent_solver_class
self._singleagent_solver_kwargs = singleagent_solver_kwargs
self._singleagent_domain_class = singleagent_domain_class
self._singleagent_domain_factory = singleagent_domain_factory
self._singleagent_domains = {}
self._singleagent_solvers = {}
if self._singleagent_solver_kwargs is None:
self._singleagent_solver_kwargs = {}
for a in self._multiagent_domain.get_agents():
self._singleagent_solvers[a] = self._singleagent_solver_class(
**self._singleagent_solver_kwargs
)
self._singleagent_domain_class.solve_with(
solver=self._singleagent_solvers[a],
domain_factory=lambda: self._singleagent_domain_factory(
self._multiagent_domain, a
)
if self._singleagent_domain_factory is not None
else None,
)
self._singleagent_solutions = {
a: {} for a in self._multiagent_domain.get_agents()
}
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
self._multiagent_domain_class.solve_with(
solver=self._multiagent_solver, domain_factory=domain_factory
)
def _get_next_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
return self._multiagent_solver._get_next_action(observation)
def _get_utility(self, observation: D.T_agent[D.T_observation]) -> D.T_value:
return self._multiagent_solver._get_utility(observation)
def _multiagent_heuristic(
self, observation: D.T_agent[D.T_observation]
) -> Tuple[D.T_agent[Value[D.T_value]], D.T_agent[D.T_concurrency[D.T_event]]]:
h = {}
for a, s in self._singleagent_solvers.items():
if observation[a] not in self._singleagent_solutions[a]:
undefined_solution = False
s.solve_from(observation[a])
if hasattr(self._singleagent_solvers[a], "get_policy"):
p = self._singleagent_solvers[a].get_policy()
for ps, pav in p.items():
self._singleagent_solutions[a][ps] = pav[::-1]
undefined_solution = (
observation[a] not in self._singleagent_solutions[a]
)
else:
if not s.is_solution_defined_for(observation[a]):
undefined_solution = True
else:
self._singleagent_solutions[a][observation[a]] = (
s.get_utility(observation[a]),
s.get_next_action(observation[a]),
)
if undefined_solution:
is_terminal = (
hasattr(self._get_singleagent_domain(a), "is_goal")
and self._get_singleagent_domain(a).is_goal(observation[a])
) or (
hasattr(self._get_singleagent_domain(a), "is_terminal")
and self._get_singleagent_domain(a).is_terminal(observation[a])
)
if not is_terminal:
print(
"\x1b[3;33;40m"
+ "/!\ Solution not defined for agent {} in non terminal state {}".format(
a, observation[a]
)
+ ": Assigning default action! (is it a terminal state without no-op action?)"
"\x1b[0m"
)
try:
self._singleagent_solutions[a][observation[a]] = (
0,
self._get_singleagent_domain(a)
.get_applicable_actions(observation[a])
.sample(),
)
except Exception as err:
terminal_str = "terminal " if is_terminal else ""
raise RuntimeError(
"Cannot sample applicable action "
"for agent {} in {}state {} "
"(original exception is: {})".format(
a, terminal_str, observation[a], err
)
)
if issubclass(self._multiagent_solver.T_domain, SingleAgent):
h = (
Value(
cost=sum(
p[observation[a]][0]
for a, p in self._singleagent_solutions.items()
)
),
{
a: p[observation[a]][1]
for a, p in self._singleagent_solutions.items()
},
)
else:
h = (
{
a: Value(cost=p[observation[a]][0])
for a, p in self._singleagent_solutions.items()
},
{
a: p[observation[a]][1]
for a, p in self._singleagent_solutions.items()
},
)
return h
def _get_singleagent_domain(self, agent):
if agent not in self._singleagent_domains:
self._singleagent_domains[agent] = self._singleagent_domain_factory(
self._multiagent_domain, agent
)
return self._singleagent_domains[agent]
def _initialize(self):
self._multiagent_solver._initialize()
for a, s in self._singleagent_solvers.items():
s._initialize()
def _cleanup(self):
self._multiagent_solver._cleanup()
for a, s in self._singleagent_solvers.items():
s._cleanup()

# =============================================================================
# scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/pile_policy/pile_policy.py | pile_policy.py
# =============================================================================
from __future__ import annotations
import random
from enum import Enum
from typing import Callable
import networkx as nx
import numpy as np
from skdecide.builders.domain.scheduling.scheduling_domains import SchedulingDomain
from skdecide.builders.domain.scheduling.scheduling_domains_modelling import (
SchedulingAction,
SchedulingActionEnum,
State,
)
from skdecide.solvers import DeterministicPolicies, Solver
D = SchedulingDomain
class GreedyChoice(Enum):
MOST_SUCCESSORS = 1
SAMPLE_MOST_SUCCESSORS = 2
FASTEST = 3
TOTALLY_RANDOM = 4
class PilePolicy(Solver, DeterministicPolicies):
T_domain = D
def __init__(self, greedy_method: GreedyChoice = GreedyChoice.MOST_SUCCESSORS):
self.greedy_method = greedy_method
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
self.domain = domain_factory()
self.graph = self.domain.graph
self.nx_graph: nx.DiGraph = self.graph.to_networkx()
self.successors_map = {}
self.predecessors_map = {}
# successors = nx.dfs_successors(self.nx_graph, 1, self.n_jobs+2)
self.successors = {
n: list(nx.algorithms.descendants(self.nx_graph, n))
for n in self.nx_graph.nodes()
}
self.source = 1
for k in self.successors:
self.successors_map[k] = {
"succs": self.successors[k],
"nb": len(self.successors[k]),
}
self.predecessors = {
n: list(nx.algorithms.ancestors(self.nx_graph, n))
for n in self.nx_graph.nodes()
}
for k in self.predecessors:
self.predecessors_map[k] = {
"succs": self.predecessors[k],
"nb": len(self.predecessors[k]),
}
def _get_next_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
s: State = observation
predecessors = {
n: nx.algorithms.ancestors(self.nx_graph, n) for n in self.nx_graph.nodes()
}
for k in predecessors:
self.predecessors_map[k] = {
"succs": predecessors[k],
"nb": len(predecessors[k]),
}
greedy_choice = self.greedy_method
possible_task_to_launch = self.domain.task_possible_to_launch_precedence(
state=s
)
possible_task_to_launch = [
t
for t in possible_task_to_launch
if self.domain.check_if_action_can_be_started(
state=s,
action=SchedulingAction(
task=t,
action=SchedulingActionEnum.START,
time_progress=False,
mode=1,
),
)[0]
]
if len(possible_task_to_launch) > 0:
if greedy_choice == GreedyChoice.MOST_SUCCESSORS:
next_activity = max(
possible_task_to_launch, key=lambda x: self.successors_map[x]["nb"]
)
if greedy_choice == GreedyChoice.SAMPLE_MOST_SUCCESSORS:
prob = np.array(
[
self.successors_map[possible_task_to_launch[i]]["nb"]
for i in range(len(possible_task_to_launch))
]
)
total = np.sum(prob)  # renamed to avoid shadowing the state variable s above
if total != 0:
prob = prob / total
else:
prob = (
1.0
/ len(possible_task_to_launch)
* np.ones((len(possible_task_to_launch)))
)
next_activity = np.random.choice(
np.arange(0, len(possible_task_to_launch)), size=1, p=prob
)[0]
next_activity = possible_task_to_launch[next_activity]
if greedy_choice == GreedyChoice.FASTEST:
next_activity = min(
possible_task_to_launch,
key=lambda x: self.domain.sample_task_duration(x, 1, 0.0),
)
if greedy_choice == GreedyChoice.TOTALLY_RANDOM:
next_activity = random.choice(possible_task_to_launch)
return SchedulingAction(
task=next_activity,
mode=1,
action=SchedulingActionEnum.START,
time_progress=False,
)
else:
return SchedulingAction(
task=None,
mode=1,
action=SchedulingActionEnum.TIME_PR,
time_progress=True,
)
def _is_policy_defined_for(self, observation: D.T_agent[D.T_observation]) -> bool:
return True
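# -----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). The policy object
# itself is cheap to build; it only does useful work once attached to a
# scheduling domain via solve_with. The domain named below is hypothetical:
# any skdecide SchedulingDomain (e.g. an RCPSP instance from the hub) could be
# plugged in.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    solver = PilePolicy(greedy_method=GreedyChoice.SAMPLE_MOST_SUCCESSORS)
    # Hypothetical wiring, shown as a comment because it needs a real domain:
    # MySchedulingDomain.solve_with(solver, lambda: MySchedulingDomain(...))
    print("PilePolicy configured with", solver.greedy_method)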

# =============================================================================
# scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/aostar/aostar.py | aostar.py
# =============================================================================
from __future__ import annotations
import os
import sys
from typing import Callable, Optional
from skdecide import Domain, Solver, hub
from skdecide.builders.domain import (
Actions,
EnumerableTransitions,
FullyObservable,
Goals,
Markovian,
PositiveCosts,
Sequential,
SingleAgent,
)
from skdecide.builders.solver import DeterministicPolicies, ParallelSolver, Utilities
from skdecide.core import Value
record_sys_path = sys.path
skdecide_cpp_extension_lib_path = os.path.abspath(hub.__path__[0])
if skdecide_cpp_extension_lib_path not in sys.path:
sys.path.append(skdecide_cpp_extension_lib_path)
try:
from __skdecide_hub_cpp import _AOStarSolver_ as aostar_solver
# TODO: remove Markovian req?
class D(
Domain,
SingleAgent,
Sequential,
EnumerableTransitions,
Actions,
Goals,
Markovian,
FullyObservable,
PositiveCosts,
):
pass
class AOstar(ParallelSolver, Solver, DeterministicPolicies, Utilities):
T_domain = D
def __init__(
self,
domain_factory: Callable[[], Domain],
heuristic: Optional[
Callable[[Domain, D.T_state], D.T_agent[Value[D.T_value]]]
] = None,
discount: float = 1.0,
max_tip_expanions: int = 1,  # note the misspelling; the value is stored as self._max_tip_expansions below
parallel: bool = False,
shared_memory_proxy=None,
detect_cycles: bool = False,
debug_logs: bool = False,
) -> None:
ParallelSolver.__init__(
self,
domain_factory=domain_factory,
parallel=parallel,
shared_memory_proxy=shared_memory_proxy,
)
self._solver = None
self._discount = discount
self._max_tip_expansions = max_tip_expanions
self._detect_cycles = detect_cycles
self._debug_logs = debug_logs
if heuristic is None:
self._heuristic = lambda d, s: Value(cost=0)
else:
self._heuristic = heuristic
self._lambdas = [self._heuristic]
self._ipc_notify = True
def close(self):
"""Joins the parallel domains' processes.
Not calling this method (or not using the 'with' context statement)
results in the solver forever waiting for the domain processes to exit.
"""
if self._parallel:
self._solver.close()
ParallelSolver.close(self)
def _init_solve(self, domain_factory: Callable[[], Domain]) -> None:
self._domain_factory = domain_factory
self._solver = aostar_solver(
domain=self.get_domain(),
goal_checker=lambda d, s: d.is_goal(s),
heuristic=lambda d, s: self._heuristic(d, s)
if not self._parallel
else d.call(None, 0, s),
discount=self._discount,
max_tip_expansions=self._max_tip_expansions,
detect_cycles=self._detect_cycles,
parallel=self._parallel,
debug_logs=self._debug_logs,
)
self._solver.clear()
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
self._init_solve(domain_factory)
def _solve_from(self, memory: D.T_memory[D.T_state]) -> None:
self._solver.solve(memory)
def _is_solution_defined_for(
self, observation: D.T_agent[D.T_observation]
) -> bool:
return self._solver.is_solution_defined_for(observation)
def _get_next_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
if not self._is_solution_defined_for(observation):
self._solve_from(observation)
return self._solver.get_next_action(observation)
def _get_utility(self, observation: D.T_agent[D.T_observation]) -> D.T_value:
return self._solver.get_utility(observation)
except ImportError:
sys.path = record_sys_path
print(
'Scikit-decide C++ hub library not found. Please check it is installed in "skdecide/hub".'
)
    raise
scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/aostar/aostar.py | aostar.py
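# Hedged usage sketch (not part of aostar.py): how the AOstar solver defined above is
# typically driven. `domain_factory` must build a domain satisfying the D requirements
# above; the public solve()/get_next_action()/get_utility() wrappers around the
# underscore methods are assumed to follow the usual scikit-decide solver convention,
# and the `with` statement relies on the context-manager support mentioned in close().
def _example_aostar_usage(domain_factory, heuristic=None, observation=None):
    with AOstar(domain_factory=domain_factory, heuristic=heuristic, discount=1.0) as solver:
        solver.solve(domain_factory)  # assumed public wrapper of _solve_domain()
        if observation is not None:
            # Query the computed deterministic policy and its value estimate.
            return solver.get_next_action(observation), solver.get_utility(observation)
        return None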
from __future__ import annotations
from enum import Enum
from typing import Any, Callable, Dict, Optional, Union
from discrete_optimization.rcpsp.rcpsp_model import (
MultiModeRCPSPModel,
RCPSPModel,
RCPSPModelCalendar,
RCPSPSolution,
SingleModeRCPSPModel,
)
from discrete_optimization.rcpsp_multiskill.rcpsp_multiskill import (
MS_RCPSPModel,
MS_RCPSPSolution,
MS_RCPSPSolution_Variant,
)
from skdecide.builders.domain.scheduling.scheduling_domains import SchedulingDomain
from skdecide.hub.solver.do_solver.sk_to_do_binding import build_do_domain
from skdecide.hub.solver.sgs_policies.sgs_policies import (
BasePolicyMethod,
PolicyMethodParams,
PolicyRCPSP,
)
from skdecide.solvers import DeterministicPolicies, Solver
class D(SchedulingDomain):
pass
class SolvingMethod(Enum):
PILE = 0
GA = 1
LS = 2
LP = 3
CP = 4
LNS_LP = 5
LNS_CP = 6
LNS_CP_CALENDAR = 7
    # New algorithm, similar to LNS, iteratively adding constraints to fulfill calendar constraints.
def build_solver(solving_method: SolvingMethod, do_domain):
if isinstance(do_domain, (RCPSPModelCalendar, RCPSPModel, MultiModeRCPSPModel)):
from discrete_optimization.rcpsp.rcpsp_solvers import (
look_for_solver,
solvers_map,
)
available = look_for_solver(do_domain)
solving_method_to_str = {
SolvingMethod.PILE: "greedy",
SolvingMethod.GA: "ga",
SolvingMethod.LS: "ls",
SolvingMethod.LP: "lp",
SolvingMethod.CP: "cp",
SolvingMethod.LNS_LP: "lns-lp",
SolvingMethod.LNS_CP: "lns-cp",
SolvingMethod.LNS_CP_CALENDAR: "lns-cp-calendar",
}
smap = [
(av, solvers_map[av])
for av in available
if solvers_map[av][0] == solving_method_to_str[solving_method]
]
if len(smap) > 0:
return smap[0]
    if isinstance(do_domain, (MS_RCPSPModel, MultiModeRCPSPModel)):
from discrete_optimization.rcpsp_multiskill.rcpsp_multiskill_solvers import (
look_for_solver,
solvers_map,
)
available = look_for_solver(do_domain)
solving_method_to_str = {
SolvingMethod.PILE: "greedy",
SolvingMethod.GA: "ga",
SolvingMethod.LS: "ls",
SolvingMethod.LP: "lp",
SolvingMethod.CP: "cp",
SolvingMethod.LNS_LP: "lns-lp",
SolvingMethod.LNS_CP: "lns-cp",
SolvingMethod.LNS_CP_CALENDAR: "lns-cp-calendar",
}
smap = [
(av, solvers_map[av])
for av in available
if solvers_map[av][0] == solving_method_to_str[solving_method]
]
if len(smap) > 0:
return smap[0]
return None
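# Note (not part of the original file): build_solver() returns the first matching
# (solver_class, (method_name, default_params)) pair found in the discrete-optimization
# solvers_map, or None when the requested method is unavailable; DOSolver._solve_domain
# below unpacks it as `solver_class = solvers[0]` and `key, params = solvers[1]`.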
def from_solution_to_policy(
solution: Union[RCPSPSolution, MS_RCPSPSolution, MS_RCPSPSolution_Variant],
domain,
policy_method_params: PolicyMethodParams,
):
permutation_task = None
modes_dictionnary = None
schedule = None
resource_allocation = None
resource_allocation_priority = None
if isinstance(solution, RCPSPSolution):
permutation_task = sorted(
solution.rcpsp_schedule,
key=lambda x: (solution.rcpsp_schedule[x]["start_time"], x),
)
schedule = solution.rcpsp_schedule
modes_dictionnary = {}
# set modes for start and end (dummy) jobs
modes_dictionnary[1] = 1
modes_dictionnary[solution.problem.n_jobs_non_dummy + 2] = 1
for i in range(len(solution.rcpsp_modes)):
modes_dictionnary[i + 2] = solution.rcpsp_modes[i]
elif isinstance(solution, MS_RCPSPSolution):
permutation_task = sorted(
solution.schedule, key=lambda x: (solution.schedule[x]["start_time"], x)
)
schedule = solution.schedule
employees = sorted(domain.get_resource_units_names())
resource_allocation = {
task: [
employees[i] for i in solution.employee_usage[task].keys()
            ]  # warning: assumes employee_usage keys are indices into the sorted resource-unit names
for task in solution.employee_usage
}
if isinstance(solution, MS_RCPSPSolution_Variant):
resource_allocation_priority = solution.priority_worker_per_task
modes_dictionnary = {}
# set modes for start and end (dummy) jobs
modes_dictionnary[1] = 1
modes_dictionnary[solution.problem.n_jobs_non_dummy + 2] = 1
for i in range(len(solution.modes_vector)):
modes_dictionnary[i + 2] = solution.modes_vector[i]
else:
modes_dictionnary = solution.modes
return PolicyRCPSP(
domain=domain,
policy_method_params=policy_method_params,
permutation_task=permutation_task,
modes_dictionnary=modes_dictionnary,
schedule=schedule,
resource_allocation=resource_allocation,
resource_allocation_priority=resource_allocation_priority,
)
class DOSolver(Solver, DeterministicPolicies):
T_domain = D
def __init__(
self,
policy_method_params: PolicyMethodParams,
method: SolvingMethod = SolvingMethod.PILE,
        dict_params: Optional[Dict[Any, Any]] = None,
):
self.method = method
self.policy_method_params = policy_method_params
self.dict_params = dict_params
if self.dict_params is None:
self.dict_params = {}
def get_available_methods(self, domain: SchedulingDomain):
do_domain = build_do_domain(domain)
if isinstance(do_domain, (MS_RCPSPModel)):
from discrete_optimization.rcpsp_multiskill.rcpsp_multiskill_solvers import (
look_for_solver,
solvers_map,
)
available = look_for_solver(do_domain)
elif isinstance(
do_domain, (SingleModeRCPSPModel, RCPSPModel, MultiModeRCPSPModel)
):
from discrete_optimization.rcpsp.rcpsp_solvers import (
look_for_solver,
solvers_map,
)
available = look_for_solver(do_domain)
smap = [(av, solvers_map[av]) for av in available]
return smap
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
self.domain = domain_factory()
self.do_domain = build_do_domain(self.domain)
solvers = build_solver(solving_method=self.method, do_domain=self.do_domain)
solver_class = solvers[0]
key, params = solvers[1]
for k in params:
if k not in self.dict_params:
self.dict_params[k] = params[k]
self.solver = solver_class(self.do_domain, **self.dict_params)
if hasattr(self.solver, "init_model") and callable(self.solver.init_model):
self.solver.init_model(**self.dict_params)
result_storage = self.solver.solve(**self.dict_params)
best_solution: RCPSPSolution = result_storage.get_best_solution()
assert best_solution is not None
fits = self.do_domain.evaluate(best_solution)
self.best_solution = best_solution
self.policy_object = from_solution_to_policy(
solution=best_solution,
domain=self.domain,
policy_method_params=self.policy_method_params,
)
def get_external_policy(self) -> PolicyRCPSP:
return self.policy_object
def compute_external_policy(self, policy_method_params: PolicyMethodParams):
return from_solution_to_policy(
solution=self.best_solution,
domain=self.domain,
policy_method_params=policy_method_params,
)
def _get_next_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
return self.policy_object.get_next_action(observation=observation)
def _is_policy_defined_for(self, observation: D.T_agent[D.T_observation]) -> bool:
        return self.policy_object.is_policy_defined_for(observation=observation)
scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/do_solver/do_solver_scheduling.py | do_solver_scheduling.py
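# Illustrative sketch (not part of do_solver_scheduling.py): from_solution_to_policy()
# orders tasks by (start_time, task_id) to build the permutation handed to PolicyRCPSP.
# The schedule dict below is made up for the example.
_example_schedule = {
    1: {"start_time": 0, "end_time": 0},
    3: {"start_time": 2, "end_time": 6},
    2: {"start_time": 2, "end_time": 5},
    4: {"start_time": 6, "end_time": 6},
}
_example_permutation = sorted(
    _example_schedule, key=lambda t: (_example_schedule[t]["start_time"], t)
)
assert _example_permutation == [1, 2, 3, 4]  # ties on start_time are broken by task id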
from __future__ import annotations
from typing import Union
from discrete_optimization.rcpsp.rcpsp_model import (
MultiModeRCPSPModel,
RCPSPModel,
RCPSPModelCalendar,
RCPSPSolution,
SingleModeRCPSPModel,
)
from discrete_optimization.rcpsp_multiskill.rcpsp_multiskill import (
Employee,
MS_RCPSPModel,
MS_RCPSPModel_Variant,
SkillDetail,
)
from skdecide.builders.domain.scheduling.scheduling_domains import (
MultiModeMultiSkillRCPSP,
MultiModeMultiSkillRCPSPCalendar,
MultiModeRCPSP,
MultiModeRCPSPCalendar,
MultiModeRCPSPWithCost,
SchedulingDomain,
SingleModeRCPSP,
SingleModeRCPSP_Stochastic_Durations,
SingleModeRCPSPCalendar,
State,
)
from skdecide.hub.domain.rcpsp.rcpsp_sk import (
MRCPSP,
MSRCPSP,
RCPSP,
MRCPSPCalendar,
MSRCPSPCalendar,
)
def from_last_state_to_solution(state: State, domain: SchedulingDomain):
modes = [state.tasks_mode.get(j, 1) for j in sorted(domain.get_tasks_ids())]
modes = modes[1:-1]
schedule = {
p.value.id: {"start_time": p.value.start, "end_time": p.value.end}
for p in state.tasks_complete_details
}
return RCPSPSolution(
problem=build_do_domain(domain),
rcpsp_permutation=None,
rcpsp_modes=modes,
rcpsp_schedule=schedule,
)
def build_do_domain(
scheduling_domain: Union[
SingleModeRCPSP,
SingleModeRCPSPCalendar,
MultiModeRCPSP,
MultiModeRCPSPWithCost,
MultiModeRCPSPCalendar,
MultiModeMultiSkillRCPSP,
MultiModeMultiSkillRCPSPCalendar,
SingleModeRCPSP_Stochastic_Durations,
]
):
if isinstance(scheduling_domain, SingleModeRCPSP):
modes_details = scheduling_domain.get_tasks_modes().copy()
mode_details_do = {}
for task in modes_details:
mode_details_do[task] = {}
for mode in modes_details[task]:
mode_details_do[task][mode] = {}
for r in modes_details[task][mode].get_ressource_names():
mode_details_do[task][mode][r] = modes_details[task][
mode
].get_resource_need_at_time(
r, time=0
) # should be constant anyway
mode_details_do[task][mode][
"duration"
] = scheduling_domain.get_task_duration(task=task, mode=mode)
return SingleModeRCPSPModel(
resources={
r: scheduling_domain.get_original_quantity_resource(r)
for r in scheduling_domain.get_resource_types_names()
},
non_renewable_resources=[
r
for r in scheduling_domain.get_resource_renewability()
if not scheduling_domain.get_resource_renewability()[r]
],
mode_details=mode_details_do,
successors=scheduling_domain.get_successors(),
horizon=scheduling_domain.get_max_horizon(),
horizon_multiplier=1,
)
if isinstance(scheduling_domain, SingleModeRCPSP_Stochastic_Durations):
modes_details = scheduling_domain.get_tasks_modes().copy()
mode_details_do = {}
for task in modes_details:
mode_details_do[task] = {}
for mode in modes_details[task]:
mode_details_do[task][mode] = {}
for r in modes_details[task][mode].get_ressource_names():
mode_details_do[task][mode][r] = modes_details[task][
mode
].get_resource_need_at_time(
r, time=0
) # should be constant anyway
mode_details_do[task][mode][
"duration"
] = scheduling_domain.sample_task_duration(task=task, mode=mode)
return SingleModeRCPSPModel(
resources={
r: scheduling_domain.get_original_quantity_resource(r)
for r in scheduling_domain.get_resource_types_names()
},
non_renewable_resources=[
r
for r in scheduling_domain.get_resource_renewability()
if not scheduling_domain.get_resource_renewability()[r]
],
mode_details=mode_details_do,
successors=scheduling_domain.get_successors(),
horizon=scheduling_domain.get_max_horizon(),
horizon_multiplier=1,
)
if isinstance(scheduling_domain, (MultiModeRCPSP, MultiModeRCPSPWithCost)):
modes_details = scheduling_domain.get_tasks_modes().copy()
mode_details_do = {}
for task in modes_details:
mode_details_do[task] = {}
for mode in modes_details[task]:
mode_details_do[task][mode] = {}
for r in modes_details[task][mode].get_ressource_names():
mode_details_do[task][mode][r] = modes_details[task][
mode
].get_resource_need_at_time(
r, time=0
) # should be constant anyway
mode_details_do[task][mode][
"duration"
] = scheduling_domain.get_task_duration(task=task, mode=mode)
return MultiModeRCPSPModel(
resources={
r: scheduling_domain.get_original_quantity_resource(r)
for r in scheduling_domain.get_resource_types_names()
},
non_renewable_resources=[
r
for r in scheduling_domain.get_resource_renewability()
if not scheduling_domain.get_resource_renewability()[r]
],
mode_details=mode_details_do,
successors=scheduling_domain.get_successors(),
horizon=scheduling_domain.get_max_horizon(),
horizon_multiplier=1,
)
if isinstance(scheduling_domain, (MultiModeRCPSPCalendar, SingleModeRCPSPCalendar)):
modes_details = scheduling_domain.get_tasks_modes().copy()
mode_details_do = {}
for task in modes_details:
mode_details_do[task] = {}
for mode in modes_details[task]:
mode_details_do[task][mode] = {}
for r in modes_details[task][mode].get_ressource_names():
mode_details_do[task][mode][r] = modes_details[task][
mode
].get_resource_need_at_time(
r, time=0
) # should be constant anyway
mode_details_do[task][mode][
"duration"
] = scheduling_domain.get_task_duration(task=task, mode=mode)
horizon = scheduling_domain.get_max_horizon()
return RCPSPModelCalendar(
resources={
r: [
scheduling_domain.get_quantity_resource(r, time=t)
for t in range(horizon)
]
for r in scheduling_domain.get_resource_types_names()
},
non_renewable_resources=[
r
for r in scheduling_domain.get_resource_renewability()
if not scheduling_domain.get_resource_renewability()[r]
],
mode_details=mode_details_do,
successors=scheduling_domain.get_successors(),
horizon=scheduling_domain.get_max_horizon(),
horizon_multiplier=1,
)
if isinstance(
scheduling_domain, (MultiModeMultiSkillRCPSP, MultiModeMultiSkillRCPSPCalendar)
):
modes_details = scheduling_domain.get_tasks_modes().copy()
skills_set = set()
mode_details_do = {}
for task in modes_details:
mode_details_do[task] = {}
for mode in modes_details[task]:
mode_details_do[task][mode] = {}
for r in modes_details[task][mode].get_ressource_names():
mode_details_do[task][mode][r] = modes_details[task][
mode
].get_resource_need_at_time(
r, time=0
) # should be constant anyway
skills = scheduling_domain.get_skills_of_task(task=task, mode=mode)
for s in skills:
mode_details_do[task][mode][s] = skills[s]
skills_set.add(s)
mode_details_do[task][mode][
"duration"
] = scheduling_domain.get_task_duration(task=task, mode=mode)
horizon = scheduling_domain.get_max_horizon()
employees_dict = {}
employees = scheduling_domain.get_resource_units_names()
sorted_employees = sorted(employees)
for employee, i in zip(sorted_employees, range(len(sorted_employees))):
skills = scheduling_domain.get_skills_of_resource(resource=employee)
skills_details = {
r: SkillDetail(skill_value=skills[r], efficiency_ratio=0, experience=0)
for r in skills
}
employees_dict[i] = Employee(
dict_skill=skills_details,
calendar_employee=[
bool(scheduling_domain.get_quantity_resource(employee, time=t))
for t in range(horizon + 1)
],
)
return MS_RCPSPModel_Variant(
skills_set=scheduling_domain.get_skills_names(),
resources_set=set(scheduling_domain.get_resource_types_names()),
non_renewable_resources=set(
[
r
for r in scheduling_domain.get_resource_renewability()
if not scheduling_domain.get_resource_renewability()[r]
]
),
resources_availability={
r: [
scheduling_domain.get_quantity_resource(r, time=t)
for t in range(horizon + 1)
]
for r in scheduling_domain.get_resource_types_names()
},
employees=employees_dict,
employees_availability=[
sum(
[
scheduling_domain.get_quantity_resource(employee, time=t)
for employee in employees
]
)
for t in range(horizon + 1)
],
mode_details=mode_details_do,
successors=scheduling_domain.get_successors(),
horizon=horizon,
horizon_multiplier=1,
sink_task=max(scheduling_domain.get_tasks_ids()),
source_task=min(scheduling_domain.get_tasks_ids()),
one_unit_per_task_max=False,
)
    # TODO: for iMOPSE instances, one_unit_per_task_max should be True
def build_sk_domain(
rcpsp_do_domain: Union[
MS_RCPSPModel, SingleModeRCPSPModel, MultiModeRCPSP, RCPSPModelCalendar
],
varying_ressource: bool = False,
):
if isinstance(rcpsp_do_domain, RCPSPModelCalendar):
if varying_ressource:
my_domain = MRCPSPCalendar(
resource_names=rcpsp_do_domain.resources_list,
task_ids=sorted(rcpsp_do_domain.mode_details.keys()),
tasks_mode=rcpsp_do_domain.mode_details,
successors=rcpsp_do_domain.successors,
max_horizon=rcpsp_do_domain.horizon,
resource_availability=rcpsp_do_domain.resources,
resource_renewable={
r: r not in rcpsp_do_domain.non_renewable_resources
for r in rcpsp_do_domain.resources_list
},
)
return my_domain
        else:
            # varying_ressource is False: ignore the calendar information and keep only
            # the maximum capacity of each resource over the horizon.
my_domain = MRCPSP(
resource_names=rcpsp_do_domain.resources_list,
task_ids=sorted(rcpsp_do_domain.mode_details.keys()),
tasks_mode=rcpsp_do_domain.mode_details,
successors=rcpsp_do_domain.successors,
max_horizon=rcpsp_do_domain.horizon,
resource_availability={
r: max(rcpsp_do_domain.resources[r])
for r in rcpsp_do_domain.resources
},
resource_renewable={
r: r not in rcpsp_do_domain.non_renewable_resources
for r in rcpsp_do_domain.resources_list
},
)
return my_domain
if isinstance(rcpsp_do_domain, SingleModeRCPSPModel):
my_domain = RCPSP(
resource_names=rcpsp_do_domain.resources_list,
task_ids=sorted(rcpsp_do_domain.mode_details.keys()),
tasks_mode=rcpsp_do_domain.mode_details,
successors=rcpsp_do_domain.successors,
max_horizon=rcpsp_do_domain.horizon,
resource_availability=rcpsp_do_domain.resources,
resource_renewable={
r: r not in rcpsp_do_domain.non_renewable_resources
for r in rcpsp_do_domain.resources_list
},
)
return my_domain
elif isinstance(rcpsp_do_domain, (MultiModeRCPSPModel, RCPSPModel)):
my_domain = MRCPSP(
resource_names=rcpsp_do_domain.resources_list,
task_ids=sorted(rcpsp_do_domain.mode_details.keys()),
tasks_mode=rcpsp_do_domain.mode_details,
successors=rcpsp_do_domain.successors,
max_horizon=rcpsp_do_domain.horizon,
resource_availability=rcpsp_do_domain.resources,
resource_renewable={
r: r not in rcpsp_do_domain.non_renewable_resources
for r in rcpsp_do_domain.resources_list
},
)
return my_domain
elif isinstance(rcpsp_do_domain, MS_RCPSPModel):
if not varying_ressource:
resource_type_names = list(rcpsp_do_domain.resources_list)
resource_skills = {r: {} for r in resource_type_names}
resource_availability = {
r: rcpsp_do_domain.resources_availability[r][0]
for r in rcpsp_do_domain.resources_availability
}
resource_renewable = {
r: r not in rcpsp_do_domain.non_renewable_resources
for r in rcpsp_do_domain.resources_list
}
resource_unit_names = []
for employee in rcpsp_do_domain.employees:
resource_unit_names += [str(employee)]
resource_skills[resource_unit_names[-1]] = {}
resource_availability[resource_unit_names[-1]] = 1
resource_renewable[resource_unit_names[-1]] = True
for s in rcpsp_do_domain.employees[employee].dict_skill:
resource_skills[resource_unit_names[-1]][s] = (
rcpsp_do_domain.employees[employee].dict_skill[s].skill_value
)
return MSRCPSP(
skills_names=list(rcpsp_do_domain.skills_set),
resource_unit_names=resource_unit_names,
resource_type_names=resource_type_names,
resource_skills=resource_skills,
task_ids=sorted(rcpsp_do_domain.mode_details.keys()),
tasks_mode=rcpsp_do_domain.mode_details,
successors=rcpsp_do_domain.successors,
max_horizon=rcpsp_do_domain.horizon,
resource_availability=resource_availability,
resource_renewable=resource_renewable,
)
else:
resource_type_names = list(rcpsp_do_domain.resources_list)
resource_skills = {r: {} for r in resource_type_names}
resource_availability = {
r: rcpsp_do_domain.resources_availability[r]
for r in rcpsp_do_domain.resources_availability
}
resource_renewable = {
r: r not in rcpsp_do_domain.non_renewable_resources
for r in rcpsp_do_domain.resources_list
}
resource_unit_names = []
for employee in rcpsp_do_domain.employees:
resource_unit_names += [str(employee)]
resource_skills[resource_unit_names[-1]] = {}
resource_availability[resource_unit_names[-1]] = [
1 if x else 0
for x in rcpsp_do_domain.employees[employee].calendar_employee
]
resource_renewable[resource_unit_names[-1]] = True
for s in rcpsp_do_domain.employees[employee].dict_skill:
resource_skills[resource_unit_names[-1]][s] = (
rcpsp_do_domain.employees[employee].dict_skill[s].skill_value
)
return MSRCPSPCalendar(
skills_names=list(rcpsp_do_domain.skills_set),
resource_unit_names=resource_unit_names,
resource_type_names=resource_type_names,
resource_skills=resource_skills,
task_ids=sorted(rcpsp_do_domain.mode_details.keys()),
tasks_mode=rcpsp_do_domain.mode_details,
successors=rcpsp_do_domain.successors,
max_horizon=rcpsp_do_domain.horizon,
resource_availability=resource_availability,
resource_renewable=resource_renewable,
            )
scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/do_solver/sk_to_do_binding.py | sk_to_do_binding.py
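# Illustrative sketch (not part of sk_to_do_binding.py): shape of the mode_details
# dictionary assembled by build_do_domain() for the discrete-optimization models,
# i.e. {task_id: {mode: {resource_name: need, ..., "duration": duration}}}.
# Task ids, modes, resource names and values below are made up.
_example_mode_details = {
    1: {1: {"R1": 0, "R2": 0, "duration": 0}},  # dummy source task
    2: {1: {"R1": 2, "R2": 0, "duration": 4}, 2: {"R1": 1, "R2": 1, "duration": 6}},
    3: {1: {"R1": 0, "R2": 3, "duration": 5}},
}
assert _example_mode_details[2][2]["duration"] == 6  # task 2, mode 2 lasts 6 time units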
from __future__ import annotations
import os
import sys
from typing import Callable, Dict, Optional, Tuple
from skdecide import Domain, Solver, hub
from skdecide.builders.domain import (
Actions,
EnumerableTransitions,
FullyObservable,
Goals,
Markovian,
PositiveCosts,
Sequential,
SingleAgent,
)
from skdecide.builders.solver import DeterministicPolicies, ParallelSolver, Utilities
from skdecide.core import Value
record_sys_path = sys.path
skdecide_cpp_extension_lib_path = os.path.abspath(hub.__path__[0])
if skdecide_cpp_extension_lib_path not in sys.path:
sys.path.append(skdecide_cpp_extension_lib_path)
try:
from __skdecide_hub_cpp import _ILAOStarSolver_ as ilaostar_solver
# TODO: remove Markovian req?
class D(
Domain,
SingleAgent,
Sequential,
EnumerableTransitions,
Actions,
Goals,
Markovian,
FullyObservable,
PositiveCosts,
):
pass
class ILAOstar(ParallelSolver, Solver, DeterministicPolicies, Utilities):
T_domain = D
def __init__(
self,
domain_factory: Callable[[], Domain],
heuristic: Optional[
Callable[[Domain, D.T_state], D.T_agent[Value[D.T_value]]]
] = None,
discount: float = 1.0,
epsilon: float = 0.001,
parallel: bool = False,
shared_memory_proxy=None,
debug_logs: bool = False,
) -> None:
ParallelSolver.__init__(
self,
domain_factory=domain_factory,
parallel=parallel,
shared_memory_proxy=shared_memory_proxy,
)
self._solver = None
self._discount = discount
self._epsilon = epsilon
self._debug_logs = debug_logs
if heuristic is None:
self._heuristic = lambda d, s: Value(cost=0)
else:
self._heuristic = heuristic
self._lambdas = [self._heuristic]
self._ipc_notify = True
def close(self):
"""Joins the parallel domains' processes.
Not calling this method (or not using the 'with' context statement)
results in the solver forever waiting for the domain processes to exit.
"""
if self._parallel:
self._solver.close()
ParallelSolver.close(self)
def _init_solve(self, domain_factory: Callable[[], Domain]) -> None:
self._domain_factory = domain_factory
self._solver = ilaostar_solver(
domain=self.get_domain(),
goal_checker=lambda d, s: d.is_goal(s),
heuristic=lambda d, s: self._heuristic(d, s)
if not self._parallel
else d.call(None, 0, s),
discount=self._discount,
epsilon=self._epsilon,
parallel=self._parallel,
debug_logs=self._debug_logs,
)
self._solver.clear()
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
self._init_solve(domain_factory)
def _solve_from(self, memory: D.T_memory[D.T_state]) -> None:
self._solver.solve(memory)
def _is_solution_defined_for(
self, observation: D.T_agent[D.T_observation]
) -> bool:
return self._solver.is_solution_defined_for(observation)
def _get_next_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
if not self._is_solution_defined_for(observation):
self._solve_from(observation)
return self._solver.get_next_action(observation)
def _get_utility(self, observation: D.T_agent[D.T_observation]) -> D.T_value:
return self._solver.get_utility(observation)
def get_nb_of_explored_states(self) -> int:
return self._solver.get_nb_of_explored_states()
def best_solution_graph_size(self) -> int:
return self._solver.best_solution_graph_size()
def get_policy(
self,
) -> Dict[
D.T_agent[D.T_observation],
Tuple[D.T_agent[D.T_concurrency[D.T_event]], float],
]:
return self._solver.get_policy()
except ImportError:
sys.path = record_sys_path
print(
'Scikit-decide C++ hub library not found. Please check it is installed in "skdecide/hub".'
)
    raise
scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/ilaostar/ilaostar.py | ilaostar.py
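# Hedged sketch (not part of ilaostar.py): the `heuristic` argument is a callable
# (domain, state) -> Value giving an optimistic cost-to-go estimate. The grid-style
# state fields (x, y) and the goal coordinates below are hypothetical, only to show
# the expected signature.
from skdecide.core import Value
def _example_manhattan_heuristic(domain, state, goal_x=10, goal_y=10):
    # Admissible lower bound on remaining cost for a unit-cost grid domain (assumed fields).
    return Value(cost=abs(goal_x - state.x) + abs(goal_y - state.y))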
from __future__ import annotations
import os
import sys
from typing import Callable, Dict, Optional, Tuple
from skdecide import Domain, Solver, hub
from skdecide.builders.domain import (
Actions,
FullyObservable,
Goals,
Markovian,
PositiveCosts,
Sequential,
SingleAgent,
UncertainTransitions,
)
from skdecide.builders.solver import DeterministicPolicies, ParallelSolver, Utilities
from skdecide.core import Value
record_sys_path = sys.path
skdecide_cpp_extension_lib_path = os.path.abspath(hub.__path__[0])
if skdecide_cpp_extension_lib_path not in sys.path:
sys.path.append(skdecide_cpp_extension_lib_path)
try:
from __skdecide_hub_cpp import _LRTDPSolver_ as lrtdp_solver
# TODO: remove Markovian req?
class D(
Domain,
SingleAgent,
Sequential,
UncertainTransitions,
Actions,
Goals,
Markovian,
FullyObservable,
PositiveCosts,
):
pass
class LRTDP(ParallelSolver, Solver, DeterministicPolicies, Utilities):
T_domain = D
def __init__(
self,
            domain_factory: Optional[Callable[[], Domain]] = None,
heuristic: Optional[
Callable[[Domain, D.T_state], D.T_agent[Value[D.T_value]]]
] = None,
use_labels: bool = True,
time_budget: int = 3600000,
rollout_budget: int = 100000,
max_depth: int = 1000,
epsilon_moving_average_window: int = 100,
epsilon: float = 0.001,
discount: float = 1.0,
online_node_garbage: bool = False,
continuous_planning: bool = True,
parallel: bool = False,
shared_memory_proxy=None,
debug_logs: bool = False,
watchdog: Callable[[int, int, float, float], bool] = None,
) -> None:
ParallelSolver.__init__(
self,
domain_factory=domain_factory,
parallel=parallel,
shared_memory_proxy=shared_memory_proxy,
)
self._solver = None
if heuristic is None:
self._heuristic = lambda d, s: Value(cost=0)
else:
self._heuristic = heuristic
self._lambdas = [self._heuristic]
self._use_labels = use_labels
self._time_budget = time_budget
self._rollout_budget = rollout_budget
self._max_depth = max_depth
self._epsilon_moving_average_window = epsilon_moving_average_window
self._epsilon = epsilon
self._discount = discount
self._online_node_garbage = online_node_garbage
self._continuous_planning = continuous_planning
self._debug_logs = debug_logs
if watchdog is None:
self._watchdog = (
lambda elapsed_time, number_rollouts, best_value, epsilon_moving_average: True
)
else:
self._watchdog = watchdog
self._ipc_notify = True
def close(self):
"""Joins the parallel domains' processes.
Not calling this method (or not using the 'with' context statement)
results in the solver forever waiting for the domain processes to exit.
"""
if self._parallel:
self._solver.close()
ParallelSolver.close(self)
def _init_solve(self, domain_factory: Callable[[], Domain]) -> None:
self._domain_factory = domain_factory
self._solver = lrtdp_solver(
domain=self.get_domain(),
goal_checker=lambda d, s, i=None: d.is_goal(s)
if not self._parallel
else d.is_goal(s, i),
heuristic=lambda d, s, i=None: self._heuristic(d, s)
if not self._parallel
else d.call(i, 0, s),
use_labels=self._use_labels,
time_budget=self._time_budget,
rollout_budget=self._rollout_budget,
max_depth=self._max_depth,
epsilon_moving_average_window=self._epsilon_moving_average_window,
epsilon=self._epsilon,
discount=self._discount,
online_node_garbage=self._online_node_garbage,
parallel=self._parallel,
debug_logs=self._debug_logs,
watchdog=self._watchdog,
)
self._solver.clear()
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
self._init_solve(domain_factory)
def _solve_from(self, memory: D.T_memory[D.T_state]) -> None:
self._solver.solve(memory)
def _is_solution_defined_for(
self, observation: D.T_agent[D.T_observation]
) -> bool:
return self._solver.is_solution_defined_for(observation)
def _get_next_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
if self._continuous_planning or not self._is_solution_defined_for(
observation
):
self._solve_from(observation)
action = self._solver.get_next_action(observation)
if action is None:
print(
"\x1b[3;33;40m"
+ "No best action found in observation "
+ str(observation)
+ ", applying random action"
+ "\x1b[0m"
)
return self.call_domain_method("get_action_space").sample()
else:
return action
def _get_utility(self, observation: D.T_agent[D.T_observation]) -> D.T_value:
return self._solver.get_utility(observation)
def get_nb_of_explored_states(self) -> int:
return self._solver.get_nb_of_explored_states()
def get_nb_rollouts(self) -> int:
return self._solver.get_nb_rollouts()
def get_policy(
self,
) -> Dict[
D.T_agent[D.T_observation],
Tuple[D.T_agent[D.T_concurrency[D.T_event]], float],
]:
return self._solver.get_policy()
except ImportError:
sys.path = record_sys_path
print(
'Scikit-decide C++ hub library not found. Please check it is installed in "skdecide/hub".'
)
    raise
scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/lrtdp/lrtdp.py | lrtdp.py
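# Hedged sketch (not part of lrtdp.py): a custom `watchdog` callback. As in the default
# lambda above, it receives (elapsed_time, number_rollouts, best_value,
# epsilon_moving_average); returning False is assumed to stop the search early, while
# the default always returns True. The thresholds below are arbitrary.
def _example_watchdog(elapsed_time, number_rollouts, best_value, epsilon_moving_average):
    keep_searching = number_rollouts < 50000 and epsilon_moving_average > 1e-3
    return keep_searching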
from __future__ import annotations
import os
import sys
from typing import Callable, Dict, Optional, Tuple
from skdecide import Domain, Solver, hub
from skdecide.builders.domain import (
Actions,
FullyObservable,
Goals,
Markovian,
PositiveCosts,
Sequential,
SingleAgent,
UncertainTransitions,
)
from skdecide.builders.solver import DeterministicPolicies, ParallelSolver, Utilities
from skdecide.core import Value
record_sys_path = sys.path
skdecide_cpp_extension_lib_path = os.path.abspath(hub.__path__[0])
if skdecide_cpp_extension_lib_path not in sys.path:
sys.path.append(skdecide_cpp_extension_lib_path)
try:
from __skdecide_hub_cpp import _LRTDPSolver_ as lrtdp_solver
# TODO: remove Markovian req?
class D(
Domain,
SingleAgent,
Sequential,
UncertainTransitions,
Actions,
Goals,
Markovian,
FullyObservable,
PositiveCosts,
):
pass
class LRTDP(ParallelSolver, Solver, DeterministicPolicies, Utilities):
T_domain = D
def __init__(
self,
domain_factory: Callable[[], Domain] = None,
heuristic: Optional[
Callable[[Domain, D.T_state], D.T_agent[Value[D.T_value]]]
] = None,
use_labels: bool = True,
time_budget: int = 3600000,
rollout_budget: int = 100000,
max_depth: int = 1000,
epsilon_moving_average_window: int = 100,
epsilon: float = 0.001,
discount: float = 1.0,
online_node_garbage: bool = False,
continuous_planning: bool = True,
parallel: bool = False,
shared_memory_proxy=None,
debug_logs: bool = False,
watchdog: Callable[[int, int, float, float], bool] = None,
) -> None:
ParallelSolver.__init__(
self,
domain_factory=domain_factory,
parallel=parallel,
shared_memory_proxy=shared_memory_proxy,
)
self._solver = None
if heuristic is None:
self._heuristic = lambda d, s: Value(cost=0)
else:
self._heuristic = heuristic
self._lambdas = [self._heuristic]
self._use_labels = use_labels
self._time_budget = time_budget
self._rollout_budget = rollout_budget
self._max_depth = max_depth
self._epsilon_moving_average_window = epsilon_moving_average_window
self._epsilon = epsilon
self._discount = discount
self._online_node_garbage = online_node_garbage
self._continuous_planning = continuous_planning
self._debug_logs = debug_logs
if watchdog is None:
self._watchdog = (
lambda elapsed_time, number_rollouts, best_value, epsilon_moving_average: True
)
else:
self._watchdog = watchdog
self._ipc_notify = True
def close(self):
"""Joins the parallel domains' processes.
Not calling this method (or not using the 'with' context statement)
results in the solver forever waiting for the domain processes to exit.
"""
if self._parallel:
self._solver.close()
ParallelSolver.close(self)
def _init_solve(self, domain_factory: Callable[[], Domain]) -> None:
self._domain_factory = domain_factory
self._solver = lrtdp_solver(
domain=self.get_domain(),
goal_checker=lambda d, s, i=None: d.is_goal(s)
if not self._parallel
else d.is_goal(s, i),
heuristic=lambda d, s, i=None: self._heuristic(d, s)
if not self._parallel
else d.call(i, 0, s),
use_labels=self._use_labels,
time_budget=self._time_budget,
rollout_budget=self._rollout_budget,
max_depth=self._max_depth,
epsilon_moving_average_window=self._epsilon_moving_average_window,
epsilon=self._epsilon,
discount=self._discount,
online_node_garbage=self._online_node_garbage,
parallel=self._parallel,
debug_logs=self._debug_logs,
watchdog=self._watchdog,
)
self._solver.clear()
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
self._init_solve(domain_factory)
def _solve_from(self, memory: D.T_memory[D.T_state]) -> None:
self._solver.solve(memory)
def _is_solution_defined_for(
self, observation: D.T_agent[D.T_observation]
) -> bool:
return self._solver.is_solution_defined_for(observation)
def _get_next_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
if self._continuous_planning or not self._is_solution_defined_for(
observation
):
self._solve_from(observation)
action = self._solver.get_next_action(observation)
if action is None:
print(
"\x1b[3;33;40m"
+ "No best action found in observation "
+ str(observation)
+ ", applying random action"
+ "\x1b[0m"
)
return self.call_domain_method("get_action_space").sample()
else:
return action
def _get_utility(self, observation: D.T_agent[D.T_observation]) -> D.T_value:
return self._solver.get_utility(observation)
def get_nb_of_explored_states(self) -> int:
return self._solver.get_nb_of_explored_states()
def get_nb_rollouts(self) -> int:
return self._solver.get_nb_rollouts()
def get_policy(
self,
) -> Dict[
D.T_agent[D.T_observation],
Tuple[D.T_agent[D.T_concurrency[D.T_event]], float],
]:
return self._solver.get_policy()
except ImportError:
sys.path = record_sys_path
print(
'Scikit-decide C++ hub library not found. Please check it is installed in "skdecide/hub".'
)
raise | 0.590543 | 0.144873 |
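A hedged usage sketch (editor's addition, not part of the dataset row above): it shows how the LRTDP wrapper defined in lrtdp.py is typically constructed and queried. The Maze hub domain, its import path and all budgets are illustrative assumptions, and the C++ hub extension checked for above must be installed.
from skdecide import Value
from skdecide.hub.domain.maze import Maze  # assumed hub import path for the bundled maze domain
from skdecide.hub.solver.lrtdp.lrtdp import LRTDP

domain_factory = lambda: Maze()
with LRTDP(
    domain_factory=domain_factory,
    heuristic=lambda d, s: Value(cost=0.0),  # trivial admissible heuristic; replace with a real lower bound
    use_labels=True,
    time_budget=10000,   # milliseconds
    rollout_budget=1000,
    parallel=False,      # keep everything in-process for this sketch
) as solver:
    solver.solve(domain_factory)  # public entry point inherited from Solver (wraps _solve_domain above)
    s0 = domain_factory().get_initial_state()
    # continuous_planning=True, so querying an action triggers _solve_from on that state
    print(solver.get_next_action(s0), solver.get_nb_rollouts())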
from __future__ import annotations
import os
import sys
from typing import Any, Callable
from skdecide import Domain, Solver, hub
from skdecide.builders.domain import (
Actions,
DeterministicInitialized,
DeterministicTransitions,
FullyObservable,
Markovian,
Rewards,
Sequential,
SingleAgent,
)
from skdecide.builders.solver import DeterministicPolicies, ParallelSolver, Utilities
from skdecide.core import Value
record_sys_path = sys.path
skdecide_cpp_extension_lib_path = os.path.abspath(hub.__path__[0])
if skdecide_cpp_extension_lib_path not in sys.path:
sys.path.append(skdecide_cpp_extension_lib_path)
try:
from __skdecide_hub_cpp import _BFWSSolver_ as bfws_solver
class D(
Domain,
SingleAgent,
Sequential,
DeterministicTransitions,
Actions,
DeterministicInitialized,
Markovian,
FullyObservable,
Rewards,
): # TODO: check why DeterministicInitialized & PositiveCosts/Rewards?
pass
class BFWS(ParallelSolver, Solver, DeterministicPolicies, Utilities):
T_domain = D
def __init__(
self,
domain_factory: Callable[[], Domain],
state_features: Callable[[Domain, D.T_state], Any],
heuristic: Callable[[Domain, D.T_state], D.T_agent[Value[D.T_value]]],
termination_checker: Callable[
[Domain, D.T_state], D.T_agent[D.T_predicate]
],
parallel: bool = False,
shared_memory_proxy=None,
debug_logs: bool = False,
) -> None:
ParallelSolver.__init__(
self,
domain_factory=domain_factory,
parallel=parallel,
shared_memory_proxy=shared_memory_proxy,
)
self._solver = None
self._domain = None
self._state_features = state_features
self._termination_checker = termination_checker
self._debug_logs = debug_logs
if heuristic is None:
self._heuristic = lambda d, s: Value(cost=0)
else:
self._heuristic = heuristic
self._lambdas = [
self._state_features,
self._heuristic,
self._termination_checker,
]
self._ipc_notify = True
def close(self):
"""Joins the parallel domains' processes.
Not calling this method (or not using the 'with' context statement)
results in the solver forever waiting for the domain processes to exit.
"""
if self._parallel:
self._solver.close()
ParallelSolver.close(self)
def _init_solve(self, domain_factory: Callable[[], D]) -> None:
self._domain_factory = domain_factory
self._solver = bfws_solver(
domain=self.get_domain(),
state_features=lambda d, s: self._state_features(d, s)
if not self._parallel
else d.call(None, 0, s),
heuristic=lambda d, s: self._heuristic(d, s)
if not self._parallel
else d.call(None, 1, s),
termination_checker=lambda d, s: self._termination_checker(d, s)
if not self._parallel
else d.call(None, 2, s),
parallel=self._parallel,
debug_logs=self._debug_logs,
)
self._solver.clear()
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
self._init_solve(domain_factory)
def _solve_from(self, memory: D.T_memory[D.T_state]) -> None:
self._solver.solve(memory)
def _is_solution_defined_for(
self, observation: D.T_agent[D.T_observation]
) -> bool:
return self._solver.is_solution_defined_for(observation)
def _get_next_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
if not self._is_solution_defined_for(observation):
self._solve_from(observation)
return self._solver.get_next_action(observation)
def _get_utility(self, observation: D.T_agent[D.T_observation]) -> D.T_value:
return self._solver.get_utility(observation)
except ImportError:
sys.path = record_sys_path
print(
'Scikit-decide C++ hub library not found. Please check it is installed in "skdecide/hub".'
)
    raise | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/bfws/bfws.py | bfws.py | 0.493409 | 0.138753 |
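A hedged wiring sketch (editor's addition): it illustrates the three callables BFWS expects, state features for novelty, a heuristic value and a termination check. The Maze domain, its import path and the coordinate feature tuple are assumptions, and the C++ hub extension is required.
from skdecide import Value
from skdecide.hub.domain.maze import Maze  # assumed hub import path
from skdecide.hub.solver.bfws.bfws import BFWS

domain_factory = lambda: Maze()
with BFWS(
    domain_factory=domain_factory,
    state_features=lambda d, s: (s.x, s.y),         # novelty features: the maze coordinates
    heuristic=lambda d, s: Value(cost=0.0),         # placeholder cost-to-go estimate
    termination_checker=lambda d, s: d.is_goal(s),
    parallel=False,
) as solver:
    solver.solve(domain_factory)
    s0 = domain_factory().get_initial_state()
    print(solver.get_next_action(s0), solver.get_utility(s0))  # planning is triggered lazily on first query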
from __future__ import annotations
from heapq import heappop, heappush
from itertools import count
from typing import Any, Dict, Tuple
from skdecide import D, DeterministicPlanningDomain, GoalMDPDomain, MDPDomain, Memory
from skdecide.hub.solver.graph_explorer.GraphDomain import (
GraphDomain,
GraphDomainUncertain,
)
from skdecide.hub.solver.graph_explorer.GraphExploration import GraphExploration
# WARNING : adapted for the scheduling domains.
class DFSExploration(GraphExploration):
def __init__(
self,
domain: GoalMDPDomain,
score_function=None,
max_edges=None,
max_nodes=None,
max_path=None,
):
self.domain = domain
self.score_function = score_function
self.c = count()
if score_function is None:
self.score_function = lambda s: (next(self.c))
self.max_edges = max_edges
self.max_nodes = max_nodes
self.max_path = max_path
def build_graph_domain(self, init_state: Any = None) -> GraphDomainUncertain:
if init_state is None:
initial_state = self.domain.get_initial_state()
else:
initial_state = init_state
stack = [(self.score_function(initial_state), initial_state)]
domain = self.domain
goal_states = set()
terminal_states = set()
num_s = 0
state_to_ind = {}
nb_states = 1
nb_edges = 0
result = {initial_state}
next_state_map: Dict[
D.T_state, Dict[D.T_event, Dict[D.T_state, Tuple[float, float]]]
] = {}
state_terminal: Dict[D.T_state, bool] = {}
state_goal: Dict[D.T_state, bool] = {}
state_terminal[initial_state] = self.domain.is_terminal(initial_state)
state_goal[initial_state] = self.domain.is_goal(initial_state)
while len(stack) > 0:
if not len(result) % 100 and len(result) > nb_states:
print("Expanded {} states.".format(len(result)))
nb_states = len(result)
            _, s = heappop(stack)  # drop the ordering key pushed alongside the state
if s not in state_to_ind:
state_to_ind[s] = num_s
num_s += 1
if domain.is_terminal(s):
terminal_states.add(s)
if domain.is_goal(s):
goal_states.add(s)
if domain.is_goal(s) or domain.is_terminal(s):
continue
actions = domain.get_applicable_actions(s).get_elements()
for action in actions:
successors = domain.get_next_state_distribution(s, action).get_values()
for succ, prob in successors:
if s not in next_state_map:
next_state_map[s] = {}
if action not in next_state_map[s]:
next_state_map[s][action] = {}
if prob != 0 and succ not in result:
nb_states += 1
nb_edges += 1
result.add(succ)
heappush(stack, (self.score_function(succ), succ))
cost = domain.get_transition_value(s, action, succ)
next_state_map[s][action][succ] = (prob, cost.cost)
state_goal[succ] = domain.is_goal(succ)
state_terminal[succ] = domain.is_terminal(succ)
            # None means "no limit"; only compare against limits that were actually set
            if (self.max_nodes is not None and nb_states > self.max_nodes) or (
                self.max_edges is not None and nb_edges > self.max_edges
            ):
break
return GraphDomainUncertain(
next_state_map=next_state_map,
state_terminal=state_terminal,
state_goal=state_goal,
        ) | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/graph_explorer/DFS_Uncertain_Exploration.py | DFS_Uncertain_Exploration.py | 0.634543 | 0.247498 |
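A hedged usage sketch (editor's addition): it shows how this uncertain-transition DFS explorer turns a goal MDP into a GraphDomainUncertain. The Maze hub domain is an assumed stand-in (deterministic transitions being a special case), and the limits are illustrative.
from skdecide.hub.domain.maze import Maze  # assumed hub import path
from skdecide.hub.solver.graph_explorer.DFS_Uncertain_Exploration import DFSExploration

domain = Maze()
explorer = DFSExploration(domain=domain, max_edges=5000, max_nodes=5000, max_path=100)
# Expands states in score order (insertion order by default) until the limits are hit
graph_domain = explorer.build_graph_domain(init_state=domain.get_initial_state())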
from __future__ import annotations
from typing import Any
from skdecide import DeterministicPlanningDomain, Memory
from skdecide.hub.solver.graph_explorer.GraphDomain import GraphDomain
from skdecide.hub.solver.graph_explorer.GraphExploration import GraphExploration
class FullSpaceExploration(GraphExploration):
def __init__(
self,
domain: DeterministicPlanningDomain,
max_edges=None,
max_nodes=None,
max_path=None,
):
self.domain = domain
self.max_edges = max_edges
self.max_nodes = max_nodes
self.max_path = max_path
def build_graph_domain(self, init_state: Any = None) -> GraphDomain:
next_state_map = {}
next_state_attributes = {}
if init_state is None:
init_state = self.domain.get_initial_state()
stack = [(init_state, [init_state])]
nb_nodes = 1
nb_edges = 0
nb_path = 0
next_state_map[init_state] = {}
next_state_attributes[init_state] = {}
while stack:
(vertex, path) = stack.pop()
actions = self.domain.get_applicable_actions(vertex).get_elements()
for action in actions:
next = self.domain.get_next_state(vertex, action)
if next not in next_state_map:
next_state_map[next] = {}
next_state_attributes[next] = {}
nb_nodes += 1
if action not in next_state_map[vertex]:
nb_edges += 1
next_state_map[vertex][action] = next
next_state_attributes[vertex][action] = {
"cost": self.domain.get_transition_value(
Memory([vertex]), action, next
).cost,
"reward": self.domain.get_transition_value(
Memory([vertex]), action, next
).reward,
}
if self.domain.is_goal(next):
nb_path += 1
else:
if next not in next_state_map:
stack.append((next, path + [next]))
                # None limits mean "no limit"; guard before comparing
                if (
                    (self.max_path is not None and nb_path > self.max_path)
                    or (self.max_nodes is not None and nb_nodes > self.max_nodes and nb_path >= 1)
                    or (self.max_edges is not None and nb_edges > self.max_edges and nb_path >= 1)
                ):
break
return GraphDomain(next_state_map, next_state_attributes, None, None)
def reachable_states(self, s0: Any):
"""Computes all states reachable from s0."""
result = {s0}
stack = [s0]
        domain = self.domain  # the attribute set in __init__ is 'domain', not '_domain'
while len(stack) > 0:
if not len(result) % 100:
print("Expanded {} states.".format(len(result)))
s = stack.pop()
if domain.is_terminal(s):
continue
# Add successors
actions = domain.get_applicable_actions(s).get_elements()
for action in actions:
successors = domain.get_next_state_distribution(s, action).get_values()
for succ, prob in successors:
if prob != 0 and succ not in result:
result.add(succ)
stack.append(succ)
        return result | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/graph_explorer/FullSpaceExploration.py | FullSpaceExploration.py | 0.71413 | 0.256203 |
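A hedged usage sketch (editor's addition): it builds a GraphDomain with the full-space explorer above; the Maze domain and the exploration limits are assumptions, and explicit limits keep the sweep bounded.
from skdecide.hub.domain.maze import Maze  # assumed hub import path
from skdecide.hub.solver.graph_explorer.FullSpaceExploration import FullSpaceExploration

domain = Maze()
explorer = FullSpaceExploration(domain, max_edges=10000, max_nodes=10000, max_path=10)
# Returns a GraphDomain wrapping the explored next_state_map with per-transition cost/reward attributes
graph_domain = explorer.build_graph_domain(init_state=domain.get_initial_state())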
from __future__ import annotations
from typing import Any
from skdecide import DeterministicPlanningDomain, Memory
from skdecide.hub.solver.graph_explorer.GraphDomain import GraphDomain
from skdecide.hub.solver.graph_explorer.GraphExploration import GraphExploration
class DFSExploration(GraphExploration):
def __init__(
self,
domain: DeterministicPlanningDomain,
max_edges=None,
max_nodes=None,
max_path=None,
):
self.domain = domain
self.max_edges = max_edges
self.max_nodes = max_nodes
self.max_path = max_path
def build_graph_domain(
self, init_state: Any = None, transition_extractor=None, verbose=True
) -> GraphDomain:
if transition_extractor is None:
transition_extractor = lambda s, a, s_prime: {
"cost": self.domain.get_transition_value(s, a, s_prime).cost
}
next_state_map = {}
next_state_attributes = {}
if init_state is None:
init_state = self.domain.get_initial_state()
stack = [(init_state, [init_state])]
nb_nodes = 1
nb_edges = 0
nb_path = 0
next_state_map[init_state] = {}
next_state_attributes[init_state] = {}
paths_dict = {}
while stack:
(vertex, path) = stack.pop()
actions = self.domain.get_applicable_actions(vertex).get_elements()
for action in actions:
next = self.domain.get_next_state(Memory([vertex]), action)
if action not in next_state_map[vertex]:
nb_edges += 1
else:
continue
next_state_map[vertex][action] = next
next_state_attributes[vertex][action] = transition_extractor(
vertex, action, next
)
if self.domain.is_goal(next):
nb_path += 1
if verbose:
print(nb_path, " / ", self.max_path)
print("nodes ", nb_nodes, " / ", self.max_nodes)
print("edges ", nb_edges, " / ", self.max_edges)
else:
if next not in next_state_map:
stack.append((next, path + [next]))
paths_dict[next] = set(tuple(path + [next]))
# else:
# if tuple(path+[next]) not in paths_dict[next]:
# stack.append((next, path + [next]))
# paths_dict[next].add(tuple(path + [next]))
if next not in next_state_map:
next_state_map[next] = {}
next_state_attributes[next] = {}
nb_nodes += 1
                # None limits mean "no limit"; guard before comparing
                if (
                    (self.max_path is not None and nb_path > self.max_path)
                    or (self.max_nodes is not None and nb_nodes > self.max_nodes and nb_path >= 1)
                    or (self.max_edges is not None and nb_edges > self.max_edges and nb_path >= 1)
                ):
break
        return GraphDomain(next_state_map, next_state_attributes, None, None) | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/graph_explorer/DFSExploration.py | DFSExploration.py | 0.516595 | 0.205296 |
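A hedged usage sketch (editor's addition): it runs the deterministic DFS explorer with a custom transition_extractor, which decides what gets stored on each edge. The Maze domain and the limits are assumptions.
from skdecide.hub.domain.maze import Maze  # assumed hub import path
from skdecide.hub.solver.graph_explorer.DFSExploration import DFSExploration

domain = Maze()
explorer = DFSExploration(domain, max_edges=20000, max_nodes=20000, max_path=1)
graph_domain = explorer.build_graph_domain(
    init_state=domain.get_initial_state(),
    transition_extractor=lambda s, a, s_prime: {
        "cost": domain.get_transition_value(s, a, s_prime).cost
    },
    verbose=False,
)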
from __future__ import annotations
import os
import sys
from typing import Any, Callable, Dict, List, Tuple
from skdecide import Domain, Solver, hub
from skdecide.builders.domain import (
Actions,
DeterministicInitialized,
Environment,
FullyObservable,
Markovian,
Rewards,
Sequential,
SingleAgent,
)
from skdecide.builders.solver import DeterministicPolicies, ParallelSolver, Utilities
record_sys_path = sys.path
skdecide_cpp_extension_lib_path = os.path.abspath(hub.__path__[0])
if skdecide_cpp_extension_lib_path not in sys.path:
sys.path.append(skdecide_cpp_extension_lib_path)
try:
from __skdecide_hub_cpp import _RIWSolver_ as riw_solver
class D(
Domain,
SingleAgent,
Sequential,
Environment,
Actions,
DeterministicInitialized,
Markovian,
FullyObservable,
Rewards,
): # TODO: check why DeterministicInitialized & PositiveCosts/Rewards?
pass
class RIW(ParallelSolver, Solver, DeterministicPolicies, Utilities):
T_domain = D
def __init__(
self,
domain_factory: Callable[[], Domain],
state_features: Callable[[Domain, D.T_state], Any],
use_state_feature_hash: bool = False,
use_simulation_domain: bool = False,
time_budget: int = 3600000,
rollout_budget: int = 100000,
max_depth: int = 1000,
exploration: float = 0.25,
epsilon_moving_average_window: int = 100,
epsilon: float = 0.001,
discount: float = 1.0,
online_node_garbage: bool = False,
continuous_planning: bool = True,
parallel: bool = False,
shared_memory_proxy=None,
debug_logs: bool = False,
watchdog: Callable[[int, int, float, float], bool] = None,
) -> None:
ParallelSolver.__init__(
self,
domain_factory=domain_factory,
parallel=parallel,
shared_memory_proxy=shared_memory_proxy,
)
self._solver = None
self._domain = None
self._state_features = state_features
self._use_state_feature_hash = use_state_feature_hash
self._use_simulation_domain = use_simulation_domain
self._time_budget = time_budget
self._rollout_budget = rollout_budget
self._max_depth = max_depth
self._exploration = exploration
self._epsilon_moving_average_window = epsilon_moving_average_window
self._epsilon = epsilon
self._discount = discount
self._online_node_garbage = online_node_garbage
self._continuous_planning = continuous_planning
self._debug_logs = debug_logs
if watchdog is None:
self._watchdog = (
lambda elapsed_time, number_rollouts, best_value, epsilon_moving_average: True
)
else:
self._watchdog = watchdog
self._lambdas = [self._state_features]
self._ipc_notify = True
def close(self):
"""Joins the parallel domains' processes.
Not calling this method (or not using the 'with' context statement)
results in the solver forever waiting for the domain processes to exit.
"""
if self._parallel:
self._solver.close()
ParallelSolver.close(self)
def _init_solve(self, domain_factory: Callable[[], D]) -> None:
self._domain_factory = domain_factory
self._solver = riw_solver(
domain=self.get_domain(),
state_features=lambda d, s, i=None: self._state_features(d, s)
if not self._parallel
else d.call(i, 0, s),
use_state_feature_hash=self._use_state_feature_hash,
use_simulation_domain=self._use_simulation_domain,
time_budget=self._time_budget,
rollout_budget=self._rollout_budget,
max_depth=self._max_depth,
exploration=self._exploration,
epsilon_moving_average_window=self._epsilon_moving_average_window,
epsilon=self._epsilon,
discount=self._discount,
online_node_garbage=self._online_node_garbage,
parallel=self._parallel,
debug_logs=self._debug_logs,
watchdog=self._watchdog,
)
self._solver.clear()
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
self._init_solve(domain_factory)
def _solve_from(self, memory: D.T_memory[D.T_state]) -> None:
self._solver.solve(memory)
def _is_solution_defined_for(
self, observation: D.T_agent[D.T_observation]
) -> bool:
return self._solver.is_solution_defined_for(observation)
def _get_next_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
if self._continuous_planning or not self._is_solution_defined_for(
observation
):
self._solve_from(observation)
action = self._solver.get_next_action(observation)
if action is None:
print(
"\x1b[3;33;40m"
+ "No best action found in observation "
+ str(observation)
+ ", applying random action"
+ "\x1b[0m"
)
return self.call_domain_method("get_action_space").sample()
else:
return action
def _reset(self) -> None:
self._solver.clear()
def _get_utility(self, observation: D.T_agent[D.T_observation]) -> D.T_value:
return self._solver.get_utility(observation)
def get_nb_of_explored_states(self) -> int:
return self._solver.get_nb_of_explored_states()
def get_nb_of_pruned_states(self) -> int:
return self._solver.get_nb_of_pruned_states()
def get_nb_rollouts(self) -> int:
return self._solver.get_nb_rollouts()
def get_policy(
self,
) -> Dict[
D.T_agent[D.T_observation],
Tuple[D.T_agent[D.T_concurrency[D.T_event]], float],
]:
return self._solver.get_policy()
def get_action_prefix(self) -> List[D.T_agent[D.T_observation]]:
return self._solver.get_action_prefix()
except ImportError:
sys.path = record_sys_path
print(
'Scikit-decide C++ hub library not found. Please check it is installed in "skdecide/hub".'
)
    raise | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/riw/riw.py | riw.py | 0.556641 | 0.173849 |
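A hedged usage sketch (editor's addition): it constructs the RIW solver above with width features taken from the state coordinates. The Maze domain, its import path and the budgets are assumptions, and the C++ hub extension is required.
from skdecide.hub.domain.maze import Maze  # assumed hub import path
from skdecide.hub.solver.riw.riw import RIW

domain_factory = lambda: Maze()
with RIW(
    domain_factory=domain_factory,
    state_features=lambda d, s: (s.x, s.y),  # width features: the maze coordinates
    use_state_feature_hash=False,
    time_budget=5000,
    rollout_budget=500,
    max_depth=200,
    parallel=False,
) as solver:
    solver.solve(domain_factory)
    s0 = domain_factory().get_initial_state()
    print(solver.get_next_action(s0))  # triggers planning (continuous_planning=True)
    print(solver.get_nb_rollouts(), "rollouts,", solver.get_nb_of_pruned_states(), "pruned states")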
from __future__ import annotations
from typing import Callable, Optional
from skdecide import Domain, Solver, Value
from skdecide.builders.domain import (
Actions,
DeterministicTransitions,
FullyObservable,
Goals,
Markovian,
PositiveCosts,
Sequential,
SingleAgent,
)
from skdecide.builders.solver import DeterministicPolicies, Utilities
class D(
Domain,
SingleAgent,
Sequential,
DeterministicTransitions,
Actions,
Goals,
Markovian,
FullyObservable,
PositiveCosts,
):
pass
class LRTAstar(Solver, DeterministicPolicies, Utilities):
T_domain = D
def _get_next_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
return self._policy.get(observation, None)
def _is_policy_defined_for(self, observation: D.T_agent[D.T_observation]) -> bool:
        return observation in self._policy  # membership test: the policy is a dict keyed by observation
def _get_utility(self, observation: D.T_agent[D.T_observation]) -> D.T_value:
if observation not in self.values:
return self._heuristic(self._domain, observation).cost
return self.values[observation]
def __init__(
self,
from_state: Optional[D.T_state] = None,
heuristic: Optional[
Callable[[Domain, D.T_state], D.T_agent[Value[D.T_value]]]
] = None,
weight: float = 1.0,
verbose: bool = False,
max_iter=5000,
max_depth=200,
) -> None:
self._from_state = from_state
self._heuristic = (
(lambda _, __: Value(cost=0.0)) if heuristic is None else heuristic
)
self._weight = weight
self.max_iter = max_iter
self.max_depth = max_depth
self._plan = []
self.values = {}
self._verbose = verbose
self.heuristic_changed = False
self._policy = {}
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
self._domain = domain_factory()
self.values = {}
iteration = 0
best_cost = float("inf")
if self._from_state is None:
# get initial observation from domain (assuming DeterministicInitialized)
from_observation = self._domain.get_initial_state()
else:
from_observation = self._from_state
# best_path = None
while True:
            if self._verbose:
                print(from_observation)
dead_end, cumulated_cost, current_roll, list_action = self.doTrial(
from_observation
)
if self._verbose:
print(
"iter ",
iteration,
"/",
self.max_iter,
" : dead end, ",
dead_end,
" cost : ",
cumulated_cost,
)
if not dead_end and cumulated_cost < best_cost:
best_cost = cumulated_cost
# best_path = current_roll
for k in range(len(current_roll)):
self._policy[current_roll[k][0]] = current_roll[k][1]["action"]
if not self.heuristic_changed:
                if self._verbose:
                    print(self.heuristic_changed)
return
iteration += 1
if iteration > self.max_iter:
return
def doTrial(self, from_observation: D.T_agent[D.T_observation]):
list_action = []
current_state = from_observation
depth = 0
dead_end = False
cumulated_reward = 0.0
current_roll = [current_state]
current_roll_and_action = []
self.heuristic_changed = False
while (not self._domain.is_goal(current_state)) and (depth < self.max_depth):
next_action = None
next_state = None
best_estimated_cost = float("inf")
applicable_actions = self._domain.get_applicable_actions(current_state)
for action in applicable_actions.get_elements():
st = self._domain.get_next_state(current_state, action)
r = self._domain.get_transition_value(current_state, action, st).cost
if st in current_roll:
continue
if st not in self.values:
self.values[st] = self._heuristic(self._domain, st).cost
if r + self.values[st] < best_estimated_cost:
next_state = st
next_action = action
best_estimated_cost = r + self.values[st]
if next_action is None:
self.values[current_state] = float("inf")
dead_end = True
self.heuristic_changed = True
break
else:
                if (current_state not in self.values) or (
self.values[current_state] != best_estimated_cost
):
self.heuristic_changed = True
self.values[current_state] = best_estimated_cost
cumulated_reward += best_estimated_cost - (
self.values[next_state]
if next_state in self.values
else self._heuristic(self._domain, next_state).cost
)
list_action.append(next_action)
current_roll_and_action.append((current_state, {"action": next_action}))
current_state = next_state
depth += 1
current_roll.append(current_state)
current_roll_and_action.append((current_state, {"action": None}))
cumulated_reward += self.values[current_state]
        return dead_end, cumulated_reward, current_roll_and_action, list_action | scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/lrtastar/lrtastar.py | lrtastar.py | 0.754237 | 0.342929 |
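A hedged usage sketch (editor's addition): it runs the pure-Python LRTA* solver above on an assumed Maze stand-in with a trivial heuristic; the iteration and depth caps are illustrative.
from skdecide import Value
from skdecide.hub.domain.maze import Maze  # assumed hub import path
from skdecide.hub.solver.lrtastar.lrtastar import LRTAstar

domain_factory = lambda: Maze()
solver = LRTAstar(
    heuristic=lambda d, s: Value(cost=0.0),  # replace with a domain-specific lower bound
    max_iter=500,
    max_depth=300,
    verbose=False,
)
solver.solve(domain_factory)  # inherited Solver entry point (wraps _solve_domain above)
s0 = domain_factory().get_initial_state()
print(solver.get_next_action(s0), solver.get_utility(s0))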
from __future__ import annotations
from enum import Enum
from functools import partial
from typing import Dict, List, Optional, Union
from skdecide.builders.domain.scheduling.scheduling_domains import (
D,
MultiModeRCPSP,
SchedulingDomain,
SingleModeRCPSP,
)
from skdecide.builders.domain.scheduling.scheduling_domains_modelling import (
SchedulingAction,
SchedulingActionEnum,
State,
)
from skdecide.builders.solver.policy import DeterministicPolicies
class BasePolicyMethod(Enum):
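    """Base scheduling policy variants implemented at the bottom of this file.

    FOLLOW_GANTT replays a precomputed schedule; the SGS_* members are serial
    schedule-generation schemes driven by the task permutation (first
    precedence-ready task, first applicable task, strict permutation order, or
    strict order relaxed by a time / index window). PILE has no entry in
    map_method_to_function below.
    """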
FOLLOW_GANTT = 0
SGS_PRECEDENCE = 1
SGS_READY = 2
SGS_STRICT = 3
SGS_TIME_FREEDOM = 4
SGS_INDEX_FREEDOM = 5
PILE = 6
class PolicyMethodParams:
def __init__(
self,
base_policy_method: BasePolicyMethod,
delta_time_freedom=10,
delta_index_freedom=10,
):
self.base_policy_method = base_policy_method
self.delta_time_freedom = delta_time_freedom
self.delta_index_freedom = delta_index_freedom
class PolicyRCPSP(DeterministicPolicies):
T_domain = D
def __init__(
self,
domain: SchedulingDomain,
policy_method_params: PolicyMethodParams,
permutation_task: List[int],
modes_dictionnary: Dict[int, int],
schedule: Optional[
Dict[int, Dict[str, int]]
] = None, # {id: {"start_time":, "end_time"}}
resource_allocation: Optional[Dict[int, List[str]]] = None,
resource_allocation_priority: Optional[Dict[int, List[str]]] = None,
):
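        """Deterministic policy replaying a task permutation / schedule.

        permutation_task is a priority-ordered list of task ids,
        modes_dictionnary maps each task id to the mode it should be started
        with, schedule optionally gives {task id: {"start_time": ..., "end_time": ...}},
        and resource_allocation optionally maps task ids to the resource unit
        names to use; resource_allocation_priority is stored but not used by
        the policy functions in this file.
        """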
self.domain = domain
self.policy_method_params = policy_method_params
self.permutation_task = permutation_task
self.modes_dictionnary = modes_dictionnary
self.schedule = schedule
self.store_start_date = {}
if self.schedule is not None:
for task_id in self.schedule:
start_date = self.schedule[task_id]["start_time"]
if start_date not in self.store_start_date:
self.store_start_date[start_date] = set()
self.store_start_date[start_date].add(task_id)
self.resource_allocation = resource_allocation
self.resource_allocation_priority = resource_allocation_priority
self.build_function()
def reset(self):
pass
def build_function(self):
func = partial(
map_method_to_function[self.policy_method_params.base_policy_method],
policy_rcpsp=self,
check_if_applicable=False,
domain_sk_decide=self.domain,
delta_time_freedom=self.policy_method_params.delta_time_freedom,
delta_index_freedom=self.policy_method_params.delta_index_freedom,
)
self.func = func
def _get_next_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
return self.func(state=observation)
def _is_policy_defined_for(self, observation: D.T_agent[D.T_observation]) -> bool:
return True
def action_in_applicable_actions(
domain_sk_decide,
observation: D.T_agent[D.T_observation],
the_action: SchedulingAction,
):
return domain_sk_decide.check_if_action_can_be_started(observation, the_action)
def next_action_follow_static_gantt(
policy_rcpsp: PolicyRCPSP, state: State, check_if_applicable: bool = False, **kwargs
):
obs: State = state
t = obs.t
ongoing_task = obs.tasks_ongoing
complete_task = obs.tasks_complete
possible_task_to_launch = policy_rcpsp.domain.task_possible_to_launch_precedence(
state=state
)
the_action = SchedulingAction(
task=None,
action=SchedulingActionEnum.TIME_PR,
mode=None,
time_progress=True,
resource_unit_names=None,
)
if t in policy_rcpsp.store_start_date:
tasks = [
task
for task in policy_rcpsp.store_start_date[t]
if task not in ongoing_task
and task not in complete_task
and task in possible_task_to_launch
]
if len(tasks) > 0:
the_action = SchedulingAction(
task=tasks[0],
action=SchedulingActionEnum.START,
mode=policy_rcpsp.modes_dictionnary[tasks[0]],
time_progress=False,
resource_unit_names=None,
)
if policy_rcpsp.resource_allocation is not None:
if tasks[0] in policy_rcpsp.resource_allocation:
the_action.resource_unit_names = policy_rcpsp.resource_allocation[
tasks[0]
]
        action_available = action_in_applicable_actions(
            policy_rcpsp.domain, state, the_action
        )
        if not action_available[0]:
            the_action = SchedulingAction(
                task=None,
                action=SchedulingActionEnum.TIME_PR,
                mode=None,
                time_progress=True,
                resource_unit_names=None,
            )
return the_action
def next_action_sgs_first_task_precedence_ready(
policy_rcpsp: PolicyRCPSP, state: State, check_if_applicable: bool = False, **kwargs
):
obs: State = state
next_task_to_launch = None
possible_task_to_launch = policy_rcpsp.domain.task_possible_to_launch_precedence(
state=state
)
tasks_remaining = set(state.tasks_remaining)
sorted_task_not_done = sorted(
[
(index, policy_rcpsp.permutation_task[index])
for index in range(len(policy_rcpsp.permutation_task))
if policy_rcpsp.permutation_task[index] in tasks_remaining
],
key=lambda x: x[0],
)
for i in range(len(sorted_task_not_done)):
task = sorted_task_not_done[i][1]
if task in possible_task_to_launch:
next_task_to_launch = task
break
if next_task_to_launch is not None:
if policy_rcpsp.schedule is not None:
original_time_start_task = policy_rcpsp.schedule[next_task_to_launch][
"start_time"
]
other_tasks_same_time = [
task_id
for ind, task_id in sorted_task_not_done
if task_id != next_task_to_launch
and policy_rcpsp.schedule[task_id]["start_time"]
== original_time_start_task
and task_id in possible_task_to_launch
]
else:
other_tasks_same_time = []
tasks_of_interest = [next_task_to_launch] + other_tasks_same_time
the_action = None
for tinterest in tasks_of_interest:
the_action = SchedulingAction(
task=tinterest,
action=SchedulingActionEnum.START,
mode=policy_rcpsp.modes_dictionnary[tinterest],
time_progress=False,
resource_unit_names=None,
)
if policy_rcpsp.resource_allocation is not None:
if tinterest in policy_rcpsp.resource_allocation:
the_action.resource_unit_names = policy_rcpsp.resource_allocation[
tinterest
]
applicable = action_in_applicable_actions(
domain_sk_decide=policy_rcpsp.domain,
observation=state,
the_action=the_action,
)
if applicable[0]:
break
else:
the_action = None
if the_action is None:
the_action = SchedulingAction(
task=None,
action=SchedulingActionEnum.TIME_PR,
mode=None,
time_progress=True,
resource_unit_names=None,
)
return the_action
else:
return SchedulingAction(
task=None,
action=SchedulingActionEnum.TIME_PR,
mode=None,
time_progress=True,
resource_unit_names=None,
)
def next_action_sgs_first_task_ready(
policy_rcpsp: PolicyRCPSP,
state: State,
check_if_applicable: bool = False,
domain_sk_decide: Union[MultiModeRCPSP, SingleModeRCPSP] = None,
**kwargs,
):
obs: State = state
t = obs.t
tasks_remaining = set(state.tasks_remaining)
sorted_task_not_done = sorted(
[
(index, policy_rcpsp.permutation_task[index])
for index in range(len(policy_rcpsp.permutation_task))
if policy_rcpsp.permutation_task[index] in tasks_remaining
],
key=lambda x: x[0],
)
next_task_to_launch = None
possible_task_to_launch = policy_rcpsp.domain.task_possible_to_launch_precedence(
state=state
)
for i in range(len(sorted_task_not_done)):
task = sorted_task_not_done[i][1]
if task not in possible_task_to_launch:
continue
the_action = SchedulingAction(
task=task,
action=SchedulingActionEnum.START,
mode=policy_rcpsp.modes_dictionnary[task],
time_progress=False,
resource_unit_names=None,
)
if policy_rcpsp.resource_allocation is not None:
if task in policy_rcpsp.resource_allocation:
the_action.resource_unit_names = policy_rcpsp.resource_allocation[task]
action_available = action_in_applicable_actions(
policy_rcpsp.domain, state, the_action
)
if action_available[0]:
return the_action
the_action = SchedulingAction(
task=None,
action=SchedulingActionEnum.TIME_PR,
mode=None,
time_progress=True,
resource_unit_names=None,
)
return the_action
def next_action_sgs_strict(
policy_rcpsp: PolicyRCPSP,
state: State,
check_if_applicable: bool = False,
domain_sk_decide: Union[MultiModeRCPSP, SingleModeRCPSP] = None,
**kwargs,
):
obs: State = state
t = obs.t
possible_task_to_launch = policy_rcpsp.domain.task_possible_to_launch_precedence(
state=state
)
tasks_remaining = set(state.tasks_remaining)
sorted_task_not_done = sorted(
[
(index, policy_rcpsp.permutation_task[index])
for index in range(len(policy_rcpsp.permutation_task))
if policy_rcpsp.permutation_task[index] in tasks_remaining
and policy_rcpsp.permutation_task[index] in possible_task_to_launch
],
key=lambda x: x[0],
)
the_action = None
if len(sorted_task_not_done) > 0:
other_tasks_same_time = [sorted_task_not_done[0][1]]
if policy_rcpsp.schedule is not None:
scheduled_time = policy_rcpsp.schedule[sorted_task_not_done[0][1]][
"start_time"
]
other_tasks_same_time = [
task_id
for ind, task_id in sorted_task_not_done
if policy_rcpsp.schedule[task_id]["start_time"] == scheduled_time
]
for tinterest in other_tasks_same_time:
the_action = SchedulingAction(
task=tinterest,
action=SchedulingActionEnum.START,
mode=policy_rcpsp.modes_dictionnary[tinterest],
time_progress=False,
resource_unit_names=None,
) # start_tasks=[tinterest], advance_time=False)
if policy_rcpsp.resource_allocation is not None:
if tinterest in policy_rcpsp.resource_allocation:
the_action.resource_unit_names = policy_rcpsp.resource_allocation[
tinterest
]
applicable = action_in_applicable_actions(
policy_rcpsp.domain, observation=state, the_action=the_action
)
if applicable[0]:
break
else:
the_action = None
if the_action is None:
the_action = SchedulingAction(
task=None,
action=SchedulingActionEnum.TIME_PR,
mode=None,
time_progress=True,
resource_unit_names=None,
)
return the_action
def next_action_sgs_time_freedom(
policy_rcpsp: PolicyRCPSP,
state: State,
check_if_applicable: bool = False,
domain_sk_decide: Union[MultiModeRCPSP, SingleModeRCPSP] = None,
delta_time_freedom: int = 10,
**kwargs,
):
obs: State = state
possible_task_to_launch = policy_rcpsp.domain.task_possible_to_launch_precedence(
state=state
)
tasks_remaining = set(state.tasks_remaining)
sorted_task_not_done = sorted(
[
(index, policy_rcpsp.permutation_task[index])
for index in range(len(policy_rcpsp.permutation_task))
if policy_rcpsp.permutation_task[index] in tasks_remaining
and policy_rcpsp.permutation_task[index] in possible_task_to_launch
],
key=lambda x: x[0],
)
the_action = None
if len(sorted_task_not_done) > 0:
other_tasks_same_time = [sorted_task_not_done[0][1]]
if policy_rcpsp.schedule is not None:
scheduled_time = policy_rcpsp.schedule[sorted_task_not_done[0][1]][
"start_time"
]
other_tasks_same_time = [
task_id
for ind, task_id in sorted_task_not_done
if scheduled_time
<= policy_rcpsp.schedule[task_id]["start_time"]
<= scheduled_time + delta_time_freedom
]
for tinterest in other_tasks_same_time:
the_action = SchedulingAction(
task=tinterest,
action=SchedulingActionEnum.START,
mode=policy_rcpsp.modes_dictionnary[tinterest],
time_progress=False,
resource_unit_names=None,
) # start_tasks=[tinterest], advance_time=False)
if policy_rcpsp.resource_allocation is not None:
if tinterest in policy_rcpsp.resource_allocation:
the_action.resource_unit_names = policy_rcpsp.resource_allocation[
tinterest
]
applicable = action_in_applicable_actions(
policy_rcpsp.domain, observation=state, the_action=the_action
)
if applicable[0]:
break
else:
the_action = None
if the_action is None:
the_action = SchedulingAction(
task=None,
action=SchedulingActionEnum.TIME_PR,
mode=None,
time_progress=True,
resource_unit_names=None,
)
return the_action
def next_action_sgs_index_freedom(
policy_rcpsp: PolicyRCPSP,
state: State,
check_if_applicable: bool = False,
domain_sk_decide: Union[MultiModeRCPSP, SingleModeRCPSP] = None,
delta_index_freedom: int = 10,
**kwargs,
):
obs: State = state
possible_task_to_launch = policy_rcpsp.domain.task_possible_to_launch_precedence(
state=state
)
tasks_remaining = set(state.tasks_remaining)
sorted_task_not_done = sorted(
[
(index, policy_rcpsp.permutation_task[index])
for index in range(len(policy_rcpsp.permutation_task))
if policy_rcpsp.permutation_task[index] in tasks_remaining
and policy_rcpsp.permutation_task[index] in possible_task_to_launch
],
key=lambda x: x[0],
)
the_action = None
if len(sorted_task_not_done) > 0:
index_t = sorted_task_not_done[0][0]
other_tasks_same_time = [
task_id
for ind, task_id in sorted_task_not_done
if ind <= index_t + delta_index_freedom
]
for tinterest in other_tasks_same_time:
the_action = SchedulingAction(
task=tinterest,
action=SchedulingActionEnum.START,
mode=policy_rcpsp.modes_dictionnary[tinterest],
time_progress=False,
resource_unit_names=None,
) # start_tasks=[tinterest], advance_time=False)
if policy_rcpsp.resource_allocation is not None:
if tinterest in policy_rcpsp.resource_allocation:
the_action.resource_unit_names = policy_rcpsp.resource_allocation[
tinterest
]
applicable = action_in_applicable_actions(
policy_rcpsp.domain, observation=state, the_action=the_action
)
if applicable[0]:
break
else:
the_action = None
if the_action is None:
the_action = SchedulingAction(
task=None,
action=SchedulingActionEnum.TIME_PR,
mode=None,
time_progress=True,
resource_unit_names=None,
)
return the_action
map_method_to_function = {
BasePolicyMethod.FOLLOW_GANTT: next_action_follow_static_gantt,
BasePolicyMethod.SGS_PRECEDENCE: next_action_sgs_first_task_precedence_ready,
BasePolicyMethod.SGS_STRICT: next_action_sgs_strict,
BasePolicyMethod.SGS_READY: next_action_sgs_first_task_ready,
BasePolicyMethod.SGS_TIME_FREEDOM: next_action_sgs_time_freedom,
BasePolicyMethod.SGS_INDEX_FREEDOM: next_action_sgs_index_freedom,
}
scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/sgs_policies/sgs_policies.py | sgs_policies.py
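# Illustrative usage sketch (not part of the original file): a minimal, hedged
# example of how the SGS policy functions above are typically reached through
# PolicyRCPSP. `my_scheduling_domain`, the task permutation and the mode mapping
# are hypothetical placeholders that must come from an actual RCPSP setup.
from skdecide.hub.solver.sgs_policies.sgs_policies import (
    BasePolicyMethod,
    PolicyMethodParams,
    PolicyRCPSP,
)

policy = PolicyRCPSP(
    domain=my_scheduling_domain,  # hypothetical SchedulingDomain instance
    policy_method_params=PolicyMethodParams(
        base_policy_method=BasePolicyMethod.SGS_STRICT
    ),
    permutation_task=[1, 2, 3, 4],  # hypothetical priority order over task ids
    modes_dictionnary={1: 1, 2: 1, 3: 1, 4: 1},  # hypothetical task -> mode mapping
)
action = policy.get_next_action(my_scheduling_domain.reset())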
from __future__ import annotations
import glob
import os
from typing import Callable, Dict, Optional, Type
import ray
from ray.rllib.agents.trainer import Trainer
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.tune.registry import register_env
from skdecide import Domain, Solver
from skdecide.builders.domain import (
Initializable,
Sequential,
SingleAgent,
UnrestrictedActions,
)
from skdecide.builders.solver import Policies, Restorable
from skdecide.hub.space.gym import GymSpace
# TODO: remove UnrestrictedActions?
class D(Domain, Sequential, UnrestrictedActions, Initializable):
pass
class RayRLlib(Solver, Policies, Restorable):
"""This class wraps a Ray RLlib solver (ray[rllib]) as a scikit-decide solver.
!!! warning
Using this class requires Ray RLlib to be installed.
"""
T_domain = D
def __init__(
self,
algo_class: Type[Trainer],
train_iterations: int,
config: Optional[Dict] = None,
policy_configs: Dict[str, Dict] = {"policy": {}},
policy_mapping_fn: Callable[[str], str] = lambda agent_id: "policy",
) -> None:
"""Initialize Ray RLlib.
# Parameters
algo_class: The class of Ray RLlib trainer/agent to wrap.
train_iterations: The number of iterations to call the trainer's train() method.
config: The configuration dictionary for the trainer.
policy_configs: The mapping from policy id (str) to additional config (dict) (leave default for single policy).
policy_mapping_fn: The function mapping agent ids to policy ids (leave default for single policy).
"""
self._algo_class = algo_class
self._train_iterations = train_iterations
self._config = config or {}
self._policy_configs = policy_configs
self._policy_mapping_fn = policy_mapping_fn
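        # Start (or attach to) the local Ray runtime; ignore_reinit_error=True
        # prevents an error if ray.init() has already been called in this process.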
ray.init(ignore_reinit_error=True)
@classmethod
def _check_domain_additional(cls, domain: Domain) -> bool:
if isinstance(domain, SingleAgent):
return isinstance(domain.get_action_space(), GymSpace) and isinstance(
domain.get_observation_space(), GymSpace
)
else:
return all(
isinstance(a, GymSpace) for a in domain.get_action_space().values()
) and all(
isinstance(o, GymSpace) for o in domain.get_observation_space().values()
)
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
# Reuse algo if possible (enables further learning)
if not hasattr(self, "_algo"):
self._init_algo(domain_factory)
# Training loop
for _ in range(self._train_iterations):
self._algo.train()
def _sample_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
action = {
k: self._algo.compute_action(
self._unwrap_obs(v, k), policy_id=self._policy_mapping_fn(k)
)
for k, v in observation.items()
}
return self._wrap_action(action)
def _is_policy_defined_for(self, observation: D.T_agent[D.T_observation]) -> bool:
return True
def _save(self, path: str) -> None:
self._algo.save(path)
def _load(self, path: str, domain_factory: Callable[[], D]):
if not os.path.isfile(path):
# Find latest checkpoint
metadata_files = glob.glob(f"{path}/**/*.tune_metadata")
latest_metadata_file = max(metadata_files, key=os.path.getctime)
path = latest_metadata_file[: -len(".tune_metadata")]
self._init_algo(domain_factory)
self._algo.restore(path)
def _init_algo(self, domain_factory: Callable[[], D]):
domain = domain_factory()
self._wrap_action = lambda a: {
k: next(iter(domain.get_action_space()[k].from_unwrapped([v])))
for k, v in a.items()
}
self._unwrap_obs = lambda o, agent: next(
iter(domain.get_observation_space()[agent].to_unwrapped([o]))
)
# Overwrite multi-agent config
pol_obs_spaces = {
self._policy_mapping_fn(k): v.unwrapped()
for k, v in domain.get_observation_space().items()
}
pol_act_spaces = {
self._policy_mapping_fn(k): v.unwrapped()
for k, v in domain.get_action_space().items()
}
policies = {
k: (None, pol_obs_spaces[k], pol_act_spaces[k], v or {})
for k, v in self._policy_configs.items()
}
self._config["multiagent"] = {
"policies": policies,
"policy_mapping_fn": self._policy_mapping_fn,
}
        # Instantiate algo
register_env("skdecide_env", lambda _: AsRLlibMultiAgentEnv(domain_factory()))
self._algo = self._algo_class(env="skdecide_env", config=self._config)
class AsRLlibMultiAgentEnv(MultiAgentEnv):
def __init__(self, domain: D) -> None:
"""Initialize AsRLlibMultiAgentEnv.
# Parameters
domain: The scikit-decide domain to wrap as a RLlib multi-agent environment.
"""
self._domain = domain
def reset(self):
"""Resets the env and returns observations from ready agents.
# Returns
obs (dict): New observations for each ready agent.
"""
raw_observation = self._domain.reset()
observation = {
k: next(iter(self._domain.get_observation_space()[k].to_unwrapped([v])))
for k, v in raw_observation.items()
}
return observation
def step(self, action_dict):
"""Returns observations from ready agents.
The returns are dicts mapping from agent_id strings to values. The
number of agents in the env can vary over time.
# Returns
obs (dict): New observations for each ready agent.
rewards (dict): Reward values for each ready agent. If the episode is just started, the value will be None.
dones (dict): Done values for each ready agent. The special key "__all__" (required) is used to indicate env
termination.
infos (dict): Optional info values for each agent id.
"""
action = {
k: next(iter(self._domain.get_action_space()[k].from_unwrapped([v])))
for k, v in action_dict.items()
}
outcome = self._domain.step(action)
observations = {
k: next(iter(self._domain.get_observation_space()[k].to_unwrapped([v])))
for k, v in outcome.observation.items()
}
rewards = {k: v.reward for k, v in outcome.value.items()}
done = {"__all__": outcome.termination}
infos = {k: (v or {}) for k, v in outcome.info.items()}
return observations, rewards, done, infos
def unwrapped(self):
"""Unwrap the scikit-decide domain and return it.
# Returns
The original scikit-decide domain.
"""
return self._domain
if __name__ == "__main__":
from ray.rllib.agents.ppo import PPOTrainer
from skdecide.hub.domain.rock_paper_scissors import RockPaperScissors
from skdecide.utils import rollout
domain_factory = lambda: RockPaperScissors()
domain = domain_factory()
if RayRLlib.check_domain(domain):
solver_factory = lambda: RayRLlib(
PPOTrainer, train_iterations=1, config={"framework": "torch"}
)
solver = RockPaperScissors.solve_with(solver_factory, domain_factory)
rollout(
domain,
solver,
action_formatter=lambda a: str({k: v.name for k, v in a.items()}),
outcome_formatter=lambda o: f"{ {k: v.name for k, v in o.observation.items()} }"
f" - rewards: { {k: v.reward for k, v in o.value.items()} }",
        )
scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/ray_rllib/ray_rllib.py | ray_rllib.py
from __future__ import annotations
from typing import Callable
from skdecide import DeterministicPolicySolver, Domain, EnumerableSpace, Memory
from skdecide.builders.domain import EnumerableTransitions, FullyObservable, SingleAgent
class D(Domain, SingleAgent, EnumerableTransitions, FullyObservable):
pass
class SimpleGreedy(DeterministicPolicySolver):
T_domain = D
@classmethod
def _check_domain_additional(cls, domain: D) -> bool:
return isinstance(domain.get_action_space(), EnumerableSpace)
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
self._domain = (
domain_factory()
) # no further solving code required here since everything is computed online
def _get_next_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
# This solver selects the first action with the highest expected immediate reward (greedy)
domain = self._domain
memory = Memory(
[observation]
) # note: observation == state (because FullyObservable)
applicable_actions = domain.get_applicable_actions(memory)
if domain.is_transition_value_dependent_on_next_state():
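            # The transition value depends on the next state: average the reward
            # over the next-state distribution of each applicable action.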
values = []
for a in applicable_actions.get_elements():
next_state_prob = domain.get_next_state_distribution(
memory, [a]
).get_values()
expected_value = sum(
p * domain.get_transition_value(memory, [a], s).reward
for s, p in next_state_prob
)
values.append(expected_value)
        else:
            # Transition value does not depend on the next state: one call per action.
            values = [
                domain.get_transition_value(memory, [a]).reward
                for a in applicable_actions.get_elements()
            ]
argmax = max(range(len(values)), key=lambda i: values[i])
return [
applicable_actions.get_elements()[argmax]
] # list of action here because we handle Parallel domains
def _is_policy_defined_for(self, observation: D.T_agent[D.T_observation]) -> bool:
        return True
scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/simple_greedy/simple_greedy.py | simple_greedy.py
from __future__ import annotations
from typing import Any, Callable, Dict
from stable_baselines3.common.vec_env import DummyVecEnv
from skdecide import Domain, Solver
from skdecide.builders.domain import (
Initializable,
Sequential,
SingleAgent,
UnrestrictedActions,
)
from skdecide.builders.solver import Policies, Restorable
from skdecide.hub.domain.gym import AsGymEnv
from skdecide.hub.space.gym import GymSpace
class D(Domain, SingleAgent, Sequential, UnrestrictedActions, Initializable):
pass
class StableBaseline(Solver, Policies, Restorable):
    """This class wraps a Stable Baselines 3 solver (stable_baselines3) as a scikit-decide solver.
!!! warning
Using this class requires Stable Baselines 3 to be installed.
"""
T_domain = D
def __init__(
self,
algo_class: type,
baselines_policy: Any,
learn_config: Dict = None,
**kwargs: Any,
    ) -> None:
        """Initialize StableBaseline.
# Parameters
algo_class: The class of Baselines solver (stable_baselines3) to wrap.
baselines_policy: The class of Baselines policy network (stable_baselines3.common.policies or str) to use.
"""
self._algo_class = algo_class
self._baselines_policy = baselines_policy
self._learn_config = learn_config if learn_config is not None else {}
self._algo_kwargs = kwargs
@classmethod
def _check_domain_additional(cls, domain: Domain) -> bool:
return isinstance(domain.get_action_space(), GymSpace) and isinstance(
domain.get_observation_space(), GymSpace
)
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
# TODO: improve code for parallelism
# (https://stable-baselines3.readthedocs.io/en/master/guide/examples.html
# #multiprocessing-unleashing-the-power-of-vectorized-environments)?
if not hasattr(
self, "_algo"
): # reuse algo if possible (enables further learning)
domain = domain_factory()
env = DummyVecEnv(
[lambda: AsGymEnv(domain)]
) # the algorithms require a vectorized environment to run
self._algo = self._algo_class(
self._baselines_policy, env, **self._algo_kwargs
)
self._init_algo(domain)
self._algo.learn(**self._learn_config)
def _sample_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
action, _ = self._algo.predict(observation)
return self._wrap_action(action)
def _is_policy_defined_for(self, observation: D.T_agent[D.T_observation]) -> bool:
return True
def _save(self, path: str) -> None:
self._algo.save(path)
def _load(self, path: str, domain_factory: Callable[[], D]):
self._algo = self._algo_class.load(path)
self._init_algo(domain_factory())
def _init_algo(self, domain: D):
self._wrap_action = lambda a: next(
iter(domain.get_action_space().from_unwrapped([a]))
        )
scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/stable_baselines/stable_baselines.py | stable_baselines.py
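# Illustrative usage sketch (not part of the original file): a hedged example of
# how this wrapper is typically combined with Stable Baselines 3. `MyGymLikeDomain`
# is a hypothetical scikit-decide domain exposing Gym-compatible spaces; the
# solve_with/rollout pattern mirrors the RLlib example elsewhere in this hub.
from stable_baselines3 import PPO

from skdecide.hub.solver.stable_baselines.stable_baselines import StableBaseline
from skdecide.utils import rollout

domain_factory = lambda: MyGymLikeDomain()  # hypothetical domain factory
solver_factory = lambda: StableBaseline(
    PPO, "MlpPolicy", learn_config={"total_timesteps": 10_000}, verbose=1
)
if StableBaseline.check_domain(domain_factory()):
    solver = MyGymLikeDomain.solve_with(solver_factory, domain_factory)
    rollout(domain_factory(), solver)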
from __future__ import annotations
from typing import Any, Dict
from skdecide import rollout_episode
from skdecide.builders.domain.scheduling.scheduling_domains import D, SchedulingDomain
from skdecide.builders.solver import DeterministicPolicies
class MetaPolicy(DeterministicPolicies):
T_domain = D
def __init__(
self,
policies: Dict[Any, DeterministicPolicies],
execution_domain: SchedulingDomain,
known_domain: SchedulingDomain,
nb_rollout_estimation=1,
verbose=True,
):
self.known_domain = known_domain
self.known_domain.fast = True
self.execution_domain = execution_domain
self.policies = policies
self.current_states = {method: None for method in policies}
self.nb_rollout_estimation = nb_rollout_estimation
self.verbose = verbose
def reset(self):
self.current_states = {method: None for method in self.policies}
def _get_next_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
results = {}
actions_map = {}
self.known_domain.set_inplace_environment(True)
actions_c = [
self.policies[method].get_next_action(observation)
for method in self.policies
]
if len(set(actions_c)) > 1:
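            # The candidate policies disagree: roll each one out on the fully known
            # domain from the current observation and keep the action proposed by
            # the policy whose rollout reaches the end in the fewest time steps.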
for method in self.policies:
results[method] = 0.0
for j in range(self.nb_rollout_estimation):
states, actions, values = rollout_episode(
domain=self.known_domain,
solver=self.policies[method],
outcome_formatter=None,
action_formatter=None,
verbose=False,
from_memory=observation.copy(),
)
# cost = sum(v.cost for v in values)
results[method] += (
states[-1].t - observation.t
) # TODO, this is a trick...
actions_map[method] = actions[0]
if self.verbose:
# print(results)
print(actions_map[min(results, key=lambda x: results[x])])
return actions_map[min(results, key=lambda x: results[x])]
else:
return actions_c[0]
def _is_policy_defined_for(self, observation: D.T_agent[D.T_observation]) -> bool:
        return True
scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/meta_policy/meta_policies.py | meta_policies.py
from __future__ import annotations
import os
import sys
from typing import Any, Callable, List, Tuple
from skdecide import Domain, Solver, hub
from skdecide.builders.domain import (
Actions,
DeterministicInitialized,
DeterministicTransitions,
FullyObservable,
Markovian,
Rewards,
Sequential,
SingleAgent,
)
from skdecide.builders.solver import DeterministicPolicies, ParallelSolver, Utilities
from skdecide.hub.space.gym import ListSpace
record_sys_path = sys.path
skdecide_cpp_extension_lib_path = os.path.abspath(hub.__path__[0])
if skdecide_cpp_extension_lib_path not in sys.path:
sys.path.append(skdecide_cpp_extension_lib_path)
try:
from __skdecide_hub_cpp import _IWSolver_ as iw_solver
class D(
Domain,
SingleAgent,
Sequential,
DeterministicTransitions,
Actions,
DeterministicInitialized,
Markovian,
FullyObservable,
Rewards,
): # TODO: check why DeterministicInitialized & PositiveCosts/Rewards?
pass
class IW(ParallelSolver, Solver, DeterministicPolicies, Utilities):
T_domain = D
def __init__(
self,
domain_factory: Callable[[], Domain],
state_features: Callable[[Domain, D.T_state], Any],
use_state_feature_hash: bool = False,
node_ordering: Callable[[float, int, int, float, int, int], bool] = None,
time_budget: int = 0, # time budget to continue searching for better plans after a goal has been reached
parallel: bool = False,
shared_memory_proxy=None,
debug_logs: bool = False,
) -> None:
ParallelSolver.__init__(
self,
domain_factory=domain_factory,
parallel=parallel,
shared_memory_proxy=shared_memory_proxy,
)
self._solver = None
self._domain = None
self._state_features = state_features
self._use_state_feature_hash = use_state_feature_hash
self._node_ordering = node_ordering
self._time_budget = time_budget
self._debug_logs = debug_logs
self._lambdas = [self._state_features]
self._ipc_notify = True
def close(self):
"""Joins the parallel domains' processes.
Not calling this method (or not using the 'with' context statement)
results in the solver forever waiting for the domain processes to exit.
"""
if self._parallel:
self._solver.close()
ParallelSolver.close(self)
def _init_solve(self, domain_factory: Callable[[], D]) -> None:
self._domain_factory = domain_factory
self._solver = iw_solver(
domain=self.get_domain(),
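            # Note: in parallel mode the state-feature computation is delegated to
            # the domain process: d.call(None, 0, s) invokes the first (and only)
            # lambda registered in self._lambdas in __init__.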
state_features=lambda d, s: self._state_features(d, s)
if not self._parallel
else d.call(None, 0, s),
use_state_feature_hash=self._use_state_feature_hash,
node_ordering=self._node_ordering,
time_budget=self._time_budget,
parallel=self._parallel,
debug_logs=self._debug_logs,
)
self._solver.clear()
def _solve_domain(self, domain_factory: Callable[[], D]) -> None:
self._init_solve(domain_factory)
def _solve_from(self, memory: D.T_memory[D.T_state]) -> None:
self._solver.solve(memory)
def _is_solution_defined_for(
self, observation: D.T_agent[D.T_observation]
) -> bool:
return self._solver.is_solution_defined_for(observation)
def _get_next_action(
self, observation: D.T_agent[D.T_observation]
) -> D.T_agent[D.T_concurrency[D.T_event]]:
if not self._is_solution_defined_for(observation):
self._solve_from(observation)
return self._solver.get_next_action(observation)
def _get_utility(self, observation: D.T_agent[D.T_observation]) -> D.T_value:
return self._solver.get_utility(observation)
def _reset(self) -> None:
self._solver.clear()
def get_nb_of_explored_states(self) -> int:
return self._solver.get_nb_of_explored_states()
def get_nb_of_pruned_states(self) -> int:
return self._solver.get_nb_of_pruned_states()
def get_intermediate_scores(self) -> List[Tuple[int, float]]:
return self._solver.get_intermediate_scores()
except ImportError:
sys.path = record_sys_path
print(
'Scikit-decide C++ hub library not found. Please check it is installed in "skdecide/hub".'
)
    raise
scikit-decide | /scikit_decide-0.9.6-cp310-cp310-macosx_10_15_x86_64.whl/skdecide/hub/solver/iw/iw.py | iw.py
# scikit-deploy
Deploy models trained with scikit-learn with Docker.
## Requirements
You will need python 3 and Docker installed on your system.
You can deploy any model trained with scikit-learn, or any model that implements the same `predict()` method as scikit-learn models (e.g. xgboost).
## Installing
`pip install scikit-deploy`
## Configuration
First, you will need a pickled model trained using scikit-learn 0.20 (later versions should also be supported).
You will also need a `configuration.json` file describing the model metadata and build information.
It takes the following form:
```
{
"image_tag": "{{the tag given to the generated docker image}}",
"endpoints": ["{{the endpoints to call for scoring}}"],
}
```
Endpoints have the following format:
```
{
"route": "{{the HTTP route to call for scoring}}",
"model_path": "{{model absolute path}}",
"inputs": [{{the input features, objects with "name" and optional fields "default", "offset" and "scaling" }}],
"outputs": [{{the output targets, objects with "name" and optional fields "offset" and "scaling"}}]
}
```
For inputs, the offset is subtracted from the value and the difference is then divided by the scaling. For outputs, the offset is added to the value and the sum is then multiplied by the scaling.
Offset and scaling values are typically used to normalize the inputs and denormalize the outputs respectively.
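In other words, the server applies the transformations sketched below (illustrative Python, not part of scikit-deploy's public API; treating omitted `offset` and `scaling` fields as 0 and 1 is an assumption):

```
def transform_input(value, offset=0.0, scaling=1.0):
    # Applied to each input feature before it is passed to the model.
    return (value - offset) / scaling

def transform_output(value, offset=0.0, scaling=1.0):
    # Applied to each model output before it is returned in the response.
    return (value + offset) * scaling
```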
Here is an example config file :
```
{
"image_tag": "my_super_model:latest",
"endpoints": [
{
"route": "/super-score",
"model_path": "/home/toto/model.pkl",
"inputs": [{"name": "x"}, {"name": "y", "default": 1.551, "offset": 50, "scaling": 2}],
"outputs": [{"name": "z", "offset": 3, "scaling": 1.4}]
}
],
}
```
## Building your image
Run the following command:
`skdeploy -c /path/to/config.json`
This will run a Docker build using the image name you have provided.
If your models require extra dependencies, you can specify an additional `requirements.txt` file to include
for your server with the `-r` flag:
`skdeploy -c /path/to/config -r /path/to/requirements.txt`
If you need to specify a SSH private key in case your requirements are part of a private repository, use the `-k` flag:
`skdeploy -c /path/to/config -r /path/to/requirements.txt -k "$(cat /path/to/private_key)"`
## Running and testing the server
The server running inside the Docker container listens on port 5000.
To test your server on local port 8080 for example, run it using docker:
`docker run -p 8080:5000 your_image_tag`
And you can start querying the server; for the config file above, this would be done as:
`GET localhost:8080/super-score?x=1337&y=115.16`
Which would yield the following JSON:
```
{
"prediction": {
"z": 11525
}
}
```
You can also send a `POST` request to the endpoint. In this case, the body must be a JSON array of the inputs. Using the `POST` method, you can ask the server for several predictions in one request. For example:
```
[
    {"x": 1337, "y": 115.16},
    {"x": 2664, "y": 98.3}
]
```
Which would yield
```
[
    {"prediction": {"z": 11525}},
    {"prediction": {"z": 3457}}
]
```
scikit-deploy | /scikit-deploy-2.2.0.tar.gz/scikit-deploy-2.2.0/README.md | README.md
import tempfile
import shutil
import logging
import pkg_resources
import sys
import os.path as osp
import docker
import json
import pickle
from typing import Optional
logger = logging.getLogger("scikit-deploy")
def _prepare_requirements(temp_dir, requirements_path):
"""
    Concatenates a user-provided requirements file with the default one.
"""
# read default requirements
final_path = osp.join(temp_dir, "workspace", "requirements.txt")
with open(final_path) as f:
reqs = f.read()
# concatenate with user-specified
with open(requirements_path) as f:
reqs = "{}\n{}".format(reqs, f.read())
# write back to file
with open(final_path, "w+") as f:
f.write(reqs)
def _prepare_workspace(temp_dir, config, requirements_path):
"""
Prepares the temporary workspace directory for the docker build
"""
# copy the server package as the workspace directory
shutil.copytree(
pkg_resources.resource_filename(__name__, "scikit_deploy_server"),
osp.join(temp_dir, "workspace"),
)
# copy resources (clf, config) to the workspace server resources
resources_folder = osp.join(temp_dir, "workspace", "server", "resources")
json.dump(config, open(osp.join(resources_folder, "config.json"), "w"))
for endpoint in config["endpoints"]:
shutil.copy(
endpoint["model_path"], osp.join(resources_folder, endpoint["model_name"])
)
if requirements_path is not None:
_prepare_requirements(temp_dir, requirements_path)
def _build_docker(temp_dir, image_tag, ssh_key: Optional[str] = None):
path = osp.join(temp_dir, "workspace")
client = docker.APIClient()
try:
if ssh_key is None:
output = client.build(
path=path,
dockerfile="Dockerfile",
tag=image_tag,
quiet=False,
network_mode="host",
)
else:
output = client.build(
path=path,
dockerfile="Dockerfile-multi-stage",
tag=image_tag,
quiet=False,
network_mode="host",
forcerm=True, # ensure SSH keys don't persist
target="multi-stage-build-final",
buildargs=dict(SSH_PRIVATE_KEY=ssh_key),
)
for l in output:
logs = l.decode("utf-8").split("\r\n")
for line in logs:
if line:
line = json.loads(line)
# Show only relevant outputs
if "stream" in line:
logger.info("(DOCKER) - {}".format(line["stream"]))
if "errorDetail" in line:
logger.error("(DOCKER) - {}".format(line["errorDetail"]))
except (docker.errors.BuildError, docker.errors.APIError) as e:
logger.error("Docker build failed with logs: ")
for l in e.build_log:
logger.error(l)
raise
def _validate_route(route: str):
if not route.startswith("/"):
logger.error("route must begin with a /")
raise ValueError()
if route.endswith("/"):
logger.error("route cannot end with a /")
raise ValueError()
def _generate_request(config):
endpoint = config["endpoints"][0]
route = endpoint["route"]
qs = "&".join([f"{o['name']}=0" for o in endpoint["inputs"]])
return f"http://localhost:8000{route}?{qs}"
def build(config_path, requirements_path, ssh_key: Optional[str] = None):
"""
Builds the docker image
"""
logger.info("Beginning build.")
temp_dir = tempfile.mkdtemp()
try:
with open(config_path) as f:
config = json.load(f)
image_tag = config.get("image_tag")
if image_tag is None:
logger.error("No image_tag specified in config")
exit(1)
if "endpoints" not in config:
logger.error("No endpoints specified in config")
exit(1)
for endpoint in config["endpoints"]:
_validate_route(endpoint.get("route"))
endpoint["model_name"] = (
endpoint["route"].replace("/", "_") + "_clf.pkl"
)
_prepare_workspace(temp_dir, config, requirements_path)
_build_docker(temp_dir, image_tag, ssh_key)
logger.info(f"Successfully built image {image_tag}")
logger.info("To test, run :")
logger.info(f" docker run -p 8000:8000 {image_tag}")
logger.info("and then perform http request")
logger.info(f" GET {_generate_request(config)}")
status = 0
except:
logger.exception("Failed to build image")
status = 1
finally:
shutil.rmtree(temp_dir)
    exit(status)
scikit-deploy | /scikit-deploy-2.2.0.tar.gz/scikit-deploy-2.2.0/scikit_deploy/build.py | build.py
import pickle
import json
import pkg_resources
import numpy as np
import os
from flask import Blueprint, request, jsonify
from server.prediction import predict
from server.config import Config
from server.logging import get_logger
from server.util import APIError
from collections.abc import Iterable
LOGGER = get_logger(__name__)
config = Config()
LOGGER.info("Started server with config")
LOGGER.info(json.dumps(config.config_data))
models = {}
endpoints_config = {}
for endpoint in config.endpoints:
# We remove the leading /
route_key = endpoint.route[1:]
models[route_key] = pickle.loads(pkg_resources.resource_string(
__name__, f'resources/{endpoint.model_name}'))
endpoints_config[route_key] = endpoint
app_blueprint = Blueprint('app', __name__)
@app_blueprint.route('/<route>', methods=['GET'])
def index_endpoint(route:str):
try:
model = models.get(route)
endpoint_config = endpoints_config.get(route)
if not model:
raise APIError('Not found', 404)
LOGGER.info("Received scoring request")
prediction = predict(model, request.args, endpoint_config)
except APIError as e:
return e.message, e.status_code
LOGGER.info("Successful prediction")
return jsonify(dict(prediction=prediction))
@app_blueprint.route('/<route>', methods=['POST'])
def score_multiple_endpoint(route:str):
try:
model = models.get(route)
endpoint_config = endpoints_config.get(route)
if not model:
raise APIError('Not found', 404)
LOGGER.info('Received scoring request for multiple samples')
data = request.get_json()
if not isinstance(data, Iterable):
return "Body should be a json array of parameters", 400
res = [dict(prediction=predict(model, x, endpoint_config))
for x in data]
except APIError as e:
return e.message, e.status_code
return jsonify(res)
@app_blueprint.route("/instance/health", methods=['GET'])
def health():
return "healthy", 200
@app_blueprint.route("/config", methods=['GET'])
def config_endpoint():
    return jsonify(config.config_data), 200 | scikit-deploy | /scikit-deploy-2.2.0.tar.gz/scikit-deploy-2.2.0/scikit_deploy/scikit_deploy_server/server/scoring.py | scoring.py | 0.33546 | 0.053825
from __future__ import annotations
import json
import importlib
import inspect
import pkgutil
from types import ModuleType, FunctionType
from typing import Dict
import sklearn
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils._pprint import _changed_params
MODULES = [
"sklearn.base",
"sklearn.calibration",
"sklearn.cluster",
"sklearn.compose",
"sklearn.covariance",
"sklearn.cross_decomposition",
"sklearn.datasets",
"sklearn.decomposition",
"sklearn.discriminant_analysis",
"sklearn.dummy",
"sklearn.ensemble",
"sklearn.exceptions",
"sklearn.experimental",
"sklearn.feature_extraction",
"sklearn.feature_selection",
"sklearn.gaussian_process",
"sklearn.impute",
"sklearn.inspection",
"sklearn.isotonic",
"sklearn.kernel_approximation",
"sklearn.kernel_ridge",
"sklearn.linear_model",
"sklearn.manifold",
"sklearn.metrics",
"sklearn.mixture",
"sklearn.model_selection",
"sklearn.multiclass",
"sklearn.multioutput",
"sklearn.naive_bayes",
"sklearn.neighbors",
"sklearn.neural_network",
"sklearn.pipeline",
"sklearn.preprocessing",
"sklearn.random_projection",
"sklearn.semi_supervised",
"sklearn.svm",
"sklearn.tree",
"sklearn.utils",
]
for mod in MODULES:
importlib.import_module(mod)
def _get_submodules(module):
"""Get all submodules of a module."""
if hasattr(module, "__path__"):
return [name for _, name, _ in pkgutil.iter_modules(module.__path__)]
return []
def get_all_sklearn_objects(
module: ModuleType,
) -> Dict[str, FunctionType | BaseEstimator | TransformerMixin]:
"""Get all objects from a module."""
objs = {}
submodules = _get_submodules(module)
for name in dir(module):
if not name.startswith("_"):
obj = getattr(module, name)
if name in submodules:
objs.update(get_all_sklearn_objects(obj))
elif inspect.isclass(obj) or inspect.isfunction(obj):
objs[name] = obj
return objs
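# Illustrative example (not part of the original source): the mapping returned above is
# keyed by class/function name for everything exposed by the imported sklearn modules, e.g.
#     objs = get_all_sklearn_objects(sklearn)
#     objs["StandardScaler"] is sklearn.preprocessing.StandardScaler  # True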
def load(dict_: dict, /) -> BaseEstimator | TransformerMixin:
"""Create a python instance from dict structure."""
objs = get_all_sklearn_objects(sklearn)
if isinstance(dict_, list):
for i, item in enumerate(dict_):
dict_[i] = load(item)
return dict_
if isinstance(dict_, dict):
for key in dict_.keys():
dict_[key] = load(dict_[key])
kwargs = dict_[key] if dict_[key] is not None else {}
try:
return objs[key](**kwargs)
except KeyError:
pass
return dict_
return dict_
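# Illustrative round trip (not part of the original source), assuming scikit-learn is
# installed and the names used below exist in the installed version:
#     load({"StandardScaler": None})                  -> StandardScaler()
#     load({"StandardScaler": {"with_mean": False}})  -> StandardScaler(with_mean=False)
# dump() defined below performs the inverse, turning an estimator back into such a dict.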
class SKLearnEncoder(json.JSONEncoder):
"""Encode SKLearn objects to JSON."""
def default(self, o):
"""Default encoding."""
if isinstance(o, (sklearn.base.BaseEstimator, sklearn.base.TransformerMixin)):
name = o.__class__.__name__
params = _changed_params(o)
if params == {}:
params = None
return {name: params}
return json.JSONEncoder.default(self, o)
def dump(obj: BaseEstimator | TransformerMixin, /) -> dict:
"""Create a dict from a python object."""
    return json.loads(json.dumps(obj, cls=SKLearnEncoder)) | scikit-dict | /scikit_dict-0.1.0-py3-none-any.whl/skdict/__init__.py | __init__.py | 0.749087 | 0.421492
from numpy import min, max, percentile, zeros, bool_, pad, sin, arange, pi, concatenate
from numpy.random import default_rng
from skdh.utility import moving_mean, moving_median, moving_sd
from skdh.sleep.utility import (
compute_z_angle,
compute_absolute_difference,
drop_min_blocks,
arg_longest_bout,
)
def get_total_sleep_opportunity(
fs,
time,
accel,
temperature,
wear_starts,
wear_stops,
min_rest_block,
max_act_break,
tso_min_thresh,
tso_max_thresh,
tso_perc,
tso_factor,
int_wear_temp,
int_wear_move,
plot_fn,
idx_start=0,
add_active_time=0.0,
):
"""
    Compute the period of time in which sleep can occur for a given day's worth of data.
For this algorithm, it is the longest period of wear-time that has low activity.
Parameters
----------
fs : float
Sampling frequency of the time and acceleration data, in Hz.
time : numpy.ndarray
Timestamps for the acceleration.
accel : numpy.ndarray
(N, 3) array of acceleration values in g.
temperature : numpy.ndarray
        (N,) array of temperature values in celsius.
wear_starts : {numpy.ndarray, None}
Indices for the starts of wear-time. Note that while `time` and `accel` should
be the values for one day, `wear_starts` is likely indexed to the whole data
series. This offset can be adjusted by `idx_start`. If indexing only into
the one day, `idx_start` should be 0. If None, will compute wear internally.
wear_stops : {numpy.ndarray, None}
Indices for the stops of wear-time. Note that while `time` and `accel` should
be the values for one day, `wear_stops` is likely indexed to the whole data
series. This offset can be adjusted by `idx_start`. If indexing only into
the one day, `idx_start` should be 0. If None, will compute wear internally.
min_rest_block : int
Minimum number of minutes that a rest period can be
max_act_break : int
Maximum number of minutes an active block can be so that it doesn't interrupt
a longer rest period.
tso_min_thresh : float
Minimum angle value the TSO threshold can be.
tso_max_thresh : float
Maximum angle value the TSO threshold can be.
tso_perc : int
The percentile to use when calculating the TSO threshold from daily data.
tso_factor : float
        The factor to multiply the percentile value by to get the TSO threshold.
int_wear_temp : float
Internal wear temperature threshold in celsius.
int_wear_move : float
Internal wear movement threshold in g.
plot_fn : function
Plotting function for the arm angle.
idx_start : int, optional
Offset index for wear-time indices. If `wear_starts` and `wear_stops` are
relative to the day of interest, then `idx_start` should equal 0.
add_active_time : float, optional
Add active time to the accelerometer signal start and end when detecting
the total sleep opportunity. This can occasionally be useful if less than
24 hrs of data are collected, as sleep-period skewed data can effect the
sleep window cutoff, effecting the end results. Suggested is not adding
more than 1.5 hours. Default is 0.0 for no added data.
Returns
-------
start : float
Total sleep opportunity start timestamp.
stop : float
Total sleep opportunity stop timestamp.
arg_start : int
Total sleep opportunity start index, into the specific period of time.
arg_stop : int
Total sleep opportunity stop index, into the specific period of time.
"""
# samples in 5 seconds. GGIR makes this always odd, which is a function
# of the library (zoo) they are using for rollmedian
n5 = int(5 * fs)
# compute the rolling median for 5s windows
acc_rmd = moving_median(accel, n5, skip=1, axis=0)
# compute the z-angle
z = compute_z_angle(acc_rmd)
# rolling 5s mean with non-overlapping windows for the z-angle
_z_rm = moving_mean(z, n5, n5)
# plot arm angle
plot_fn(_z_rm)
# add data as required
rng = default_rng()
blocksize = max([int(12 * 60 * add_active_time), 0])
angleblock = sin(arange(blocksize) / pi * 0.1) * 40
angleblock += rng.normal(loc=0.0, scale=10.0, size=blocksize)
z_rm = concatenate((angleblock, _z_rm, angleblock))
# the angle differences
dz_rm = compute_absolute_difference(z_rm)
# rolling 5 minute median. 12 windows per minute * 5 minutes
dz_rm_rmd = moving_median(dz_rm, 12 * 5, skip=1)
# compute the TSO threshold
tso_thresh = compute_tso_threshold(
dz_rm_rmd,
min_td=tso_min_thresh,
max_td=tso_max_thresh,
perc=tso_perc,
factor=tso_factor,
)
# get the number of windows there would be without additional data
# .size because the difference is computed and left at the same size
nw = (_z_rm.size - (12 * 5)) + 1 # "// 1" left out
# create the TSO mask (1 -> sleep opportunity, only happens during wear)
tso = zeros(nw, dtype=bool_)
# block off external non-wear times, scale by 5s blocks
for strt, stp in zip((wear_starts - idx_start) / n5, (wear_stops - idx_start) / n5):
tso[int(strt) : int(stp)] = True
# apply the threshold before any internal wear checking
tso &= (
dz_rm_rmd[blocksize : blocksize + nw] < tso_thresh
) # now only blocks where there is no movement, and wear are left
# check if we can compute wear internally
if temperature is not None and int_wear_temp > 0.0:
t_rmed_5s = moving_median(temperature, n5, 1)
t_rmean_5s = moving_mean(t_rmed_5s, n5, n5)
t_rmed_5m = moving_median(t_rmean_5s, 60, 1) # 5 min rolling median
temp_nonwear = t_rmed_5m < int_wear_temp
tso[temp_nonwear] = False # non-wear -> not a TSO opportunity
if int_wear_move > 0.0:
acc_rmean_5s = moving_mean(acc_rmd, n5, n5, axis=0)
acc_rsd_30m = moving_sd(acc_rmean_5s, 360, 1, axis=0, return_previous=False)
move_nonwear = pad(
(acc_rsd_30m < int_wear_move).any(axis=1),
pad_width=(150, 150),
constant_values=False,
)
tso[move_nonwear] = False
# drop rest blocks less than minimum allowed rest length
# rolling 5min, the underlying windows are 5s, so 12 * minutes => # of samples
tso = drop_min_blocks(
tso, 12 * min_rest_block, drop_value=1, replace_value=0, skip_bounds=True
)
# drop active blocks less than maximum allowed active length
tso = drop_min_blocks(
tso, 12 * max_act_break, drop_value=0, replace_value=1, skip_bounds=True
)
# get the indices of the longest bout
arg_start, arg_end = arg_longest_bout(tso, 1)
# get the timestamps of the longest bout
if arg_start is not None:
# account for left justified windows - times need to be bumped up half a window
# account for 5s windows in indexing
arg_start = (arg_start + 30) * n5 # 12 * 5 / 2 = 30
arg_end = (arg_end + 30) * n5
start, end = time[arg_start], time[arg_end]
else:
start = end = None
return start, end, arg_start, arg_end
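# Illustrative call pattern (an assumption for documentation only; the argument values
# below are hypothetical, not recommended defaults): for one day of 20 Hz wrist data
# with externally supplied wear indices, the function above might be called as
#     start, stop, i_start, i_stop = get_total_sleep_opportunity(
#         20.0, time, accel, None, wear_starts, wear_stops,
#         min_rest_block=30, max_act_break=60,
#         tso_min_thresh=0.1, tso_max_thresh=1.0, tso_perc=10, tso_factor=15.0,
#         int_wear_temp=25.0, int_wear_move=0.001, plot_fn=lambda arr: None,
#     )
# start/stop are timestamps of the longest low-movement wear bout (None if none is
# found), and i_start/i_stop are the corresponding indices into `time`.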
def compute_tso_threshold(arr, min_td=0.1, max_td=0.5, perc=10, factor=15.0):
"""
Computes the daily threshold value separating rest periods from active periods
for the TSO detection algorithm.
Parameters
----------
arr : array
Array of the absolute difference of the z-angle.
min_td : float
Minimum acceptable threshold value.
max_td : float
Maximum acceptable threshold value.
perc : integer, optional
Percentile to use for the threshold. Default is 10.
factor : float, optional
        Factor to multiply the percentile value by. Default is 15.0.
Returns
-------
td : float
"""
td = min((max((percentile(arr, perc) * factor, min_td)), max_td))
    return td | scikit-digital-health | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/sleep/tso.py | tso.py | 0.895554 | 0.616243
from numpy import (
any,
arctan,
pi,
roll,
abs,
argmax,
diff,
nonzero,
insert,
sqrt,
pad,
int_,
append,
mean,
var,
ascontiguousarray,
)
from scipy.signal import butter, sosfiltfilt
from skdh.utility import get_windowed_view
from skdh.utility import moving_mean, moving_sd, moving_median
from skdh.utility.internal import rle
__all__ = [
"compute_z_angle",
"compute_absolute_difference",
"drop_min_blocks",
"arg_longest_bout",
"compute_activity_index",
]
def get_weartime(acc_rmed, temp, fs, move_thresh, temp_thresh):
"""
Compute the wear time using acceleration and temperature data.
Parameters
----------
acc_rmed : numpy.ndarray
Rolling median acceleration with 5s windows and 1 sample skips.
temp : numpy.ndarray
Raw temperature data.
fs : float
Sampling frequency.
move_thresh : float
Threshold to classify acceleration as wear/nonwear
temp_thresh : float
Temperature threshold to classify as wear/nonwear
Returns
-------
    starts : numpy.ndarray
        Indices of the starts of wear-time blocks.
    stops : numpy.ndarray
        Indices of the stops of wear-time blocks.
"""
n5 = int(5 * fs)
# rolling 5s mean (non-overlapping windows)
mn = moving_mean(acc_rmed, n5, n5, axis=0)
# rolling 30min StDev. 5s windows -> 12 windows per minute
acc_rsd = moving_sd(mn, 12 * 30, 1, axis=0, return_previous=False)
# TODO note that this 30 min rolling standard deviation likely means that our wear/nonwear
    # times could be off by as much as 30 mins, due to windows extending into the wear time.
# this is likely going to be an issue for all wear time algorithms due to long
# windows, however.
# rolling 5s median of temperature
rmd = moving_median(temp, n5, skip=1)
# rolling 5s mean (non-overlapping)
mn = moving_mean(rmd, n5, n5)
# rolling 5m median
temp_rmd = moving_median(mn, 12 * 5, skip=1)
move_mask = any(acc_rsd > move_thresh, axis=1)
temp_mask = temp_rmd >= temp_thresh
# pad the movement mask, temperature mask is the correct size
npad = temp_mask.size - move_mask.size
move_mask = pad(move_mask, (0, npad), mode="constant", constant_values=0)
dwear = diff((move_mask | temp_mask).astype(int_))
starts = nonzero(dwear == 1)[0] + 1
stops = nonzero(dwear == -1)[0] + 1
if move_mask[0] or temp_mask[0]:
starts = insert(starts, 0, 0)
if move_mask[-1] or temp_mask[-1]:
stops = append(stops, move_mask.size)
return starts * n5, stops * n5
def compute_z_angle(acc):
"""
Computes the z-angle of a tri-axial accelerometer signal with columns X, Y, Z per sample.
Parameters
----------
    acc : array
        (N, 3) array of accelerations.
    Returns
    -------
    z : array
        Z-angle of each sample, in degrees.
"""
z = arctan(acc[:, 2] / sqrt(acc[:, 0] ** 2 + acc[:, 1] ** 2)) * (180.0 / pi)
return z
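# Worked example (not part of the original source): a sample lying along the x/z
# diagonal has a z-angle of 45 degrees:
#     compute_z_angle(array([[1.0, 0.0, 1.0]]))  # -> array([45.])
# (`array` is a numpy import used only for this illustration, not by this module)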
def compute_absolute_difference(arr):
"""
Computes the absolute difference between an array and itself shifted by 1 sample along the
first axis.
Parameters
----------
    arr : array
        Input array.
    Returns
    -------
    absd : array
        Absolute difference between `arr` and `arr` shifted by one sample.
"""
shifted = roll(arr, 1)
shifted[0] = shifted[1]
absd = abs(arr - shifted)
return absd
def drop_min_blocks(arr, min_block_size, drop_value, replace_value, skip_bounds=True):
"""
Drops (rescores) blocks of a desired value with length less than some minimum length.
(Ex. drop all blocks of value 1 with length < 5 and replace with new value 0).
Parameters
----------
arr : array
min_block_size : integer
Minimum acceptable block length in samples.
drop_value : integer
Value of blocks to examine.
replace_value : integer
        Value to replace dropped blocks with.
skip_bounds : boolean
If True, ignores the first and last blocks.
Returns
-------
arr : array
"""
lengths, starts, vals = rle(arr)
ctr = 0
n = len(lengths)
for length, start, val in zip(lengths, starts, vals):
ctr += 1
if skip_bounds and (ctr == 1 or ctr == n):
continue
if val == drop_value and length < min_block_size:
arr[start : start + length] = replace_value
return arr
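# Worked example (not part of the original source): with skip_bounds=True the first and
# last runs are left alone, and only interior runs of `drop_value` shorter than
# `min_block_size` are rescored; note the input array is modified in place:
#     x = array([0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0])
#     drop_min_blocks(x, min_block_size=3, drop_value=1, replace_value=0)
#     # -> array([0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0])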
def arg_longest_bout(arr, block_val):
"""
Finds the first and last indices of the longest block of a given value present in a 1D array.
Parameters
----------
arr : array
One-dimensional array.
block_val : integer
Value of the desired blocks.
Returns
-------
longest_bout : tuple
First, last indices of the longest block.
"""
lengths, starts, vals = rle(arr)
vals = vals.flatten()
val_mask = vals == block_val
if len(lengths[val_mask]):
max_index = argmax(lengths[val_mask])
max_start = starts[val_mask][max_index]
longest_bout = max_start, max_start + lengths[val_mask][max_index]
else:
longest_bout = None, None
return longest_bout
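# Worked example (not part of the original source): the returned pair is a half-open
# [start, stop) index range covering the longest run of `block_val`:
#     arg_longest_bout(array([0, 1, 1, 1, 0, 0, 1, 1, 0]), 1)  # -> (1, 4)
# and (None, None) is returned when `block_val` never occurs in the array.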
def compute_activity_index(fs, accel, hp_cut=0.25):
"""
Calculate the activity index
Parameters
----------
fs : float
Sampling frequency in Hz
accel : numpy.ndarray
Acceleration
hp_cut : float
High-pass filter cutoff
Returns
-------
ai : numpy.ndarray
The activity index of non-overlapping 60s windows
"""
# high pass filter
sos = butter(3, hp_cut * 2 / fs, btype="high", output="sos")
accel_hf = ascontiguousarray(sosfiltfilt(sos, accel, axis=0))
# non-overlapping 60s windows
acc_w = get_windowed_view(accel_hf, int(60 * fs), int(60 * fs))
# compute activity index
act_ind = sqrt(mean(var(acc_w, axis=2), axis=1))
    return act_ind | scikit-digital-health | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/sleep/utility.py | utility.py | 0.803521 | 0.569912
from numpy import array, convolve, int_
from skdh.sleep.utility import rle
def compute_sleep_predictions(act_index, sf=0.243, rescore=True):
"""
Apply the Cole-Kripke algorithm to activity index data
Parameters
----------
act_index : numpy.ndarray
Activity index calculated from accelerometer data on 1 minute windows.
sf : float, optional
Scale factor used for the predictions. Default is 0.243, which was optimized
for activity index. Recommended range if changing is between 0.15 and 0.3 depending
on desired sensitivity, and possibly the population being observed.
rescore : bool, optional
If True, applies Webster's rescoring rules to the sleep predictions to improve
specificity.
Returns
    -------
    predictions : numpy.ndarray
        Sleep predictions for each 1-minute window; 1 indicates sleep, 0 indicates wake.
Notes
-----
Applies Webster's rescoring rules as described in the Cole-Kripke paper.
"""
# paper writes this backwards [::-1]. For convolution has to be written this way though
kernel = array([0.0, 0.0, 4.024, 5.84, 16.19, 5.07, 3.75, 6.87, 4.64]) * sf
scores = convolve(act_index, kernel, "same")
predictions = (scores < 0.5).astype(int_) # sleep as positive
if rescore:
wake_bin = 0
for t in range(predictions.size):
if not predictions[t]:
wake_bin += 1
else:
if (
wake_bin >= 15
): # rule c: >= 15 minutes of wake -> next 4min of sleep rescored
predictions[t : t + 4] = 0
elif (
10 <= wake_bin < 15
): # rule b: >= 10 minutes of wake -> next 3 min rescored
predictions[t : t + 3] = 0
elif (
4 <= wake_bin < 10
): # rule a: >=4 min of wake -> next 1min of sleep rescored
predictions[t] = 0
wake_bin = 0 # reset
# rule d: [>10 min wake][<=6 min sleep][>10min wake] gets rescored
dt, changes, vals = rle(predictions)
mask = (changes >= 10) & (changes < (predictions.size - 10)) & (dt <= 6) & vals
for start, dur in zip(changes[mask], dt[mask]):
predictions[start : start + dur] = 0
    return predictions | scikit-digital-health | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/sleep/sleep_classification.py | sleep_classification.py | 0.901303 | 0.577972
from abc import ABC, abstractmethod
from collections.abc import Iterator, Sequence
import json
from warnings import warn
from pandas import DataFrame
from numpy import float_, asarray, zeros, sum, moveaxis
__all__ = ["Bank"]
class ArrayConversionError(Exception):
pass
def get_n_feats(size, index):
if isinstance(index, int):
return 1
elif isinstance(index, (Iterator, Sequence)):
return len(index)
elif isinstance(index, slice):
return len(range(*index.indices(size)))
elif isinstance(index, type(Ellipsis)):
return size
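# Illustrative behaviour (not part of the original source), for an index axis of length 10:
#     get_n_feats(10, 3)               -> 1   (single integer index)
#     get_n_feats(10, [0, 2, 5])       -> 3   (sequence of indices)
#     get_n_feats(10, slice(0, 10, 2)) -> 5   (slice)
#     get_n_feats(10, ...)             -> 10  (Ellipsis keeps every index)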
def partial_index_check(index):
if index is None:
index = ...
if not isinstance(index, (int, Iterator, Sequence, type(...), slice)):
raise IndexError(f"Index type ({type(index)}) not understood.")
if isinstance(index, str):
raise IndexError("Index type (str) not understood.")
return index
def normalize_indices(nfeat, index):
if index is None:
return [...] * nfeat
elif not isinstance(index, (Iterator, Sequence)): # slice, single integer, etc
return [partial_index_check(index)] * nfeat
elif all([isinstance(i, int) for i in index]): # iterable of ints
return [index] * nfeat
elif isinstance(index, Sequence): # able to be indexed
return [partial_index_check(i) for i in index]
else: # pragma: no cover
        raise IndexError(f"Index type ({type(index)}) not understood.")
def normalize_axes(ndim, axis, ind_axis):
"""
Normalize input axes to be positive/correct for how the swapping has to work
"""
if axis == ind_axis:
raise ValueError("axis and index_axis cannot be the same")
if ndim == 1:
return 0, None
elif ndim >= 2:
"""
| shape | ax | ia | move1 | ax | ia | res | ax | ia | res move |
|--------|----|----|--------|----|----|-------|----|----|----------|
| (a, b) | 0 | 1 | (b, a) | 0 | 0 | (bf,) | | | |
| (a, b) | 0 | N | (b, a) | 0 | N | (f, b)| | | |
        | (a, b) | 1  | 0  |        |    |    | (af,) |    |    |          |
| (a, b) | 1 | N | | | | (f, a)| | | |
| shape | ax| ia | move1 | ax| ia| move2 | res | | ia| res move |
|----------|---|------|----------|---|---|----------|----------|----|---|----------|
| (a, b, c)| 0 | 1(0) | (b, c, a)| | | | (bf, c) | 0 | 0 | |
| (a, b, c)| 0 | 2(1) | (b, c, a)| | 1 | (c, b, a)| (cf, b) | 0 | 1 | (b, cf) |
| (a, b, c)| 0 | N | (b, c, a)| | | | (f, b, c)| | | |
| (a, b, c)| 1 | 0 | (a, c, b)| | | | (af, c) | 0 | 0 | |
| (a, b, c)| 1 | 2(1) | (a, c, b)| | 1 | (c, a, b)| (cf, a) | 0 | 1 | (a, cf) |
| (a, b, c)| 1 | N | (a, c, b)| | | | (f, a, c)| | | |
| (a, b, c)| 2 | 0 | (a, b, c)| | | | (af, b) | 0 | 0 | |
| (a, b, c)| 2 | 1 | (a, b, c)| | 1 | (b, a, c)| (bf, a) | 0 | 1 | (a, bf) |
| (a, b, c)| 2 | N | (a, b, c)| | | | (f, a, b)| | | |
| shape | ax| ia | move1 | ia| move2 | res | | ia| res move |
|------------|---|------|-------------|---|-------------|-------------|---|---|-----------|
|(a, b, c, d)| 0 | 1(0) | (b, c, d, a)| | | (bf, c, d) | 0 | 0 | |
|(a, b, c, d)| 0 | 2(1) | (b, c, d, a)| 1 | (c, b, d, a)| (cf, b, d) | 0 | 1 | (b, cf, d)|
        |(a, b, c, d)| 0 | 3(2) | (b, c, d, a)| 2 | (d, b, c, a)| (df, b, c) | 0 | 2 | (b, c, df)|
|(a, b, c, d)| 0 | N | (b, c, d, a)| | | (f, b, c, d)| | | |
|(a, b, c, d)| 1 | 0 | (a, c, d, b)| | | (af, c, d) | | | |
|(a, b, c, d)| 1 | 2(1) | (a, c, d, b)| 1 | (c, a, d, b)| (cf, a, d) | 0 | 1 | (a, cf, d)|
|(a, b, c, d)| 1 | 3(2) | (a, c, d, b)| 2 | (d, a, c, b)| (df, a, c) | 0 | 2 | (a, c, df)|
|(a, b, c, d)| 1 | N | (a, c, d, b)| | | (f, a, c, d)| | | |
|(a, b, c, d)| 2 | 0 | (a, b, d, c)| | | (af, b, d) | | | |
|(a, b, c, d)| 2 | 1 | (a, b, d, c)| 1 | (b, a, d, c)| (bf, a, d) | 0 | 1 | (a, bf, d)|
|(a, b, c, d)| 2 | 3(2) | (a, b, d, c)| 2 | (d, a, b, c)| (df, a, b) | 0 | 2 | (a, b, df)|
|(a, b, c, d)| 2 | N | (a, b, d, c)| | | (f, a, b, d)| | | |
|(a, b, c, d)| 3 | 0 | (a, b, c, d)| | | (af, b, c) | | | |
|(a, b, c, d)| 3 | 1 | (a, b, c, d)| 1 | (b, a, c, d)| (bf, a, c) | 0 | 1 | (a, bf, c)|
|(a, b, c, d)| 3 | 2 | (a, b, c, d)| 2 | (c, a, b, d)| (cf, a, b) | 0 | 2 | (a, b, cf)|
|(a, b, c, d)| 3 | N | (a, b, c, d)| | | (f, a, b, c)| | | |
"""
ax = axis if axis >= 0 else ndim + axis
if ind_axis is None:
return ax, None
ia = ind_axis if ind_axis >= 0 else ndim + ind_axis
if ia > ax:
ia -= 1
return ax, ia
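# Illustrative behaviour (not part of the original source), matching the tables above:
#     normalize_axes(3, -1, None)  -> (2, None)  # negative axes are made positive
#     normalize_axes(3, 0, 2)      -> (0, 1)     # index axis shifts down once the
#                                                # computation axis has been moved
#     normalize_axes(1, 0, None)   -> (0, None)  # 1D signals never have an index axis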
class Bank:
"""
A feature bank object for ease in creating a table or pipeline of features to be computed.
Parameters
----------
bank_file : {None, path-like}, optional
Path to a saved bank file to load. Optional
Examples
--------
"""
__slots__ = ("_feats", "_indices")
def __str__(self):
return "Bank"
def __repr__(self):
s = "Bank["
for f in self._feats:
s += f"\n\t{f!r},"
s += "\n]"
return s
def __contains__(self, item):
return item in self._feats
def __len__(self):
return len(self._feats)
def __init__(self, bank_file=None):
# initialize some variables
self._feats = []
self._indices = []
if bank_file is not None:
self.load(bank_file)
def add(self, features, index=None):
"""
Add a feature or features to the pipeline.
Parameters
----------
features : {Feature, list}
Single signal Feature, or list of signal Features to add to the feature Bank
index : {int, slice, list}, optional
            Index to be applied to data input to each feature. Either an index that will
            apply to every feature, or a list of indices corresponding to each feature
            being added.
"""
if isinstance(features, Feature):
if features in self:
warn(
f"Feature {features!s} already in the Bank, will be duplicated.",
UserWarning,
)
self._indices.append(partial_index_check(index))
self._feats.append(features)
elif all([isinstance(i, Feature) for i in features]):
if any([ft in self for ft in features]):
warn("Feature already in the Bank, will be duplicated.", UserWarning)
self._indices.extend(normalize_indices(len(features), index))
self._feats.extend(features)
def save(self, file):
"""
Save the feature Bank to a file for a persistent object that can be loaded later to create
the same Bank as before
Parameters
----------
file : path-like
File to be saved to. Creates a new file or overwrites an existing file.
"""
out = []
for i, ft in enumerate(self._feats):
idx = "Ellipsis" if self._indices[i] is Ellipsis else self._indices[i]
out.append(
{ft.__class__.__name__: {"Parameters": ft._params, "Index": idx}}
)
with open(file, "w") as f:
json.dump(out, f)
def load(self, file):
"""
Load a previously saved feature Bank from a json file.
Parameters
----------
file : path-like
File to be read to create the feature Bank.
"""
# the import must be here, otherwise a circular import error occurs
from skdh.features import lib
with open(file, "r") as f:
feats = json.load(f)
for ft in feats:
name = list(ft.keys())[0]
params = ft[name]["Parameters"]
index = ft[name]["Index"]
if index == "Ellipsis":
index = Ellipsis
# add it to the feature bank
self.add(getattr(lib, name)(**params), index=index)
def compute(
self, signal, fs=1.0, *, axis=-1, index_axis=None, indices=None, columns=None
):
"""
Compute the specified features for the given signal
Parameters
----------
signal : {array-like}
Array-like signal to have features computed for.
fs : float, optional
Sampling frequency in Hz. Default is 1Hz
axis : int, optional
Axis along which to compute the features. Default is -1.
index_axis : {None, int}, optional
Axis corresponding to the indices specified in `Bank.add` or `indices`. Default is
None, which assumes that this axis is not part of the signal. Note that setting this to
None means values for `indices` or the indices set in `Bank.add` will be ignored.
indices : {None, int, list-like, slice, ellipsis}, optional
            Indices to apply to the input signal. Either None, an integer, list-like, or slice to apply
to each feature, or a list-like of lists/objects with a 1:1 correspondence to the
features present in the Bank. If provided, takes precedence over any values given in
`Bank.add`. Default is None, which will use indices from `Bank.add`.
columns : {None, list}, optional
Columns to use if providing a dataframe. Default is None (uses all columns).
Returns
-------
feats : numpy.ndarray
Computed features.
"""
# standardize the input signal
if isinstance(signal, DataFrame):
columns = columns if columns is not None else signal.columns
x = signal[columns].values.astype(float_)
else:
try:
x = asarray(signal, dtype=float_)
except ValueError as e:
raise ArrayConversionError("Error converting signal to ndarray") from e
axis, index_axis = normalize_axes(x.ndim, axis, index_axis)
if index_axis is None:
indices = [...] * len(self)
else:
if indices is None:
indices = self._indices
else:
indices = normalize_indices(len(self), indices)
        # get the number of features that will result. Needed to allocate the feature array
if index_axis is None:
# don't have to move any other axes than the computation axis
x = moveaxis(x, axis, -1)
            # number of feats is 1 per feature
n_feats = [1] * len(self)
feats = zeros((sum(n_feats),) + x.shape[:-1], dtype=float_)
else:
# move both the computation and index axis. do this in two steps to allow for undoing
# just the index axis swap later. The index_axis has been adjusted appropriately
# to match this axis move in 2 steps
x = moveaxis(x, axis, -1)
x = moveaxis(x, index_axis, 0)
n_feats = []
for ind in indices:
n_feats.append(get_n_feats(x.shape[0], ind))
feats = zeros((sum(n_feats),) + x.shape[1:-1], dtype=float_)
feat_i = 0 # keep track of where in the feature array we are
for i, ft in enumerate(self._feats):
feats[feat_i : feat_i + n_feats[i]] = ft.compute(
x[indices[i]], fs=fs, axis=-1
)
feat_i += n_feats[i]
# Move the shape back to the correct one.
# only have to do this if there is an index axis, because otherwise the array is still in
# the same order as originally
if index_axis is not None:
feats = moveaxis(feats, 0, index_axis) # undo the previous swap/move
return feats
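# Illustrative usage of Bank (not part of the original source; the feature class names
# are assumed to come from skdh.features.lib and may differ between versions):
#     bank = Bank()
#     bank.add([Mean(), Range()])              # add Feature instances
#     feats = bank.compute(signal, fs=50.0)    # signal shaped (..., n_samples)
#     bank.save("features.json")               # persist; later: Bank("features.json")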
class Feature(ABC):
"""
Base feature class
"""
def __str__(self):
return self.__class__.__name__
def __repr__(self):
s = self.__class__.__name__ + "("
for p in self._params:
s += f"{p}={self._params[p]!r}, "
if len(self._params) > 0:
s = s[:-2]
return s + ")"
def __eq__(self, other):
if isinstance(other, type(self)):
# double check the name
eq = str(other) == str(self)
# check the parameters
eq &= other._params == self._params
return eq
else:
return False
__slots__ = ("_params",)
def __init__(self, **params):
self._params = params
@abstractmethod
def compute(self, signal, fs=1.0, *, axis=-1):
"""
Compute the signal feature.
Parameters
----------
signal : array-like
Signal to compute the feature over.
fs : float, optional
Sampling frequency in Hz. Default is 1.0
axis : int, optional
Axis over which to compute the feature. Default is -1 (last dimension)
Returns
-------
feat : numpy.ndarray
ndarray of the computed feature
"""
# move the computation axis to the end
return moveaxis(asarray(signal, dtype=float_), axis, -1) | scikit-digital-health | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/features/core.py | core.py | from abc import ABC, abstractmethod
from collections.abc import Iterator, Sequence
import json
from warnings import warn
from pandas import DataFrame
from numpy import float_, asarray, zeros, sum, moveaxis
__all__ = ["Bank"]
class ArrayConversionError(Exception):
pass
def get_n_feats(size, index):
if isinstance(index, int):
return 1
elif isinstance(index, (Iterator, Sequence)):
return len(index)
elif isinstance(index, slice):
return len(range(*index.indices(size)))
elif isinstance(index, type(Ellipsis)):
return size
def partial_index_check(index):
if index is None:
index = ...
if not isinstance(index, (int, Iterator, Sequence, type(...), slice)):
raise IndexError(f"Index type ({type(index)}) not understood.")
if isinstance(index, str):
raise IndexError("Index type (str) not understood.")
return index
def normalize_indices(nfeat, index):
if index is None:
return [...] * nfeat
elif not isinstance(index, (Iterator, Sequence)): # slice, single integer, etc
return [partial_index_check(index)] * nfeat
elif all([isinstance(i, int) for i in index]): # iterable of ints
return [index] * nfeat
elif isinstance(index, Sequence): # able to be indexed
return [partial_index_check(i) for i in index]
else: # pragma: no cover
        raise IndexError(f"Index type ({type(index)}) not understood.")
def normalize_axes(ndim, axis, ind_axis):
"""
Normalize input axes to be positive/correct for how the swapping has to work
"""
if axis == ind_axis:
raise ValueError("axis and index_axis cannot be the same")
if ndim == 1:
return 0, None
elif ndim >= 2:
"""
| shape | ax | ia | move1 | ax | ia | res | ax | ia | res move |
|--------|----|----|--------|----|----|-------|----|----|----------|
| (a, b) | 0 | 1 | (b, a) | 0 | 0 | (bf,) | | | |
| (a, b) | 0 | N | (b, a) | 0 | N | (f, b)| | | |
| (a, b) | 1 | 0 | | | | (3a,) | | | |
| (a, b) | 1 | N | | | | (f, a)| | | |
| shape | ax| ia | move1 | ax| ia| move2 | res | | ia| res move |
|----------|---|------|----------|---|---|----------|----------|----|---|----------|
| (a, b, c)| 0 | 1(0) | (b, c, a)| | | | (bf, c) | 0 | 0 | |
| (a, b, c)| 0 | 2(1) | (b, c, a)| | 1 | (c, b, a)| (cf, b) | 0 | 1 | (b, cf) |
| (a, b, c)| 0 | N | (b, c, a)| | | | (f, b, c)| | | |
| (a, b, c)| 1 | 0 | (a, c, b)| | | | (af, c) | 0 | 0 | |
| (a, b, c)| 1 | 2(1) | (a, c, b)| | 1 | (c, a, b)| (cf, a) | 0 | 1 | (a, cf) |
| (a, b, c)| 1 | N | (a, c, b)| | | | (f, a, c)| | | |
| (a, b, c)| 2 | 0 | (a, b, c)| | | | (af, b) | 0 | 0 | |
| (a, b, c)| 2 | 1 | (a, b, c)| | 1 | (b, a, c)| (bf, a) | 0 | 1 | (a, bf) |
| (a, b, c)| 2 | N | (a, b, c)| | | | (f, a, b)| | | |
| shape | ax| ia | move1 | ia| move2 | res | | ia| res move |
|------------|---|------|-------------|---|-------------|-------------|---|---|-----------|
|(a, b, c, d)| 0 | 1(0) | (b, c, d, a)| | | (bf, c, d) | 0 | 0 | |
|(a, b, c, d)| 0 | 2(1) | (b, c, d, a)| 1 | (c, b, d, a)| (cf, b, d) | 0 | 1 | (b, cf, d)|
|(a, b, c, d)| 0 | 3(2) | (b, c, d, a)| 2 | (d, b, c, a)| (df, b, c) | 0 | 2 | (d, c, df)|
|(a, b, c, d)| 0 | N | (b, c, d, a)| | | (f, b, c, d)| | | |
|(a, b, c, d)| 1 | 0 | (a, c, d, b)| | | (af, c, d) | | | |
|(a, b, c, d)| 1 | 2(1) | (a, c, d, b)| 1 | (c, a, d, b)| (cf, a, d) | 0 | 1 | (a, cf, d)|
|(a, b, c, d)| 1 | 3(2) | (a, c, d, b)| 2 | (d, a, c, b)| (df, a, c) | 0 | 2 | (a, c, df)|
|(a, b, c, d)| 1 | N | (a, c, d, b)| | | (f, a, c, d)| | | |
|(a, b, c, d)| 2 | 0 | (a, b, d, c)| | | (af, b, d) | | | |
|(a, b, c, d)| 2 | 1 | (a, b, d, c)| 1 | (b, a, d, c)| (bf, a, d) | 0 | 1 | (a, bf, d)|
|(a, b, c, d)| 2 | 3(2) | (a, b, d, c)| 2 | (d, a, b, c)| (df, a, b) | 0 | 2 | (a, b, df)|
|(a, b, c, d)| 2 | N | (a, b, d, c)| | | (f, a, b, d)| | | |
|(a, b, c, d)| 3 | 0 | (a, b, c, d)| | | (af, b, c) | | | |
|(a, b, c, d)| 3 | 1 | (a, b, c, d)| 1 | (b, a, c, d)| (bf, a, c) | 0 | 1 | (a, bf, c)|
|(a, b, c, d)| 3 | 2 | (a, b, c, d)| 2 | (c, a, b, d)| (cf, a, b) | 0 | 2 | (a, b, cf)|
|(a, b, c, d)| 3 | N | (a, b, c, d)| | | (f, a, b, c)| | | |
"""
ax = axis if axis >= 0 else ndim + axis
if ind_axis is None:
return ax, None
ia = ind_axis if ind_axis >= 0 else ndim + ind_axis
if ia > ax:
ia -= 1
return ax, ia
class Bank:
"""
A feature bank object for ease in creating a table or pipeline of features to be computed.
Parameters
----------
bank_file : {None, path-like}, optional
Path to a saved bank file to load. Optional
Examples
--------
"""
__slots__ = ("_feats", "_indices")
def __str__(self):
return "Bank"
def __repr__(self):
s = "Bank["
for f in self._feats:
s += f"\n\t{f!r},"
s += "\n]"
return s
def __contains__(self, item):
return item in self._feats
def __len__(self):
return len(self._feats)
def __init__(self, bank_file=None):
# initialize some variables
self._feats = []
self._indices = []
if bank_file is not None:
self.load(bank_file)
def add(self, features, index=None):
"""
Add a feature or features to the pipeline.
Parameters
----------
features : {Feature, list}
Single signal Feature, or list of signal Features to add to the feature Bank
index : {int, slice, list}, optional
Index to be applied to data input to each features. Either a index that will
apply to every feature, or a list of features corresponding to each feature being
added.
"""
if isinstance(features, Feature):
if features in self:
warn(
f"Feature {features!s} already in the Bank, will be duplicated.",
UserWarning,
)
self._indices.append(partial_index_check(index))
self._feats.append(features)
elif all([isinstance(i, Feature) for i in features]):
if any([ft in self for ft in features]):
warn("Feature already in the Bank, will be duplicated.", UserWarning)
self._indices.extend(normalize_indices(len(features), index))
self._feats.extend(features)
def save(self, file):
"""
Save the feature Bank to a file for a persistent object that can be loaded later to create
the same Bank as before
Parameters
----------
file : path-like
File to be saved to. Creates a new file or overwrites an existing file.
"""
out = []
for i, ft in enumerate(self._feats):
idx = "Ellipsis" if self._indices[i] is Ellipsis else self._indices[i]
out.append(
{ft.__class__.__name__: {"Parameters": ft._params, "Index": idx}}
)
with open(file, "w") as f:
json.dump(out, f)
def load(self, file):
"""
Load a previously saved feature Bank from a json file.
Parameters
----------
file : path-like
File to be read to create the feature Bank.
"""
# the import must be here, otherwise a circular import error occurs
from skdh.features import lib
with open(file, "r") as f:
feats = json.load(f)
for ft in feats:
name = list(ft.keys())[0]
params = ft[name]["Parameters"]
index = ft[name]["Index"]
if index == "Ellipsis":
index = Ellipsis
# add it to the feature bank
self.add(getattr(lib, name)(**params), index=index)
def compute(
self, signal, fs=1.0, *, axis=-1, index_axis=None, indices=None, columns=None
):
"""
Compute the specified features for the given signal
Parameters
----------
signal : {array-like}
Array-like signal to have features computed for.
fs : float, optional
Sampling frequency in Hz. Default is 1Hz
axis : int, optional
Axis along which to compute the features. Default is -1.
index_axis : {None, int}, optional
Axis corresponding to the indices specified in `Bank.add` or `indices`. Default is
None, which assumes that this axis is not part of the signal. Note that setting this to
None means values for `indices` or the indices set in `Bank.add` will be ignored.
indices : {None, int, list-like, slice, ellipsis}, optional
            Indices to apply to the input signal. Either None, an integer, list-like, or slice to apply
to each feature, or a list-like of lists/objects with a 1:1 correspondence to the
features present in the Bank. If provided, takes precedence over any values given in
`Bank.add`. Default is None, which will use indices from `Bank.add`.
columns : {None, list}, optional
Columns to use if providing a dataframe. Default is None (uses all columns).
Returns
-------
feats : numpy.ndarray
Computed features.
"""
# standardize the input signal
if isinstance(signal, DataFrame):
columns = columns if columns is not None else signal.columns
x = signal[columns].values.astype(float_)
else:
try:
x = asarray(signal, dtype=float_)
except ValueError as e:
raise ArrayConversionError("Error converting signal to ndarray") from e
axis, index_axis = normalize_axes(x.ndim, axis, index_axis)
if index_axis is None:
indices = [...] * len(self)
else:
if indices is None:
indices = self._indices
else:
indices = normalize_indices(len(self), indices)
        # get the number of features that will result; needed to allocate the feature array
if index_axis is None:
# don't have to move any other axes than the computation axis
x = moveaxis(x, axis, -1)
            # number of feature values is 1 per feature when there is no index axis
n_feats = [1] * len(self)
feats = zeros((sum(n_feats),) + x.shape[:-1], dtype=float_)
else:
# move both the computation and index axis. do this in two steps to allow for undoing
# just the index axis swap later. The index_axis has been adjusted appropriately
# to match this axis move in 2 steps
x = moveaxis(x, axis, -1)
x = moveaxis(x, index_axis, 0)
n_feats = []
for ind in indices:
n_feats.append(get_n_feats(x.shape[0], ind))
feats = zeros((sum(n_feats),) + x.shape[1:-1], dtype=float_)
feat_i = 0 # keep track of where in the feature array we are
for i, ft in enumerate(self._feats):
feats[feat_i : feat_i + n_feats[i]] = ft.compute(
x[indices[i]], fs=fs, axis=-1
)
feat_i += n_feats[i]
# Move the shape back to the correct one.
# only have to do this if there is an index axis, because otherwise the array is still in
# the same order as originally
if index_axis is not None:
feats = moveaxis(feats, 0, index_axis) # undo the previous swap/move
return feats
class Feature(ABC):
"""
Base feature class
"""
def __str__(self):
return self.__class__.__name__
def __repr__(self):
s = self.__class__.__name__ + "("
for p in self._params:
s += f"{p}={self._params[p]!r}, "
if len(self._params) > 0:
s = s[:-2]
return s + ")"
def __eq__(self, other):
if isinstance(other, type(self)):
# double check the name
eq = str(other) == str(self)
# check the parameters
eq &= other._params == self._params
return eq
else:
return False
__slots__ = ("_params",)
def __init__(self, **params):
self._params = params
@abstractmethod
def compute(self, signal, fs=1.0, *, axis=-1):
"""
Compute the signal feature.
Parameters
----------
signal : array-like
Signal to compute the feature over.
fs : float, optional
Sampling frequency in Hz. Default is 1.0
axis : int, optional
Axis over which to compute the feature. Default is -1 (last dimension)
Returns
-------
feat : numpy.ndarray
ndarray of the computed feature
"""
# move the computation axis to the end
        return moveaxis(asarray(signal, dtype=float_), axis, -1)

# ==== end of file: skdh/features/core.py (package: scikit-digital-health) ====
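# -----------------------------------------------------------------------------------
# A minimal usage sketch for the Bank/Feature pipeline above (not part of the library
# source). It assumes Bank and the concrete features are re-exported at `skdh.features`;
# if your install does not expose them there, import Bank from `skdh.features.core`
# and the features from `skdh.features.lib`.
# -----------------------------------------------------------------------------------
import numpy as np
from skdh.features import Bank, SignalEntropy, SampleEntropy

bank = Bank()
bank.add([SignalEntropy(), SampleEntropy(m=4, r=1.0)])

# 150 windows of 3-axis data, 500 samples per window
signal = np.random.default_rng(0).normal(size=(150, 3, 500))

# with index_axis=None each feature is computed over the last axis of every slice,
# so the expected output shape here is (n_features, 150, 3)
feats = bank.compute(signal, fs=50.0, axis=-1)

# the bank can also be persisted and re-created later
bank.save("feature_bank.json")
bank2 = Bank(bank_file="feature_bank.json")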
from skdh.features.core import Feature
from skdh.features.lib import extensions
__all__ = ["SignalEntropy", "SampleEntropy", "PermutationEntropy"]
class SignalEntropy(Feature):
r"""
    A measure of the information contained in a signal. Also described as a measure of how
surprising the outcome of a variable is.
Notes
-----
The entropy is estimated using the histogram of the input signal. Bin limits for the
histogram are defined per
.. math::
n_{bins} = ceil(\sqrt{N})
\delta = \frac{x_{max} - x_{min}}{N - 1}
bin_{min} = x_{min} - \frac{\delta}{2}
bin_{max} = x_{max} + \frac{\delta}{2}
where :math:`N` is the number of samples in the signal. Note that the data
is standardized before computing (using mean and standard deviation).
With the histogram, then the estimate of the entropy is computed per
.. math::
H_{est} = -\sum_{i=1}^kf(x_i)ln(f(x_i)) + ln(w) - bias
w = \frac{bin_{max} - bin_{min}}{n_{bins}}
bias = -\frac{n_{bins} - 1}{2N}
Because of the standardization before the histogram computation, the entropy
estimate is scaled again per
.. math:: H_{est} = exp(H_{est}^2) - 2
References
----------
.. [1] Wallis, Kenneth. "A note on the calculation of entropy from histograms". 2006.
https://warwick.ac.uk/fac/soc/economics/staff/academic/wallis/publications/entropy.pdf
"""
__slots__ = ()
def __init__(self):
super(SignalEntropy, self).__init__()
def compute(self, signal, *, axis=-1, **kwargs):
"""
compute(signal, *, axis=-1)
Compute the signal entropy
Parameters
----------
signal : array-like
Array-like containing values to compute the signal entropy for.
axis : int, optional
Axis along which the signal entropy will be computed. Ignored if `signal` is a
pandas.DataFrame. Default is last (-1).
Returns
-------
sig_ent : numpy.ndarray
Computed signal entropy.
"""
x = super().compute(signal, axis=axis)
return extensions.signal_entropy(x)
class SampleEntropy(Feature):
r"""
A measure of the complexity of a time-series signal. Sample entropy is a modification of
approximate entropy, but has the benefit of being data-length independent and having
an easier implementation. Smaller values indicate more self-similarity in the dataset,
and/or less noise.
Parameters
----------
m : int, optional
Set length for comparison (aka embedding dimension). Default is 4
r : float, optional
Maximum distance between sets. Default is 1.0
Notes
-----
Sample entropy first computes the probability that if two sets of length :math:`m`
    simultaneous data points have distance :math:`<r`, then two sets of length :math:`m+1`
simultaneous data points also have distance :math:`<r`, and then takes the negative
natural logarithm of this probability.
.. math:: E_{sample} = -ln\frac{A}{B}
    where :math:`A` is the number of :math:`m+1` vector pairs with distance :math:`<r`
    and :math:`B` is the number of :math:`m` vector pairs with distance :math:`<r`.
The distance metric used is the Chebyshev distance, which is defined as the maximum
absolute value of the sample-by-sample difference between two sets of the same length
References
----------
.. [1] https://archive.physionet.org/physiotools/sampen/c/sampen.c
"""
__slots__ = ("m", "r")
def __init__(self, m=4, r=1.0):
super(SampleEntropy, self).__init__(m=m, r=r)
self.m = m
self.r = r
def compute(self, signal, *, axis=-1, **kwargs):
"""
compute(signal, *, axis=-1)
Compute the sample entropy of a signal
Parameters
----------
signal : array-like
Array-like containing values to compute the sample entropy for.
axis : int, optional
            Axis along which the sample entropy will be computed. Ignored if `signal` is a
pandas.DataFrame. Default is last (-1).
Returns
-------
samp_en : numpy.ndarray
Computed sample entropy.
"""
x = super().compute(signal, axis=axis)
return extensions.sample_entropy(x, self.m, self.r)
class PermutationEntropy(Feature):
"""
    A measure of signal complexity, based on how the temporal signal behaves according to
    a series of ordinal patterns.
Parameters
----------
order : int, optional
Order (length of sub-signals) to use in the computation. Default is 3
delay : int, optional
Time-delay to use in computing the sub-signals. Default is 1 sample.
normalize : bool, optional
Normalize the output between 0 and 1. Default is False.
"""
__slots__ = ("order", "delay", "normalize")
def __init__(self, order=3, delay=1, normalize=False):
super(PermutationEntropy, self).__init__(
            order=order, delay=delay, normalize=normalize
)
self.order = order
self.delay = delay
self.normalize = normalize
def compute(self, signal, *, axis=-1, **kwargs):
"""
compute(signal, *, axis=-1)
Compute the permutation entropy
Parameters
----------
signal : array-like
            Array-like containing values to compute the permutation entropy for.
        axis : int, optional
            Axis along which the permutation entropy will be computed. Ignored if `signal` is a
pandas.DataFrame. Default is last (-1).
Returns
-------
perm_en : numpy.ndarray
Computed permutation entropy.
"""
x = super().compute(signal, axis=axis)
        return extensions.permutation_entropy(x, self.order, self.delay, self.normalize)

# ==== end of file: skdh/features/lib/entropy.py (package: scikit-digital-health) ====
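# -----------------------------------------------------------------------------------
# A minimal usage sketch for the entropy features above (not part of the library
# source). Assumes these classes are importable from `skdh.features`; adjust the
# import to `skdh.features.lib` if your install exposes them there instead.
# -----------------------------------------------------------------------------------
import numpy as np
from skdh.features import SignalEntropy, SampleEntropy, PermutationEntropy

rng = np.random.default_rng(1)
x = rng.normal(size=(4, 256))  # 4 windows of 256 samples each

sig_ent = SignalEntropy().compute(x, axis=-1)            # expected: one value per window
samp_en = SampleEntropy(m=4, r=1.0).compute(x, axis=-1)  # expected: one value per window
perm_en = PermutationEntropy(order=3, delay=1, normalize=True).compute(x, axis=-1)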
from numpy import zeros, ceil, log2, sort, sum, diff, sign, maximum
import pywt
from skdh.features.core import Feature
__all__ = ["DetailPower", "DetailPowerRatio"]
class DetailPower(Feature):
"""
The summed power in the detail levels that span the chosen frequency band.
Parameters
----------
wavelet : str
Wavelet to use. Options are the discrete wavelets in `PyWavelets`.
Default is 'coif4'.
freq_band : array_like
2-element array-like of the frequency band (Hz) to get the power in.
Default is [1, 3].
References
----------
.. [1] Sekine, M. et al. "Classification of waist-acceleration signals in a
continuous walking record." Medical Engineering & Physics. Vol. 22.
Pp 285-291. 2000.
"""
__slots__ = ("wave", "f_band")
_wavelet_options = pywt.wavelist(kind="discrete")
def __init__(self, wavelet="coif4", freq_band=None):
super().__init__(wavelet=wavelet, freq_band=freq_band)
self.wave = wavelet
if freq_band is not None:
self.f_band = sort(freq_band)
else:
self.f_band = [1.0, 3.0]
def compute(self, signal, fs=1.0, *, axis=-1):
"""
Compute the detail power
Parameters
----------
signal : array-like
Array-like containing values to compute the detail power for.
fs : float, optional
Sampling frequency in Hz. If not provided, default is 1.0Hz.
axis : int, optional
            Axis along which the detail power will be computed. Ignored if
`signal` is a pandas.DataFrame. Default is last (-1).
Returns
-------
power : numpy.ndarray
Computed detail power.
"""
x = super().compute(signal, fs, axis=axis)
# computation
lvls = [
int(ceil(log2(fs / self.f_band[0]))), # maximum level needed
int(ceil(log2(fs / self.f_band[1]))), # minimum level to include in sum
]
# TODO test effect of mode on result
cA, *cD = pywt.wavedec(x, self.wave, mode="symmetric", level=lvls[0], axis=-1)
# set non necessary levels to 0
for i in range(lvls[0] - lvls[1] + 1, lvls[0]):
cD[i][:] = 0.0
# reconstruct and get negative->positive zero crossings
xr = pywt.waverec((cA,) + tuple(cD), self.wave, mode="symmetric", axis=-1)
N = sum(diff(sign(xr), axis=-1) > 0, axis=-1).astype(float)
# ensure no 0 values to prevent divide by 0
N = maximum(N, 1e-10)
rshape = x.shape[:-1]
result = zeros(rshape)
for i in range(lvls[0] - lvls[1] + 1):
result += sum(cD[i] ** 2, axis=-1)
return result / N
class DetailPowerRatio(Feature):
"""
The ratio of the power in the detail signals that span the specified
frequency band. Uses the discrete wavelet transform to break down the
signal into constituent components at different frequencies.
Parameters
----------
wavelet : str
Wavelet to use. Options are the discrete wavelets in `PyWavelets`.
Default is 'coif4'.
freq_band : array_like
2-element array-like of the frequency band (Hz) to get the power in.
Default is [1, 10].
Notes
-----
In the original paper [1]_, the result is multiplied by 100 to obtain a
percentage. This final multiplication is not included in order to obtain
    results with a scale that more closely matches the typical 0-1 (or -1 to 1)
scale for machine learning features. NOTE that this does not mean that
the values will be in this range - since the scaling factor
is the original acceleration and not the wavelet detail values.
References
----------
.. [1] Sekine, M. et al. "Classification of waist-acceleration signals in a
continuous walking record." Medical Engineering & Physics. Vol. 22.
Pp 285-291. 2000.
"""
__slots__ = ("wave", "f_band")
_wavelet_options = pywt.wavelist(kind="discrete")
def __init__(self, wavelet="coif4", freq_band=None):
super().__init__(wavelet=wavelet, freq_band=freq_band)
self.wave = wavelet
if freq_band is not None:
self.f_band = sort(freq_band)
else:
self.f_band = [1.0, 10.0]
def compute(self, signal, fs=1.0, *, axis=-1):
"""
Compute the detail power ratio
Parameters
----------
signal : array-like
Array-like containing values to compute the detail power ratio for.
fs : float, optional
Sampling frequency in Hz. If not provided, default is 1.0Hz.
axis : int, optional
            Axis along which the detail power ratio will be computed. Ignored if `signal` is a
pandas.DataFrame. Default is last (-1).
Returns
-------
power_ratio : numpy.ndarray
Computed detail power ratio.
"""
x = super().compute(signal, fs, axis=axis)
# compute the required levels
lvls = [
int(ceil(log2(fs / self.f_band[0]))), # maximum level needed
int(ceil(log2(fs / self.f_band[1]))), # minimum level to include in sum
]
# TODO test effect of mode on result
cA, *cD = pywt.wavedec(x, self.wave, mode="symmetric", level=lvls[0], axis=-1)
result = zeros(x.shape[:-1])
for i in range(lvls[0] - lvls[1] + 1):
result += sum(cD[i] ** 2, axis=-1)
        return result / sum(x**2, axis=-1)

# ==== end of file: skdh/features/lib/wavelet.py (package: scikit-digital-health) ====
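# -----------------------------------------------------------------------------------
# A minimal usage sketch for the wavelet features above (not part of the library
# source). The signal length is chosen so the requested decomposition level,
# ceil(log2(fs / 1 Hz)), is achievable for the 'coif4' wavelet without boundary
# warnings from PyWavelets. Import path is assumed to be `skdh.features`.
# -----------------------------------------------------------------------------------
import numpy as np
from skdh.features import DetailPower, DetailPowerRatio

fs = 50.0
t = np.arange(0, 41.0, 1 / fs)  # ~41 s of data at 50 Hz
x = np.sin(2 * np.pi * 2.0 * t) + 0.1 * np.random.default_rng(2).normal(size=t.size)

dp = DetailPower(wavelet="coif4", freq_band=[1.0, 3.0]).compute(x, fs=fs)
dpr = DetailPowerRatio(wavelet="coif4", freq_band=[1.0, 10.0]).compute(x, fs=fs)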
from skdh.features.core import Feature
from skdh.features.lib import extensions
__all__ = [
"DominantFrequency",
"DominantFrequencyValue",
"PowerSpectralSum",
"SpectralFlatness",
"SpectralEntropy",
]
class DominantFrequency(Feature):
r"""
The primary frequency in the signal. Computed using the FFT and finding the maximum value of
the power spectral density in the specified range of frequencies.
Parameters
----------
padlevel : int, optional
Padding (factors of 2) to use in the FFT computation. Default is 2.
low_cutoff : float, optional
Low value of the frequency range to look in. Default is 0.0 Hz
high_cutoff : float, optional
High value of the frequency range to look in. Default is 5.0 Hz
Notes
-----
    The `padlevel` parameter affects the number of points to be used in the FFT computation by
factors of 2. The computation of number of points is per
.. math:: nfft = 2^{ceil(log_2(N)) + padlevel}
So `padlevel=2` would mean that for a signal with length 150, the number of points used
in the FFT would go from 256 to 1024.
"""
__slots__ = ("pad", "low_cut", "high_cut")
def __init__(self, padlevel=2, low_cutoff=0.0, high_cutoff=5.0):
super(DominantFrequency, self).__init__(
padlevel=padlevel, low_cutoff=low_cutoff, high_cutoff=high_cutoff
)
self.pad = padlevel
self.low_cut = low_cutoff
self.high_cut = high_cutoff
def compute(self, signal, fs=1.0, *, axis=-1):
"""
Compute the dominant frequency
Parameters
----------
signal : array-like
Array-like containing values to compute the dominant frequency for.
fs : float, optional
Sampling frequency in Hz. If not provided, default is assumed to be 1Hz.
axis : int, optional
            Axis along which the dominant frequency will be computed. Ignored if `signal` is a
pandas.DataFrame. Default is last (-1).
Returns
-------
dom_freq : numpy.ndarray
Computed dominant frequency.
"""
x = super().compute(signal, fs, axis=axis)
return extensions.dominant_frequency(
x, fs, self.pad, self.low_cut, self.high_cut
)
class DominantFrequencyValue(Feature):
r"""
The power spectral density maximum value. Taken inside the range of frequencies specified.
Parameters
----------
padlevel : int, optional
Padding (factors of 2) to use in the FFT computation. Default is 2.
low_cutoff : float, optional
Low value of the frequency range to look in. Default is 0.0 Hz
high_cutoff : float, optional
High value of the frequency range to look in. Default is 5.0 Hz
Notes
-----
    The `padlevel` parameter affects the number of points to be used in the FFT computation by
factors of 2. The computation of number of points is per
.. math:: nfft = 2^{ceil(log_2(N)) + padlevel}
So `padlevel=2` would mean that for a signal with length 150, the number of points used
in the FFT would go from 256 to 1024.
"""
__slots__ = ("pad", "low_cut", "high_cut")
def __init__(self, padlevel=2, low_cutoff=0.0, high_cutoff=5.0):
super(DominantFrequencyValue, self).__init__(
padlevel=padlevel, low_cutoff=low_cutoff, high_cutoff=high_cutoff
)
self.pad = padlevel
self.low_cut = low_cutoff
self.high_cut = high_cutoff
def compute(self, signal, fs=1.0, *, axis=-1):
"""
Compute the dominant frequency value
Parameters
----------
signal : array-like
Array-like containing values to compute the dominant frequency value for.
fs : float, optional
Sampling frequency in Hz. If not provided, default is assumed to be 1Hz.
axis : int, optional
            Axis along which the dominant frequency value will be computed. Ignored if `signal` is a
pandas.DataFrame. Default is last (-1).
Returns
-------
dom_freq_val : numpy.ndarray
Computed dominant frequency value.
"""
x = super().compute(signal, fs, axis=axis)
return extensions.dominant_frequency_value(
x, fs, self.pad, self.low_cut, self.high_cut
)
class PowerSpectralSum(Feature):
r"""
Sum of power spectral density values. The sum of power spectral density values in a
1.0Hz wide band around the primary (dominant) frequency (:math:`f_{dom}\pm 0.5`)
Parameters
----------
padlevel : int, optional
Padding (factors of 2) to use in the FFT computation. Default is 2.
low_cutoff : float, optional
Low value of the frequency range to look in. Default is 0.0 Hz
high_cutoff : float, optional
High value of the frequency range to look in. Default is 5.0 Hz
Notes
-----
    The `padlevel` parameter affects the number of points to be used in the FFT computation by
factors of 2. The computation of number of points is per
.. math:: nfft = 2^{ceil(log_2(N)) + padlevel}
So `padlevel=2` would mean that for a signal with length 150, the number of points used
in the FFT would go from 256 to 1024.
"""
__slots__ = ("pad", "low_cut", "high_cut")
def __init__(self, padlevel=2, low_cutoff=0.0, high_cutoff=5.0):
super(PowerSpectralSum, self).__init__(
padlevel=padlevel, low_cutoff=low_cutoff, high_cutoff=high_cutoff
)
self.pad = padlevel
self.low_cut = low_cutoff
self.high_cut = high_cutoff
def compute(self, signal, fs=1.0, *, axis=-1):
"""
Compute the power spectral sum
Parameters
----------
signal : array-like
Array-like containing values to compute the power spectral sum for.
fs : float, optional
Sampling frequency in Hz. If not provided, default is assumed to be 1Hz.
axis : int, optional
Axis along which the signal entropy will be computed. Ignored if `signal` is a
pandas.DataFrame. Default is last (-1).
Returns
-------
pss : numpy.ndarray
Computed power spectral sum.
"""
x = super().compute(signal, fs, axis=axis)
return extensions.power_spectral_sum(
x, fs, self.pad, self.low_cut, self.high_cut
)
class SpectralFlatness(Feature):
r"""
    A measure of the "tonality" or resonant structure of a signal. Provides a quantification of
    how tone-like a signal is, as opposed to being noise-like. Here, tonality is loosely defined
    as the number of peaks in the power spectrum, as opposed to a flat spectrum representing
    white noise.
Parameters
----------
padlevel : int, optional
Padding (factors of 2) to use in the FFT computation. Default is 2.
low_cutoff : float, optional
Low value of the frequency range to look in. Default is 0.0 Hz
high_cutoff : float, optional
High value of the frequency range to look in. Default is 5.0 Hz
Notes
-----
    The `padlevel` parameter affects the number of points to be used in the FFT computation by
factors of 2. The computation of number of points is per
.. math:: nfft = 2^{ceil(log_2(N)) + padlevel}
So `padlevel=2` would mean that for a signal with length 150, the number of points used
in the FFT would go from 256 to 1024.
"""
__slots__ = ("pad", "low_cut", "high_cut")
def __init__(self, padlevel=2, low_cutoff=0.0, high_cutoff=5.0):
super(SpectralFlatness, self).__init__(
padlevel=padlevel, low_cutoff=low_cutoff, high_cutoff=high_cutoff
)
self.pad = padlevel
self.low_cut = low_cutoff
self.high_cut = high_cutoff
def compute(self, signal, fs=1.0, *, axis=-1):
"""
Compute the spectral flatness
Parameters
----------
signal : array-like
Array-like containing values to compute the spectral flatness for.
fs : float, optional
Sampling frequency in Hz. If not provided, default is assumed to be 1Hz.
axis : int, optional
            Axis along which the spectral flatness will be computed. Ignored if `signal` is a
pandas.DataFrame. Default is last (-1).
Returns
-------
spec_flat : numpy.ndarray
Computed spectral flatness.
"""
x = super().compute(signal, fs, axis=axis)
return extensions.spectral_flatness(
x, fs, self.pad, self.low_cut, self.high_cut
)
class SpectralEntropy(Feature):
r"""
A measure of the information contained in the power spectral density estimate. Similar
to :py:class:`SignalEntropy` but for the power spectral density.
Parameters
----------
padlevel : int, optional
Padding (factors of 2) to use in the FFT computation. Default is 2.
low_cutoff : float, optional
Low value of the frequency range to look in. Default is 0.0 Hz
high_cutoff : float, optional
High value of the frequency range to look in. Default is 5.0 Hz
Notes
-----
    The `padlevel` parameter affects the number of points to be used in the FFT computation by
factors of 2. The computation of number of points is per
.. math:: nfft = 2^{ceil(log_2(N)) + padlevel}
So `padlevel=2` would mean that for a signal with length 150, the number of points used
in the FFT would go from 256 to 1024.
"""
__slots__ = ("pad", "low_cut", "high_cut")
def __init__(self, padlevel=2, low_cutoff=0.0, high_cutoff=5.0):
super(SpectralEntropy, self).__init__(
padlevel=padlevel, low_cutoff=low_cutoff, high_cutoff=high_cutoff
)
self.pad = padlevel
self.low_cut = low_cutoff
self.high_cut = high_cutoff
def compute(self, signal, fs=1.0, *, axis=-1):
"""
Compute the spectral entropy
Parameters
----------
signal : array-like
Array-like containing values to compute the spectral entropy for.
fs : float, optional
Sampling frequency in Hz. If not provided, default is assumed to be 1Hz.
axis : int, optional
            Axis along which the spectral entropy will be computed. Ignored if `signal` is a
pandas.DataFrame. Default is last (-1).
Returns
-------
spec_ent : numpy.ndarray
Computed spectral entropy.
"""
x = super().compute(signal, fs, axis=axis)
        return extensions.spectral_entropy(x, fs, self.pad, self.low_cut, self.high_cut)

# ==== end of file: skdh/features/lib/frequency.py (package: scikit-digital-health) ====
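# -----------------------------------------------------------------------------------
# A minimal usage sketch for the frequency-domain features above (not part of the
# library source). A pure 1.5 Hz sine is used so the dominant frequency is known in
# advance; the import path `skdh.features` is assumed.
# -----------------------------------------------------------------------------------
import numpy as np
from skdh.features import DominantFrequency, DominantFrequencyValue, SpectralEntropy

fs = 50.0
t = np.arange(0, 10.0, 1 / fs)
x = np.sin(2 * np.pi * 1.5 * t)[None, :]  # a single window, shape (1, 500)

f_dom = DominantFrequency(padlevel=2, low_cutoff=0.25, high_cutoff=5.0).compute(x, fs=fs)
# f_dom is expected to be close to 1.5 Hz
p_max = DominantFrequencyValue(padlevel=2, low_cutoff=0.25, high_cutoff=5.0).compute(x, fs=fs)
spec_ent = SpectralEntropy(padlevel=2, low_cutoff=0.25, high_cutoff=5.0).compute(x, fs=fs)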
from skdh.features.core import Feature
from skdh.features.lib import extensions
__all__ = ["ComplexityInvariantDistance", "RangeCountPercentage", "RatioBeyondRSigma"]
class ComplexityInvariantDistance(Feature):
"""
A distance metric that accounts for signal complexity.
Parameters
----------
normalize : bool, optional
Normalize the signal. Default is True.
"""
__slots__ = ("normalize",)
def __init__(self, normalize=True):
super(ComplexityInvariantDistance, self).__init__(normalize=normalize)
self.normalize = normalize
def compute(self, signal, *, axis=-1, **kwargs):
"""
compute(signal, *, axis=-1)
Compute the complexity invariant distance
Parameters
----------
signal : array-like
Array-like containing values to compute the complexity invariant
distance for.
axis : int, optional
            Axis along which the complexity invariant distance will be computed. Ignored if
`signal` is a pandas.DataFrame. Default is last (-1).
Returns
-------
cid : numpy.ndarray
Computed complexity invariant distance.
"""
x = super().compute(signal, axis=axis)
return extensions.complexity_invariant_distance(x, self.normalize)
class RangeCountPercentage(Feature):
"""
The percent of the signal that falls between specified values
Parameters
----------
range_min : {int, float}, optional
Minimum value of the range. Default value is -1.0
range_max : {int, float}, optional
Maximum value of the range. Default value is 1.0
"""
__slots__ = ("rmin", "rmax")
def __init__(self, range_min=-1.0, range_max=1.0):
super(RangeCountPercentage, self).__init__(
range_min=range_min, range_max=range_max
)
self.rmin = range_min
self.rmax = range_max
def compute(self, signal, *, axis=-1, **kwargs):
"""
compute(signal, *, axis=-1)
Compute the range count percentage
Parameters
----------
signal : array-like
Array-like containing values to compute the range count percentage for.
axis : int, optional
            Axis along which the range count percentage will be computed. Ignored if `signal` is a
pandas.DataFrame. Default is last (-1).
Returns
-------
rcp : numpy.ndarray
Computed range count percentage.
"""
x = super().compute(signal, fs=1.0, axis=axis)
return extensions.range_count(x, self.rmin, self.rmax)
class RatioBeyondRSigma(Feature):
"""
The percent of the signal outside :math:`r` standard deviations from the mean.
Parameters
----------
r : float, optional
Number of standard deviations above or below the mean the range includes. Default is 2.0
"""
__slots__ = ("r",)
def __init__(self, r=2.0):
super(RatioBeyondRSigma, self).__init__(r=r)
self.r = r
def compute(self, signal, *, axis=-1, **kwargs):
r"""
compute(signal, *, axis=-1)
Compute the ratio beyond :math:`r\sigma`
Parameters
----------
signal : array-like
Array-like containing values to compute the ratio beyond :math:`r\sigma` for.
axis : int, optional
            Axis along which the ratio beyond :math:`r\sigma` will be computed. Ignored if `signal` is a
pandas.DataFrame. Default is last (-1).
Returns
-------
rbr : numpy.ndarray
Computed ratio beyond r sigma.
"""
x = super().compute(signal, fs=1.0, axis=axis)
        return extensions.ratio_beyond_r_sigma(x, self.r)

# ==== end of file: skdh/features/lib/misc.py (package: scikit-digital-health) ====
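# -----------------------------------------------------------------------------------
# A minimal usage sketch for the features above (not part of the library source).
# For standard normal data, roughly 68% of samples fall in [-1, 1] and about 5% lie
# beyond 2 sigma, which gives a quick sanity check on the outputs. The import path
# `skdh.features` is assumed.
# -----------------------------------------------------------------------------------
import numpy as np
from skdh.features import ComplexityInvariantDistance, RangeCountPercentage, RatioBeyondRSigma

x = np.random.default_rng(3).normal(size=(5, 500))  # 5 windows of 500 samples

cid = ComplexityInvariantDistance(normalize=True).compute(x, axis=-1)
rcp = RangeCountPercentage(range_min=-1.0, range_max=1.0).compute(x, axis=-1)  # ~0.68 per window
rbr = RatioBeyondRSigma(r=2.0).compute(x, axis=-1)                             # ~0.05 per window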
from numpy import log as nplog, abs
from skdh.features.core import Feature
from skdh.features.lib import extensions
__all__ = ["JerkMetric", "DimensionlessJerk", "SPARC"]
class JerkMetric(Feature):
r"""
The normalized sum of jerk. Assumes the input signal is acceleration, and
therefore the jerk is the first time derivative of the input signal.
Notes
-----
Given an acceleration signal :math:`a`, the pre-normalized jerk metric
:math:`\hat{J}` is computed using a 2-point difference of the acceleration,
then squared and summed per
.. math:: \hat{J} = \sum_{i=2}^N\left(\frac{a_{i} - a_{i-1}}{\Delta t}\right)^2
where :math:`\Delta t` is the sampling period in seconds. The jerk metric
:math:`J` is then normalized using constants and the maximum absolute
acceleration value observed per
.. math:: s = \frac{360max(|a|)^2}{\Delta t}
.. math:: J = \frac{\hat{J}}{2s}
"""
__slots__ = ()
def __init__(self):
super(JerkMetric, self).__init__()
def compute(self, signal, fs=1.0, *, axis=-1):
"""
Compute the jerk metric
Parameters
----------
signal : array-like
Array-like containing values to compute the jerk metric for.
fs : float, optional
Sampling frequency in Hz. If not provided, default is 1.0Hz
axis : int, optional
            Axis along which the jerk metric will be computed. Ignored if
`signal` is a pandas.DataFrame. Default is last (-1).
Returns
-------
jerk_metric : numpy.ndarray
Computed jerk metric.
"""
x = super().compute(signal, fs, axis=axis)
return extensions.jerk_metric(x, fs)
class DimensionlessJerk(Feature):
r"""
The dimensionless normalized sum of jerk, or its log value. Will take
velocity, acceleration, or jerk as the input signal, and compute the jerk
accordingly.
Parameters
----------
log : bool, optional
Take the log of the dimensionless jerk. Default is False.
signal_type : {'acceleration', 'velocity', 'jerk'}, optional
The type of the signal being provided. Default is 'acceleration'
Notes
-----
For all three inputs (acceleration, velocity, and jerk) the squaring and
summation of the computed jerk values is the same as :py:class:`JerkMetric`.
The difference comes in the normalization to get a dimensionless value, and
in the computation of the jerk.
For the different inputs, the pre-normalized metric :math:`\hat{J}` is
computed per
.. math::
\hat{J}_{vel} = \sum_{i=2}^{N-1}\left(\frac{v_{i+1} - 2v_{i}
+ v_{i-1}}{\Delta t^2}\right)^2 \\
\hat{J}_{acc} = \sum_{i=2}^N\left(\frac{a_{i} - a_{i-1}}{\Delta t}\right)^2 \\
\hat{J}_{jerk} = \sum_{i=1}^Nj_i^2
The scaling factor also changes depending on which input is provided, per
.. math::
s_{vel} = \frac{max(|v|)^2}{N^3\Delta t^4} \\
s_{acc} = \frac{max(|a|)^2}{N \Delta t^2} \\
s_{jerk} = Nmax(|j|)^2
Note that the sampling period ends up cancelling out for all versions of
the metric. Finally, the dimensionless jerk metric is simply the negative
pre-normalized value divided by the corresponding scaling factor. If the
log dimensionless jerk is required, then the negative is taken after taking
the natural logarithm
.. math::
DJ = \frac{-\hat{J}_{type}}{s_{type}} \\
DJ_{log} = -ln\left(\frac{\hat{J}_{type}}{s_{type}}\right)
"""
__slots__ = ("log", "i_type")
_signal_type_options = ["velocity", "acceleration", "jerk"]
def __init__(self, log=False, signal_type="acceleration"):
super(DimensionlessJerk, self).__init__(log=log, signal_type=signal_type)
self.log = log
t_map = {"velocity": 1, "acceleration": 2, "jerk": 3}
try:
self.i_type = t_map[signal_type]
except KeyError:
raise ValueError(f"'signal_type' ({signal_type}) unrecognized.")
def compute(self, signal, *, axis=-1, **kwargs):
"""
compute(signal, *, axis=-1)
Compute the dimensionless jerk metric
Parameters
----------
signal : array-like
Array-like containing values to compute the dimensionless jerk
metric for.
axis : int, optional
            Axis along which the dimensionless jerk metric will be computed. Ignored if
`signal` is a pandas.DataFrame. Default is last (-1).
Returns
-------
dimless_jerk_metric : numpy.ndarray
Computed [log] dimensionless jerk metric.
"""
x = super().compute(signal, axis=axis)
res = extensions.dimensionless_jerk_metric(x, self.i_type)
if self.log:
return -nplog(abs(res))
else:
return res
class SPARC(Feature):
"""
A quantitative measure of the smoothness of a signal. SPARC stands for the
SPectral ARC length.
Parameters
----------
padlevel : int
Indicates the level of zero-padding to perform on the signal. This
essentially multiplies the length of the signal by 2^padlevel. Default
is 4.
    fc : float, optional
        The maximum cut-off frequency for calculating the spectral arc length
        metric. Default is 10.0 Hz.
    amplitude_threshold : float, optional
        The amplitude threshold to be used for determining the cut-off frequency
        up to which the spectral arc length is to be estimated. Default is 0.05.
References
----------
.. [1] S. Balasubramanian, A. Melendez-Calderon, A. Roby-Brami, and
E. Burdet, “On the analysis of movement smoothness,” J NeuroEngineering
Rehabil, vol. 12, no. 1, p. 112, Dec. 2015, doi: 10.1186/s12984-015-0090-9.
"""
__slots__ = ("padlevel", "fc", "amp_thresh")
def __init__(self, padlevel=4, fc=10.0, amplitude_threshold=0.05):
super(SPARC, self).__init__(
padlevel=padlevel, fc=fc, amplitude_threshold=amplitude_threshold
)
self.padlevel = padlevel
self.fc = fc
self.amp_thresh = amplitude_threshold
def compute(self, signal, fs=1.0, *, axis=-1):
"""
        compute(signal, fs=1.0, *, axis=-1)
Compute the SPARC
Parameters
----------
signal : array-like
Array-like containing values to compute the SPARC for.
fs : float, optional
Sampling frequency in Hz. If not provided, default is 1.0Hz
axis : int, optional
            Axis along which the SPARC will be computed. Ignored if
`signal` is a pandas.DataFrame. Default is last (-1).
Returns
-------
sparc : numpy.ndarray
Computed SPARC.
"""
x = super().compute(signal, fs, axis=axis)
        return extensions.SPARC(x, fs, self.padlevel, self.fc, self.amp_thresh)

# ==== end of file: skdh/features/lib/smoothness.py (package: scikit-digital-health) ====
from numpy import log as nplog, abs
from numpy import mean, std, sum, diff, sign
from scipy.stats import skew, kurtosis
from skdh.features.core import Feature
__all__ = ["Mean", "MeanCrossRate", "StdDev", "Skewness", "Kurtosis"]
class Mean(Feature):
"""
The signal mean.
Examples
--------
>>> import numpy as np
>>> signal = np.arange(15).reshape((5, 3))
>>> mn = Mean()
>>> mn.compute(signal)
array([6., 7., 8.])
"""
__slots__ = ()
def __init__(self):
super().__init__()
def compute(self, signal, *, axis=-1, **kwargs):
"""
compute(signal, *, axis=-1)
Compute the mean.
Parameters
----------
signal : array-like
Array-like containing values to compute the mean for.
axis : int, optional
            Axis along which the mean will be computed. Ignored if `signal` is a
pandas.DataFrame. Default is last (-1).
Returns
-------
mean : numpy.ndarray
Computed mean.
"""
x = super().compute(signal, axis=axis)
return mean(x, axis=-1)
class MeanCrossRate(Feature):
"""
    Number of signal mean-value crossings, expressed as a fraction of the signal length.
"""
__slots__ = ()
def __init__(self):
super(MeanCrossRate, self).__init__()
def compute(self, signal, *, axis=-1, **kwargs):
"""
compute(signal, *, axis=-1)
Compute the mean cross rate
Parameters
----------
signal : array-like
Array-like containing values to compute the mean cross rate for.
axis : int, optional
            Axis along which the mean cross rate will be computed. Ignored if `signal` is a
pandas.DataFrame. Default is last (-1).
Returns
-------
mcr : numpy.ndarray
Computed mean cross rate.
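        Examples
        --------
        A small illustrative sketch with a hand-constructed signal (not library data):
        >>> import numpy as np
        >>> x = np.array([1.0, -1.0, 1.0, -1.0, 1.0, -1.0])
        >>> mcr = MeanCrossRate().compute(x)  # 5 crossings over 6 samples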
"""
x = super().compute(signal, axis=axis)
x_nomean = x - mean(x, axis=-1, keepdims=True)
mcr = sum(diff(sign(x_nomean), axis=-1) != 0, axis=-1)
        return mcr / x.shape[-1]  # normalize by the length of the computation axis
class StdDev(Feature):
"""
The signal standard deviation
Examples
--------
>>> import numpy as np
>>> signal = np.arange(15).reshape((5, 3))
>>> StdDev().compute(signal)
array([[4.74341649, 4.74341649, 4.74341649]])
"""
__slots__ = ()
def __init__(self):
super().__init__()
def compute(self, signal, *, axis=-1, **kwargs):
"""
compute(signal, *, axis=-1)
Compute the standard deviation
Parameters
----------
signal : array-like
Array-like containing values to compute the standard deviation for.
axis : int, optional
            Axis along which the standard deviation will be computed. Ignored if `signal` is a
pandas.DataFrame. Default is last (-1).
Returns
-------
stdev : numpy.ndarray
Computed standard deviation.
"""
x = super().compute(signal, axis=axis)
return std(x, axis=-1, ddof=1)
class Skewness(Feature):
"""
The skewness of a signal. NaN inputs will be propagated through to the result.
"""
__slots__ = ()
def __init__(self):
super().__init__()
def compute(self, signal, *, axis=-1, **kwargs):
"""
compute(signal, *, axis=-1)
Compute the skewness
Parameters
----------
signal : array-like
Array-like containing values to compute the skewness for.
axis : int, optional
            Axis along which the skewness will be computed. Ignored if `signal` is a
pandas.DataFrame. Default is last (-1).
Returns
-------
skew : numpy.ndarray
Computed skewness.
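        Examples
        --------
        A minimal usage sketch; the right-skewed random input below is an
        illustrative assumption only:
        >>> import numpy as np
        >>> rng = np.random.default_rng(5)
        >>> signal = rng.exponential(1.0, size=100)
        >>> sk = Skewness().compute(signal)  # right-skewed input => positive skewness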
"""
x = super().compute(signal, axis=axis)
return skew(x, axis=-1, bias=False)
class Kurtosis(Feature):
"""
The kurtosis of a signal. NaN inputs will be propagated through to the result.
"""
__slots__ = ()
def __init__(self):
super().__init__()
def compute(self, signal, *, axis=-1, **kwargs):
"""
compute(signal, *, axis=-1)
Compute the kurtosis
Parameters
----------
signal : array-like
Array-like containing values to compute the kurtosis for.
axis : int, optional
            Axis along which the kurtosis will be computed. Ignored if `signal` is a
pandas.DataFrame. Default is last (-1).
Returns
-------
kurt : numpy.ndarray
Computed kurtosis.
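        Examples
        --------
        A minimal sketch showing the NaN propagation noted above (values are
        illustrative only):
        >>> import numpy as np
        >>> x = np.array([1.0, 2.0, np.nan, 4.0, 5.0])
        >>> kurt = Kurtosis().compute(x)  # NaN in the input propagates to the result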
"""
x = super().compute(signal, axis=axis)
        return kurtosis(x, axis=-1, bias=False)
# --- end of skdh/features/lib/moments.py (scikit-digital-health) ---
from numpy import array, repeat, abs, minimum, floor, float_
from scipy.signal import lfilter_zi, lfilter
from skdh.utility.internal import apply_downsample
from skdh.utility import moving_mean
__all__ = ["get_activity_counts"]
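# Band-pass filter coefficients used by the ActiGraph counts algorithm:
# `input_coef` is the numerator (b) and `output_coef` the denominator (a) of the
# transfer function passed to scipy.signal.lfilter in get_activity_counts below.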
input_coef = array(
[
-0.009341062898525,
-0.025470289659360,
-0.004235264826105,
0.044152415456420,
0.036493718347760,
-0.011893961934740,
-0.022917390623150,
-0.006788163862310,
0.000000000000000,
],
dtype=float_,
)
output_coef = array(
[
1.00000000000000000000,
-3.63367395910957000000,
5.03689812757486000000,
-3.09612247819666000000,
0.50620507633883000000,
0.32421701566682000000,
-0.15685485875559000000,
0.01949130205890000000,
0.00000000000000000000,
],
dtype=float_,
)
def get_activity_counts(fs, time, accel, epoch_seconds=60):
"""
Compute the activity counts from acceleration.
Parameters
----------
fs : float
Sampling frequency.
time : numpy.ndarray
Shape (N,) array of epoch timestamps (in seconds) for each sample.
accel : numpy.ndarray
Nx3 array of measured acceleration values, in units of g.
epoch_seconds : int, optional
Number of seconds in an epoch (time unit for counts). Default is 60 seconds.
Returns
-------
counts : numpy.ndarray
Array of activity counts
References
----------
.. [1] A. Neishabouri et al., “Quantification of acceleration as activity counts
in ActiGraph wearable,” Sci Rep, vol. 12, no. 1, Art. no. 1, Jul. 2022,
doi: 10.1038/s41598-022-16003-x.
Notes
-----
This implementation is still slightly different than that provided in [1]_.
Foremost is that the down-sampling is different to accommodate other sensor types
that have different sampling frequencies than what might be provided by ActiGraph.
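    Examples
    --------
    A minimal sketch with simulated data; the 50 Hz sampling rate and noise
    level are assumptions for illustration only:
    >>> import numpy as np
    >>> fs = 50.0
    >>> time = np.arange(0, 600, 1 / fs)  # 10 minutes of timestamps
    >>> accel = np.random.normal([0.0, 0.0, 1.0], 0.05, size=(time.size, 3))
    >>> counts = get_activity_counts(fs, time, accel, epoch_seconds=60)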
"""
# 3. down-sample to 30hz
time_ds, (acc_ds,) = apply_downsample(
30.0,
time,
data=(accel,),
aa_filter=True,
fs=fs,
)
# 4. filter the data
# NOTE: this is the actigraph implementation - they specifically use
# a filter with a phase shift (ie not filtfilt), and TF representation
# instead of ZPK or SOS
zi = lfilter_zi(input_coef, output_coef).reshape((-1, 1))
acc_bpf, _ = lfilter(
input_coef,
output_coef,
acc_ds,
zi=repeat(zi, acc_ds.shape[1], axis=-1) * acc_ds[0],
axis=0,
)
# 5. scale the data
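    # scaling constant taken from the reference ActiGraph implementation ([1] in
    # the docstring above); it converts the filtered acceleration into the units
    # expected by the thresholding step that follows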
acc_bpf *= (3 / 4096) / (2.6 / 256) * 237.5
# 6. rectify
acc_trim = abs(acc_bpf)
# 7. trim
acc_trim[acc_trim < 4] = 0
acc_trim = floor(minimum(acc_trim, 128))
# 8. "downsample" to 10hz by taking moving mean
acc_10hz = moving_mean(acc_trim, 3, 3, trim=True, axis=0)
# 9. get the counts
block_size = epoch_seconds * 10 # 1 minute
# this time is a moving sum
epoch_counts = moving_mean(acc_10hz, block_size, block_size, trim=True, axis=0)
epoch_counts *= block_size # remove the "mean" part to get back to sum
    return epoch_counts
# --- end of skdh/utility/activity_counts.py (scikit-digital-health) ---
from warnings import warn
from numpy import moveaxis, ascontiguousarray, full, nan, isnan
from skdh.utility import _extensions
from skdh.utility.windowing import get_windowed_view
__all__ = [
"moving_mean",
"moving_sd",
"moving_skewness",
"moving_kurtosis",
"moving_median",
"moving_max",
"moving_min",
]
def moving_mean(a, w_len, skip, trim=True, axis=-1):
r"""
Compute the moving mean.
Parameters
----------
a : array-like
Signal to compute moving mean for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
axis : int, optional
Axis to compute the moving mean along. Default is -1.
Returns
-------
mmean : numpy.ndarray
Moving mean. Note that if the moving axis is not the last axis, then the result
will *not* be c-contiguous.
Notes
-----
On the moving axis if `trim=True`, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
Most efficient computations are for `skip` values that are either factors of
`wlen`, or greater or equal to `wlen`.
Warnings
--------
    Catastrophic cancellation is a concern when `skip` is less than `wlen` due to
the cumulative sum-type algorithm being used, when input values are very very
large, or very very small. With typical IMU data values this should not be an
issue, even for very long data series (multiple days worth of data)
Examples
--------
    Compute with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)
>>> moving_mean(x, 3, 3)
array([1., 4., 7.])
Compute with overlapping windows:
>>> moving_mean(x, 3, 1)
array([1., 2., 3., 4., 5., 6., 7., 8.])
Compute without trimming the result
>>> moving_mean(x, 3, 1, trim=False)
array([1., 2., 3., 4., 5., 6., 7., 8., nan, nan])
Compute on a nd-array to see output shape. On the moving axis, the output
should be equal to :math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
>>> res = moving_mean(y, window_length, window_skip, axis=1)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_mean(z, 3, 3, axis=0).flags['C_CONTIGUOUS']
False
>>> moving_mean(z, 3, 3, axis=1).flags['C_CONTIGUOUS']
False
>>> moving_mean(z, 3, 3, axis=2).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
raise ValueError("`wlen` and `skip` cannot be less than or equal to 0.")
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError("Window length is larger than the computation axis.")
rmean = _extensions.moving_mean(x, w_len, skip, trim)
# move computation axis back to original place and return
return moveaxis(rmean, -1, axis)
def moving_sd(a, w_len, skip, trim=True, axis=-1, return_previous=True):
r"""
Compute the moving sample standard deviation.
Parameters
----------
a : array-like
Signal to compute moving sample standard deviation for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
axis : int, optional
Axis to compute the moving mean along. Default is -1.
return_previous : bool, optional
Return previous moments. These are computed either way, and are therefore optional returns.
Default is True.
Returns
-------
msd : numpy.ndarray
Moving sample standard deviation. Note that if the moving axis is not the last axis,
then the result will *not* be c-contiguous.
mmean : numpy.ndarray, optional.
Moving mean. Note that if the moving axis is not the last axis, then the result
will *not* be c-contiguous. Only returned if `return_previous=True`.
Notes
-----
On the moving axis, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
Most efficient computations are for `skip` values that are either factors of `wlen`, or greater
or equal to `wlen`.
Warnings
--------
    Catastrophic cancellation is a concern when `skip` is less than `wlen` due to the cumulative
sum-type algorithms being used, when input values are very very large, or very very small. With
typical IMU data values this should not be an issue, even for very long data series (multiple
days worth of data).
Examples
--------
    Compute with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)**2
>>> moving_sd(x, 3, 3, return_previous=True)
(array([ 2.081666 , 8.02080628, 14.0118997 ]),
array([ 1.66666667, 16.66666667, 49.66666667]))
Compute with overlapping windows:
    >>> moving_sd(x, 3, 1, return_previous=False)
array([ 2.081666 , 4.04145188, 6.02771377, 8.02080628, 10.0166528 ,
12.01388086, 14.0118997 , 16.01041328])
Compute without trimming:
    >>> moving_sd(x, 3, 1, trim=False, return_previous=False)
array([ 2.081666 , 4.04145188, 6.02771377, 8.02080628, 10.0166528 ,
12.01388086, 14.0118997 , 16.01041328, nan, nan])
Compute on a nd-array to see output shape. On the moving axis, the output should be equal to
:math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
>>> res = moving_sd(y, window_length, window_skip, axis=1, return_previous=False)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_sd(z, 3, 3, axis=0, return_previous=False).flags['C_CONTIGUOUS']
False
>>> moving_sd(z, 3, 3, axis=1, return_previous=False).flags['C_CONTIGUOUS']
False
>>> moving_sd(z, 3, 3, axis=2, return_previous=False).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
raise ValueError("`wlen` and `skip` cannot be less than or equal to 0.")
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError(
"Cannot have a window length larger than the computation axis."
)
res = _extensions.moving_sd(x, w_len, skip, trim, return_previous)
# move computation axis back to original place and return
if return_previous:
return moveaxis(res[0], -1, axis), moveaxis(res[1], -1, axis)
else:
return moveaxis(res, -1, axis)
def moving_skewness(a, w_len, skip, trim=True, axis=-1, return_previous=True):
r"""
Compute the moving sample skewness.
Parameters
----------
a : array-like
Signal to compute moving skewness for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
axis : int, optional
Axis to compute the moving mean along. Default is -1.
return_previous : bool, optional
Return previous moments. These are computed either way, and are therefore optional returns.
Default is True.
Returns
-------
mskew : numpy.ndarray
Moving skewness. Note that if the moving axis is not the last axis,
then the result will *not* be c-contiguous.
msd : numpy.ndarray, optional
Moving sample standard deviation. Note that if the moving axis is not the last axis,
then the result will *not* be c-contiguous. Only returned if `return_previous=True`.
mmean : numpy.ndarray, optional.
Moving mean. Note that if the moving axis is not the last axis, then the result
will *not* be c-contiguous. Only returned if `return_previous=True`.
Notes
-----
On the moving axis, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
Warnings
--------
    While this implementation is quite fast, it is also quite memory inefficient. 3 arrays
of equal length to the computation axis are created during computation, which can easily
exceed system memory if already using a significant amount of memory.
Examples
--------
    Compute with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)**2
>>> moving_skewness(x, 3, 3, return_previous=True)
(array([0.52800497, 0.15164108, 0.08720961]),
array([ 2.081666 , 8.02080628, 14.0118997 ]),
array([ 1.66666667, 16.66666667, 49.66666667]))
Compute with overlapping windows:
>>> moving_skewness(x, 3, 1, return_previous=False)
array([0.52800497, 0.29479961, 0.20070018, 0.15164108, 0.12172925,
0.10163023, 0.08720961, 0.07636413])
Compute without trimming:
>>> moving_skewness(x, 3, 1, trim=False, return_previous=False)
array([0.52800497, 0.29479961, 0.20070018, 0.15164108, 0.12172925,
0.10163023, 0.08720961, 0.07636413, nan, nan])
Compute on a nd-array to see output shape. On the moving axis, the output should be equal to
:math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
>>> res = moving_skewness(y, window_length, window_skip, axis=1, return_previous=False)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_skewness(z, 3, 3, axis=0, return_previous=False).flags['C_CONTIGUOUS']
False
>>> moving_skewness(z, 3, 3, axis=1, return_previous=False).flags['C_CONTIGUOUS']
False
>>> moving_skewness(z, 3, 3, axis=2, return_previous=False).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
raise ValueError("`wlen` and `skip` cannot be less than or equal to 0.")
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError(
"Cannot have a window length larger than the computation axis."
)
res = _extensions.moving_skewness(x, w_len, skip, trim, return_previous)
if isnan(res).any():
warn("NaN values present in output, possibly due to catastrophic cancellation.")
# move computation axis back to original place and return
if return_previous:
return tuple(moveaxis(i, -1, axis) for i in res)
else:
return moveaxis(res, -1, axis)
def moving_kurtosis(a, w_len, skip, trim=True, axis=-1, return_previous=True):
r"""
Compute the moving sample kurtosis.
Parameters
----------
a : array-like
Signal to compute moving kurtosis for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
axis : int, optional
Axis to compute the moving mean along. Default is -1.
return_previous : bool, optional
Return previous moments. These are computed either way, and are therefore optional returns.
Default is True.
Returns
-------
mkurt : numpy.ndarray
Moving kurtosis. Note that if the moving axis is not the last axis,
then the result will *not* be c-contiguous.
mskew : numpy.ndarray, optional
Moving skewness. Note that if the moving axis is not the last axis,
then the result will *not* be c-contiguous. Only returned if `return_previous=True`.
msd : numpy.ndarray, optional
Moving sample standard deviation. Note that if the moving axis is not the last axis,
then the result will *not* be c-contiguous. Only returned if `return_previous=True`.
mmean : numpy.ndarray, optional.
Moving mean. Note that if the moving axis is not the last axis, then the result
will *not* be c-contiguous. Only returned if `return_previous=True`.
Notes
-----
On the moving axis, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
Warnings
--------
    While this implementation is quite fast, it is also quite memory inefficient. 4 arrays
of equal length to the computation axis are created during computation, which can easily
exceed system memory if already using a significant amount of memory.
Examples
--------
    Compute with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)**2
>>> moving_kurtosis(x, 3, 3, return_previous=True)
(array([-1.5, -1.5, -1.5]), # kurtosis
array([0.52800497, 0.15164108, 0.08720961]), # skewness
array([ 2.081666 , 8.02080628, 14.0118997 ]), # standard deviation
array([ 1.66666667, 16.66666667, 49.66666667])) # mean
Compute with overlapping windows:
>>> moving_kurtosis(np.random.random(100), 50, 20, return_previous=False)
array([-1.10155074, -1.20785479, -1.24363625]) # random
Compute without trimming:
    >>> moving_kurtosis(np.random.random(100), 50, 20, trim=False, return_previous=False)
array([-1.10155074, -1.20785479, -1.24363625, nan, nan]) # random
Compute on a nd-array to see output shape. On the moving axis, the output should be equal to
:math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
    >>> res = moving_kurtosis(y, window_length, window_skip, axis=1, return_previous=False)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_kurtosis(z, 3, 3, axis=0, return_previous=False).flags['C_CONTIGUOUS']
False
>>> moving_kurtosis(z, 3, 3, axis=1, return_previous=False).flags['C_CONTIGUOUS']
False
>>> moving_kurtosis(z, 3, 3, axis=2, return_previous=False).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
raise ValueError("`wlen` and `skip` cannot be less than or equal to 0.")
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError(
"Cannot have a window length larger than the computation axis."
)
res = _extensions.moving_kurtosis(x, w_len, skip, trim, return_previous)
if isnan(res).any():
warn("NaN values present in output, possibly due to catastrophic cancellation.")
# move computation axis back to original place and return
if return_previous:
return tuple(moveaxis(i, -1, axis) for i in res)
else:
return moveaxis(res, -1, axis)
def moving_median(a, w_len, skip=1, trim=True, axis=-1):
r"""
    Compute the moving median.
Parameters
----------
a : array-like
        Signal to compute the moving median for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples. Default is 1.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
axis : int, optional
        Axis to compute the moving median along. Default is -1.
Returns
-------
mmed : numpy.ndarray
Moving median. Note that if the moving axis is not the last axis, then the result
will *not* be c-contiguous.
Notes
-----
On the moving axis, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
Examples
--------
    Compute with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)
>>> moving_median(x, 3, 3)
array([1., 4., 7.])
Compute with overlapping windows:
>>> moving_median(x, 3, 1)
array([1., 2., 3., 4., 5., 6., 7., 8.])
Compute without trimming:
    >>> moving_median(x, 3, 1, trim=False)
array([1., 2., 3., 4., 5., 6., 7., 8., nan, nan])
Compute on a nd-array to see output shape. On the moving axis, the output should be equal to
:math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
>>> res = moving_median(y, window_length, window_skip, axis=1)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_median(z, 3, 3, axis=0).flags['C_CONTIGUOUS']
False
>>> moving_median(z, 3, 3, axis=1).flags['C_CONTIGUOUS']
False
>>> moving_median(z, 3, 3, axis=2).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
raise ValueError("`wlen` and `skip` cannot be less than or equal to 0.")
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError(
"Cannot have a window length larger than the computation axis."
)
rmed = _extensions.moving_median(x, w_len, skip, trim)
# move computation axis back to original place and return
return moveaxis(rmed, -1, axis)
def moving_max(a, w_len, skip, trim=True, axis=-1):
r"""
Compute the moving maximum value.
Parameters
----------
a : array-like
Signal to compute moving max for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
axis : int, optional
Axis to compute the moving max along. Default is -1.
Returns
-------
mmax : numpy.ndarray
Moving max. Note that if the moving axis is not the last axis, then the result
will *not* be c-contiguous.
Notes
-----
On the moving axis, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
Examples
--------
    Compute with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)
>>> moving_max(x, 3, 3)
array([2., 5., 8.])
Compute with overlapping windows:
>>> moving_max(x, 3, 1)
    array([2., 3., 4., 5., 6., 7., 8., 9.])
    Compute without trimming:
    >>> moving_max(x, 3, 1, trim=False)
    array([2., 3., 4., 5., 6., 7., 8., 9., nan, nan])
Compute on a nd-array to see output shape. On the moving axis, the output should be equal to
:math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
>>> res = moving_max(y, window_length, window_skip, axis=1)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_max(z, 3, 3, axis=0).flags['C_CONTIGUOUS']
False
>>> moving_max(z, 3, 3, axis=1).flags['C_CONTIGUOUS']
False
>>> moving_max(z, 3, 3, axis=2).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
raise ValueError("`wlen` and `skip` cannot be less than or equal to 0.")
# Numpy uses SIMD instructions for max/min, so it will likely be faster
# unless there is a lot of overlap
cond1 = a.ndim == 1 and (skip / w_len) < 0.005
cond2 = a.ndim > 1 and (skip / w_len) < 0.3 # due to c-contiguity?
    cond3 = a.ndim > 2  # windowing doesn't handle more than 2 dimensions currently
if any([cond1, cond2, cond3]):
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError("Window length is larger than the computation axis.")
rmax = _extensions.moving_max(x, w_len, skip, trim)
# move computation axis back to original place and return
return moveaxis(rmax, -1, axis)
else:
x = ascontiguousarray(
moveaxis(a, axis, 0)
) # need to move axis to the front for windowing
xw = get_windowed_view(x, w_len, skip)
if trim:
res = xw.max(axis=1) # computation axis is still the second axis
else:
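            # fill only the positions whose full window fits within the data;
            # the remaining entries of the output stay NaN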
nfill = (x.shape[0] - w_len) // skip + 1
rshape = list(x.shape)
rshape[0] = (x.shape[0] - 1) // skip + 1
res = full(rshape, nan)
res[:nfill] = xw.max(axis=1)
return moveaxis(res, 0, axis)
def moving_min(a, w_len, skip, trim=True, axis=-1):
r"""
    Compute the moving minimum value.
Parameters
----------
a : array-like
        Signal to compute the moving min for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
axis : int, optional
        Axis to compute the moving min along. Default is -1.
Returns
-------
    mmin : numpy.ndarray
        Moving min. Note that if the moving axis is not the last axis, then the result
will *not* be c-contiguous.
Notes
-----
On the moving axis, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
Examples
--------
    Compute with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)
>>> moving_min(x, 3, 3)
    array([0., 3., 6.])
Compute with overlapping windows:
>>> moving_min(x, 3, 1)
    array([0., 1., 2., 3., 4., 5., 6., 7.])
Compute without trimming:
    >>> moving_min(x, 3, 1, trim=False)
    array([0., 1., 2., 3., 4., 5., 6., 7., nan, nan])
Compute on a nd-array to see output shape. On the moving axis, the output should be equal to
:math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
>>> res = moving_min(y, window_length, window_skip, axis=1)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_min(z, 3, 3, axis=0).flags['C_CONTIGUOUS']
False
>>> moving_min(z, 3, 3, axis=1).flags['C_CONTIGUOUS']
False
>>> moving_min(z, 3, 3, axis=2).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
raise ValueError("`wlen` and `skip` cannot be less than or equal to 0.")
# Numpy uses SIMD instructions for max/min, so it will likely be faster
# unless there is a lot of overlap
cond1 = a.ndim == 1 and (skip / w_len) < 0.005
cond2 = a.ndim > 1 and (skip / w_len) < 0.3 # due to c-contiguity?
    cond3 = a.ndim > 2  # windowing doesn't handle more than 2 dimensions currently
if any([cond1, cond2, cond3]):
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError("Window length is larger than the computation axis.")
rmin = _extensions.moving_min(x, w_len, skip, trim)
# move computation axis back to original place and return
return moveaxis(rmin, -1, axis)
else:
x = ascontiguousarray(
moveaxis(a, axis, 0)
) # need to move axis to the front for windowing
xw = get_windowed_view(x, w_len, skip)
if trim:
res = xw.min(axis=1) # computation axis is still the second axis
else:
nfill = (x.shape[0] - w_len) // skip + 1
rshape = list(x.shape)
rshape[0] = (x.shape[0] - 1) // skip + 1
res = full(rshape, nan)
res[:nfill] = xw.min(axis=1)
        return moveaxis(res, 0, axis)
# --- end of skdh/utility/math.py (scikit-digital-health) ---
from numpy import moveaxis, ascontiguousarray, full, nan, isnan
from skdh.utility import _extensions
from skdh.utility.windowing import get_windowed_view
__all__ = [
"moving_mean",
"moving_sd",
"moving_skewness",
"moving_kurtosis",
"moving_median",
"moving_max",
"moving_min",
]
def moving_mean(a, w_len, skip, trim=True, axis=-1):
r"""
Compute the moving mean.
Parameters
----------
a : array-like
Signal to compute moving mean for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
axis : int, optional
Axis to compute the moving mean along. Default is -1.
Returns
-------
mmean : numpy.ndarray
Moving mean. Note that if the moving axis is not the last axis, then the result
will *not* be c-contiguous.
Notes
-----
On the moving axis if `trim=True`, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
Most efficient computations are for `skip` values that are either factors of
`wlen`, or greater or equal to `wlen`.
Warnings
--------
Catastropic cancellation is a concern when `skip` is less than `wlen` due to
the cumulative sum-type algorithm being used, when input values are very very
large, or very very small. With typical IMU data values this should not be an
issue, even for very long data series (multiple days worth of data)
Examples
--------
Compute the with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)
>>> moving_mean(x, 3, 3)
array([1., 4., 7.])
Compute with overlapping windows:
>>> moving_mean(x, 3, 1)
array([1., 2., 3., 4., 5., 6., 7., 8.])
Compute without trimming the result
>>> moving_mean(x, 3, 1, trim=False)
array([1., 2., 3., 4., 5., 6., 7., 8., nan, nan])
Compute on a nd-array to see output shape. On the moving axis, the output
should be equal to :math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
>>> res = moving_mean(y, window_length, window_skip, axis=1)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_mean(z, 3, 3, axis=0).flags['C_CONTIGUOUS']
False
>>> moving_mean(z, 3, 3, axis=1).flags['C_CONTIGUOUS']
False
>>> moving_mean(z, 3, 3, axis=2).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
raise ValueError("`wlen` and `skip` cannot be less than or equal to 0.")
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError("Window length is larger than the computation axis.")
rmean = _extensions.moving_mean(x, w_len, skip, trim)
# move computation axis back to original place and return
return moveaxis(rmean, -1, axis)
def moving_sd(a, w_len, skip, trim=True, axis=-1, return_previous=True):
r"""
Compute the moving sample standard deviation.
Parameters
----------
a : array-like
Signal to compute moving sample standard deviation for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
axis : int, optional
Axis to compute the moving mean along. Default is -1.
return_previous : bool, optional
Return previous moments. These are computed either way, and are therefore optional returns.
Default is True.
Returns
-------
msd : numpy.ndarray
Moving sample standard deviation. Note that if the moving axis is not the last axis,
then the result will *not* be c-contiguous.
mmean : numpy.ndarray, optional.
Moving mean. Note that if the moving axis is not the last axis, then the result
will *not* be c-contiguous. Only returned if `return_previous=True`.
Notes
-----
On the moving axis, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
Most efficient computations are for `skip` values that are either factors of `wlen`, or greater
or equal to `wlen`.
Warnings
--------
Catastropic cancellation is a concern when `skip` is less than `wlen` due to the cumulative
sum-type algorithms being used, when input values are very very large, or very very small. With
typical IMU data values this should not be an issue, even for very long data series (multiple
days worth of data).
Examples
--------
Compute the with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)**2
>>> moving_sd(x, 3, 3, return_previous=True)
(array([ 2.081666 , 8.02080628, 14.0118997 ]),
array([ 1.66666667, 16.66666667, 49.66666667]))
Compute with overlapping windows:
>>> moving_mean(x, 3, 1, return_previous=False)
array([ 2.081666 , 4.04145188, 6.02771377, 8.02080628, 10.0166528 ,
12.01388086, 14.0118997 , 16.01041328])
Compute without trimming:
>>> moving_mean(x, 3, 1, trim=False, return_previous=False)
array([ 2.081666 , 4.04145188, 6.02771377, 8.02080628, 10.0166528 ,
12.01388086, 14.0118997 , 16.01041328, nan, nan])
Compute on a nd-array to see output shape. On the moving axis, the output should be equal to
:math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
>>> res = moving_sd(y, window_length, window_skip, axis=1, return_previous=False)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_sd(z, 3, 3, axis=0, return_previous=False).flags['C_CONTIGUOUS']
False
>>> moving_sd(z, 3, 3, axis=1, return_previous=False).flags['C_CONTIGUOUS']
False
>>> moving_sd(z, 3, 3, axis=2, return_previous=False).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
raise ValueError("`wlen` and `skip` cannot be less than or equal to 0.")
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError(
"Cannot have a window length larger than the computation axis."
)
res = _extensions.moving_sd(x, w_len, skip, trim, return_previous)
# move computation axis back to original place and return
if return_previous:
return moveaxis(res[0], -1, axis), moveaxis(res[1], -1, axis)
else:
return moveaxis(res, -1, axis)
def moving_skewness(a, w_len, skip, trim=True, axis=-1, return_previous=True):
r"""
Compute the moving sample skewness.
Parameters
----------
a : array-like
Signal to compute moving skewness for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
axis : int, optional
Axis to compute the moving mean along. Default is -1.
return_previous : bool, optional
Return previous moments. These are computed either way, and are therefore optional returns.
Default is True.
Returns
-------
mskew : numpy.ndarray
Moving skewness. Note that if the moving axis is not the last axis,
then the result will *not* be c-contiguous.
msd : numpy.ndarray, optional
Moving sample standard deviation. Note that if the moving axis is not the last axis,
then the result will *not* be c-contiguous. Only returned if `return_previous=True`.
mmean : numpy.ndarray, optional.
Moving mean. Note that if the moving axis is not the last axis, then the result
will *not* be c-contiguous. Only returned if `return_previous=True`.
Notes
-----
On the moving axis, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
Warnings
--------
While this implementation is quite fast, it is also quite mememory inefficient. 3 arrays
of equal length to the computation axis are created during computation, which can easily
exceed system memory if already using a significant amount of memory.
Examples
--------
Compute the with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)**2
>>> moving_skewness(x, 3, 3, return_previous=True)
(array([0.52800497, 0.15164108, 0.08720961]),
array([ 2.081666 , 8.02080628, 14.0118997 ]),
array([ 1.66666667, 16.66666667, 49.66666667]))
Compute with overlapping windows:
>>> moving_skewness(x, 3, 1, return_previous=False)
array([0.52800497, 0.29479961, 0.20070018, 0.15164108, 0.12172925,
0.10163023, 0.08720961, 0.07636413])
Compute without trimming:
>>> moving_skewness(x, 3, 1, trim=False, return_previous=False)
array([0.52800497, 0.29479961, 0.20070018, 0.15164108, 0.12172925,
0.10163023, 0.08720961, 0.07636413, nan, nan])
Compute on a nd-array to see output shape. On the moving axis, the output should be equal to
:math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
>>> res = moving_skewness(y, window_length, window_skip, axis=1, return_previous=False)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_skewness(z, 3, 3, axis=0, return_previous=False).flags['C_CONTIGUOUS']
False
>>> moving_skewness(z, 3, 3, axis=1, return_previous=False).flags['C_CONTIGUOUS']
False
>>> moving_skewness(z, 3, 3, axis=2, return_previous=False).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
raise ValueError("`wlen` and `skip` cannot be less than or equal to 0.")
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError(
"Cannot have a window length larger than the computation axis."
)
res = _extensions.moving_skewness(x, w_len, skip, trim, return_previous)
if isnan(res).any():
warn("NaN values present in output, possibly due to catastrophic cancellation.")
# move computation axis back to original place and return
if return_previous:
return tuple(moveaxis(i, -1, axis) for i in res)
else:
return moveaxis(res, -1, axis)
def moving_kurtosis(a, w_len, skip, trim=True, axis=-1, return_previous=True):
r"""
Compute the moving sample kurtosis.
Parameters
----------
a : array-like
Signal to compute moving kurtosis for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
axis : int, optional
Axis to compute the moving mean along. Default is -1.
return_previous : bool, optional
Return previous moments. These are computed either way, and are therefore optional returns.
Default is True.
Returns
-------
mkurt : numpy.ndarray
Moving kurtosis. Note that if the moving axis is not the last axis,
then the result will *not* be c-contiguous.
mskew : numpy.ndarray, optional
Moving skewness. Note that if the moving axis is not the last axis,
then the result will *not* be c-contiguous. Only returned if `return_previous=True`.
msd : numpy.ndarray, optional
Moving sample standard deviation. Note that if the moving axis is not the last axis,
then the result will *not* be c-contiguous. Only returned if `return_previous=True`.
mmean : numpy.ndarray, optional.
Moving mean. Note that if the moving axis is not the last axis, then the result
will *not* be c-contiguous. Only returned if `return_previous=True`.
Notes
-----
On the moving axis, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
Warnings
--------
While this implementation is quite fast, it is also quite mememory inefficient. 4 arrays
of equal length to the computation axis are created during computation, which can easily
exceed system memory if already using a significant amount of memory.
Examples
--------
Compute the with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)**2
>>> moving_kurtosis(x, 3, 3, return_previous=True)
(array([-1.5, -1.5, -1.5]), # kurtosis
array([0.52800497, 0.15164108, 0.08720961]), # skewness
array([ 2.081666 , 8.02080628, 14.0118997 ]), # standard deviation
array([ 1.66666667, 16.66666667, 49.66666667])) # mean
Compute with overlapping windows:
>>> moving_kurtosis(np.random.random(100), 50, 20, return_previous=False)
array([-1.10155074, -1.20785479, -1.24363625]) # random
Compute without trimming:
>>> moving_kurtosis(np.random.random(100), 50, 20, return_previous=False)
array([-1.10155074, -1.20785479, -1.24363625, nan, nan]) # random
Compute on a nd-array to see output shape. On the moving axis, the output should be equal to
:math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
>>> res = moving_skewness(y, window_length, window_skip, axis=1, return_previous=False)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_kurtosis(z, 3, 3, axis=0, return_previous=False).flags['C_CONTIGUOUS']
False
>>> moving_kurtosis(z, 3, 3, axis=1, return_previous=False).flags['C_CONTIGUOUS']
False
>>> moving_kurtosis(z, 3, 3, axis=2, return_previous=False).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
raise ValueError("`wlen` and `skip` cannot be less than or equal to 0.")
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError(
"Cannot have a window length larger than the computation axis."
)
res = _extensions.moving_kurtosis(x, w_len, skip, trim, return_previous)
if isnan(res).any():
warn("NaN values present in output, possibly due to catastrophic cancellation.")
# move computation axis back to original place and return
if return_previous:
return tuple(moveaxis(i, -1, axis) for i in res)
else:
return moveaxis(res, -1, axis)
def moving_median(a, w_len, skip=1, trim=True, axis=-1):
r"""
Compute the moving mean.
Parameters
----------
a : array-like
Signal to compute moving mean for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples. Default is 1.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
axis : int, optional
Axis to compute the moving mean along. Default is -1.
Returns
-------
mmed : numpy.ndarray
Moving median. Note that if the moving axis is not the last axis, then the result
will *not* be c-contiguous.
Notes
-----
On the moving axis, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
Examples
--------
Compute the with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)
>>> moving_median(x, 3, 3)
array([1., 4., 7.])
Compute with overlapping windows:
>>> moving_median(x, 3, 1)
array([1., 2., 3., 4., 5., 6., 7., 8.])
Compute without trimming:
>>> moving_median(x, 3, 1)
array([1., 2., 3., 4., 5., 6., 7., 8., nan, nan])
Compute on a nd-array to see output shape. On the moving axis, the output should be equal to
:math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
>>> res = moving_median(y, window_length, window_skip, axis=1)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_median(z, 3, 3, axis=0).flags['C_CONTIGUOUS']
False
>>> moving_median(z, 3, 3, axis=1).flags['C_CONTIGUOUS']
False
>>> moving_median(z, 3, 3, axis=2).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
raise ValueError("`wlen` and `skip` cannot be less than or equal to 0.")
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError(
"Cannot have a window length larger than the computation axis."
)
rmed = _extensions.moving_median(x, w_len, skip, trim)
# move computation axis back to original place and return
return moveaxis(rmed, -1, axis)
def moving_max(a, w_len, skip, trim=True, axis=-1):
r"""
Compute the moving maximum value.
Parameters
----------
a : array-like
Signal to compute moving max for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
axis : int, optional
Axis to compute the moving max along. Default is -1.
Returns
-------
mmax : numpy.ndarray
Moving max. Note that if the moving axis is not the last axis, then the result
will *not* be c-contiguous.
Notes
-----
On the moving axis, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
Examples
--------
    Compute with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)
>>> moving_max(x, 3, 3)
array([2., 5., 8.])
Compute with overlapping windows:
>>> moving_max(x, 3, 1)
    array([2., 3., 4., 5., 6., 7., 8., 9.])
    Compute without trimming:
    >>> moving_max(x, 3, 1, trim=False)
    array([2., 3., 4., 5., 6., 7., 8., 9., nan, nan])
    Compute on an nd-array to see the output shape. On the moving axis, the output length
    should be equal to :math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
>>> res = moving_max(y, window_length, window_skip, axis=1)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_max(z, 3, 3, axis=0).flags['C_CONTIGUOUS']
False
>>> moving_max(z, 3, 3, axis=1).flags['C_CONTIGUOUS']
False
>>> moving_max(z, 3, 3, axis=2).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
        raise ValueError("`w_len` and `skip` cannot be less than or equal to 0.")
# Numpy uses SIMD instructions for max/min, so it will likely be faster
# unless there is a lot of overlap
cond1 = a.ndim == 1 and (skip / w_len) < 0.005
cond2 = a.ndim > 1 and (skip / w_len) < 0.3 # due to c-contiguity?
    cond3 = a.ndim > 2  # windowing doesn't handle more than 2 dimensions currently
if any([cond1, cond2, cond3]):
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError("Window length is larger than the computation axis.")
rmax = _extensions.moving_max(x, w_len, skip, trim)
# move computation axis back to original place and return
return moveaxis(rmax, -1, axis)
else:
x = ascontiguousarray(
moveaxis(a, axis, 0)
) # need to move axis to the front for windowing
xw = get_windowed_view(x, w_len, skip)
if trim:
res = xw.max(axis=1) # computation axis is still the second axis
else:
nfill = (x.shape[0] - w_len) // skip + 1
rshape = list(x.shape)
rshape[0] = (x.shape[0] - 1) // skip + 1
res = full(rshape, nan)
res[:nfill] = xw.max(axis=1)
return moveaxis(res, 0, axis)
def moving_min(a, w_len, skip, trim=True, axis=-1):
r"""
    Compute the moving minimum value.
Parameters
----------
    a : array-like
        Signal to compute moving min for.
w_len : int
Window length in number of samples.
skip : int
Window start location skip in number of samples.
trim : bool, optional
Trim the ends of the result, where a value cannot be calculated. If False,
these values will be set to NaN. Default is True.
    axis : int, optional
        Axis to compute the moving min along. Default is -1.
Returns
-------
    mmin : numpy.ndarray
        Moving min. Note that if the moving axis is not the last axis, then the result
        will *not* be c-contiguous.
Notes
-----
On the moving axis, the output length can be computed as follows:
.. math:: \frac{n - w_{len}}{skip} + 1
where `n` is the length of the moving axis. For cases where `skip != 1` and
`trim=False`, the length of the return on the moving axis can be calculated as:
.. math:: \frac{n}{skip}
Examples
--------
    Compute with non-overlapping windows:
>>> import numpy as np
>>> x = np.arange(10)
>>> moving_min(x, 3, 3)
    array([0., 3., 6.])
Compute with overlapping windows:
>>> moving_min(x, 3, 1)
    array([0., 1., 2., 3., 4., 5., 6., 7.])
Compute without trimming:
    >>> moving_min(x, 3, 1, trim=False)
    array([0., 1., 2., 3., 4., 5., 6., 7., nan, nan])
    Compute on an nd-array to see the output shape. On the moving axis, the output length
    should be equal to :math:`(n - w_{len}) / skip + 1`.
>>> n = 500
>>> window_length = 100
>>> window_skip = 50
>>> shape = (3, n, 5, 10)
>>> y = np.random.random(shape)
>>> res = moving_min(y, window_length, window_skip, axis=1)
>>> print(res.shape)
(3, 9, 5, 10)
Check flags for different axis output
>>> z = np.random.random((10, 10, 10))
>>> moving_min(z, 3, 3, axis=0).flags['C_CONTIGUOUS']
False
>>> moving_min(z, 3, 3, axis=1).flags['C_CONTIGUOUS']
False
>>> moving_min(z, 3, 3, axis=2).flags['C_CONTIGUOUS']
True
"""
if w_len <= 0 or skip <= 0:
        raise ValueError("`w_len` and `skip` cannot be less than or equal to 0.")
# Numpy uses SIMD instructions for max/min, so it will likely be faster
# unless there is a lot of overlap
cond1 = a.ndim == 1 and (skip / w_len) < 0.005
cond2 = a.ndim > 1 and (skip / w_len) < 0.3 # due to c-contiguity?
    cond3 = a.ndim > 2  # windowing doesn't handle more than 2 dimensions currently
if any([cond1, cond2, cond3]):
# move computation axis to end
x = moveaxis(a, axis, -1)
# check that there are enough samples
if w_len > x.shape[-1]:
raise ValueError("Window length is larger than the computation axis.")
rmin = _extensions.moving_min(x, w_len, skip, trim)
# move computation axis back to original place and return
return moveaxis(rmin, -1, axis)
else:
x = ascontiguousarray(
moveaxis(a, axis, 0)
) # need to move axis to the front for windowing
xw = get_windowed_view(x, w_len, skip)
if trim:
res = xw.min(axis=1) # computation axis is still the second axis
else:
nfill = (x.shape[0] - w_len) // skip + 1
rshape = list(x.shape)
rshape[0] = (x.shape[0] - 1) // skip + 1
res = full(rshape, nan)
res[:nfill] = xw.min(axis=1)
return moveaxis(res, 0, axis) | 0.946818 | 0.657683 |
from numpy import require
from numpy.lib.stride_tricks import as_strided
__all__ = ["compute_window_samples", "get_windowed_view"]
class DimensionError(Exception):
"""
Custom error for if the input signal has too many dimensions
"""
pass
class ContiguityError(Exception):
"""
Custom error for if the input signal is not C-contiguous
"""
pass
def compute_window_samples(fs, window_length, window_step):
"""
Compute the number of samples for a window. Takes the sampling frequency, window length, and
window step in common representations and converts them into number of samples.
Parameters
----------
fs : float
Sampling frequency in Hz.
window_length : float
        Window length in seconds. If not provided (None), no windowing will be done. Default is None.
window_step : {float, int}
Window step - the spacing between the start of windows. This can be specified several
different ways (see Notes). Default is 1.0
Returns
-------
length_n : int
Window length in samples
step_n : int
Window step in samples
Raises
------
ValueError
        If `window_step` is an integer less than 1, or if `window_step` is a float not in (0.0, 1.0]
Notes
-----
Computation of the window step depends on the type of input provided, and the range.
- `window_step` is a float in (0.0, 1.0]: specifies the fraction of a window to skip to get to
the start of the next window
    - `window_step` is an integer >= 1: specifies the number of samples to skip to get to the start
of the next window
Examples
--------
Compute the window length and step in samples for a 3s window with 50% overlap, with a
sampling rate of 50Hz
>>> compute_window_samples(50.0, 3.0, 0.5)
(150, 75)
Compute the window length for a 4.5s window with a step of 1 sample, and a sampling
rate of 100Hz
>>> compute_window_samples(100.0, 4.5, 1)
(450, 1)
"""
if window_step is None or window_length is None:
return None, None
length_n = int(round(fs * window_length))
if isinstance(window_step, int):
if window_step > 0:
step_n = window_step
else:
raise ValueError("window_step cannot be negative")
elif isinstance(window_step, float):
if 0.0 < window_step < 1.0:
step_n = int(round(length_n * window_step))
step_n = max(min(step_n, length_n), 1)
elif window_step == 1.0:
step_n = length_n
else:
raise ValueError("float values for window_step must be in (0.0, 1.0]")
return length_n, step_n
def get_windowed_view(x, window_length, step_size, ensure_c_contiguity=False):
"""
Return a moving window view over the data
Parameters
----------
x : numpy.ndarray
1- or 2-D array of signals to window. Windows occur along the 0 axis.
Must be C-contiguous.
window_length : int
Window length/size.
step_size : int
Step/stride size for windows - how many samples to step from window
center to window center.
ensure_c_contiguity : bool, optional
Create a new array with C-contiguity if the passed array is not C-contiguous.
        This *may* result in the memory requirements significantly increasing. Default is False,
        which will raise a ContiguityError if `x` is not C-contiguous.
Returns
-------
x_win : numpy.ndarray
        2- or 3-D array of windows of the original data, of shape (nwindows, window_length)
        for 1-D input, or (nwindows, window_length, k) for 2-D input with k columns.
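    Examples
    --------
    A minimal sketch of typical usage on a short 1-D signal, with a window length of
    4 samples and a step of 2 samples (values chosen for illustration only):
    >>> import numpy as np
    >>> x = np.arange(10)
    >>> get_windowed_view(x, 4, 2)
    array([[0, 1, 2, 3],
           [2, 3, 4, 5],
           [4, 5, 6, 7],
           [6, 7, 8, 9]])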
"""
if not (x.ndim in [1, 2]):
raise DimensionError("Array cannot have more than 2 dimensions.")
if ensure_c_contiguity:
x = require(x, requirements=["C"])
else:
if not x.flags["C_CONTIGUOUS"]:
raise ContiguityError(
"Input array must be C-contiguous. See numpy.ascontiguousarray"
)
if x.ndim == 1:
nrows = ((x.size - window_length) // step_size) + 1
n = x.strides[0]
return as_strided(
x, shape=(nrows, window_length), strides=(step_size * n, n), writeable=False
)
else:
k = x.shape[1]
nrows = ((x.shape[0] - window_length) // step_size) + 1
n = x.strides[1]
new_shape = (nrows, window_length, k)
new_strides = (step_size * k * n, k * n, n)
        return as_strided(x, shape=new_shape, strides=new_strides, writeable=False) | scikit-digital-health | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/utility/windowing.py | windowing.py | 0.951402 | 0.590602 |
from numpy import (
mean,
asarray,
cumsum,
minimum,
sort,
argsort,
unique,
insert,
sum,
log,
nan,
float_,
)
from skdh.utility.internal import rle
__all__ = [
"average_duration",
"state_transition_probability",
"gini_index",
"average_hazard",
"state_power_law_distribution",
]
def gini(x, w=None, corr=True):
"""
Compute the GINI Index.
Parameters
----------
x : numpy.ndarray
Array of bout lengths
w : {None, numpy.ndarray}, optional
Weights for x. Must be the same size. If None, weights are not used.
corr : bool, optional
Apply finite sample correction. Default is True.
Returns
-------
g : float
Gini index
References
----------
.. [1] https://stackoverflow.com/questions/48999542/more-efficient-weighted-gini-coefficient-in
-python/48999797#48999797
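    Examples
    --------
    A small sketch with hand-computed values (outputs shown rounded) for three bout
    lengths, with and without the finite sample correction:
    >>> import numpy as np
    >>> gini(np.asarray([1, 2, 3]), corr=False)
    0.2222222
    >>> gini(np.asarray([1, 2, 3]), corr=True)
    0.3333333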
"""
if x.size == 0:
return 0.0
elif x.size == 1:
return 1.0
# The rest of the code requires numpy arrays.
if w is not None:
sorted_indices = argsort(x)
sorted_x = x[sorted_indices]
sorted_w = w[sorted_indices]
# Force float dtype to avoid overflows
cumw = cumsum(sorted_w, dtype=float_)
cumxw = cumsum(sorted_x * sorted_w, dtype=float_)
g = sum(cumxw[1:] * cumw[:-1] - cumxw[:-1] * cumw[1:]) / (cumxw[-1] * cumw[-1])
if corr:
return g * x.size / (x.size - 1)
else:
return g
else:
sorted_x = sort(x)
n = x.size
cumx = cumsum(sorted_x, dtype=float_)
# The above formula, with all weights equal to 1 simplifies to:
g = (n + 1 - 2 * sum(cumx) / cumx[-1]) / n
if corr:
return minimum(g * n / (n - 1), 1)
else:
return g
def average_duration(a=None, *, lengths=None, values=None, voi=1):
"""
Compute the average duration in the desired state.
Parameters
----------
a : array-like, optional
        1D array of binary values. If not provided, `lengths` must be provided,
        and optionally `values`.
lengths : {numpy.ndarray, list}, optional
Lengths of runs of the binary values. If not provided, `a` must be. Must
be the same size as `values`.
values : {numpy.ndarray, list}, optional
Values of the runs. If not provided, all `lengths` will be assumed to be
for the `voi`.
voi : {int, bool}, optional
Value of interest, value for which to calculate the average run length.
Default is `1` (`True`).
Returns
-------
avg_dur : float
average duration, in samples, of the runs with value `voi`.
Examples
--------
>>> x = [0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1]
>>> average_duration(x, voi=1)
2.0
>>> average_duration(x, voi=0)
5.333333333
>>> lengths = [4, 2, 9, 1, 3, 3]
>>> values = [0, 1, 0, 1, 0, 1]
>>> average_duration(lengths=lengths, values=values, voi=1)
2.0
    >>> average_duration(lengths=[2, 1, 3])
2.0
"""
if a is not None:
l, _, v = rle(a)
lens = l[v == voi]
else:
if lengths is None:
raise ValueError("One of `a` or `lengths` must be provided.")
lens = asarray(lengths)
if values is not None:
lens = lens[values == voi]
if lens.size == 0:
return 0.0
return mean(lens)
def state_transition_probability(a=None, *, lengths=None, values=None, voi=1):
r"""
Compute the probability of transitioning from the desired state to the
second state.
Parameters
----------
    a : array-like, optional
        1D array of binary values. If not provided, `lengths` must be provided,
        and optionally `values`.
    lengths : {numpy.ndarray, list}, optional
        Lengths of runs of the binary values. If not provided, `a` must be. Must
        be the same size as `values`.
    values : {numpy.ndarray, list}, optional
        Values of the runs. If not provided, all `lengths` will be assumed to be
        for the `voi`.
    voi : {int, bool}, optional
        Value of interest, value for which to compute the transition probability.
        Default is `1` (`True`).
    Returns
    -------
    satp : float
        State transition probability: the reciprocal of the mean duration, in
        samples, of the runs with value `voi`.
Examples
--------
>>> x = [0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1]
>>> state_transition_probability(x, voi=1)
0.5
>>> state_transition_probability(x, voi=0)
0.1875
>>> lengths = [4, 2, 9, 1, 3, 3]
>>> values = [0, 1, 0, 1, 0, 1]
>>> state_transition_probability(lengths=lengths, values=values, voi=1)
0.5
    >>> state_transition_probability(lengths=[2, 1, 3])
0.5
References
----------
.. [1] J. Di et al., “Patterns of sedentary and active time accumulation are associated with
mortality in US adults: The NHANES study,” bioRxiv, p. 182337, Aug. 2017,
doi: 10.1101/182337.
Notes
-----
Higher values indicate more frequent switching between states, and as a result may indicate
greater fragmentation of sleep.
The implementation is straightforward [1]_, and is simply defined as
.. math:: satp = \frac{1}{\mu_{awake}}
where :math:`\mu_{awake}` is the mean awake bout time.
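    As a worked illustration using the `voi` bout lengths from the Examples above
    ([2, 1, 3]): the mean bout length is 2.0 samples, so satp = 1 / 2.0 = 0.5.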
"""
if a is not None:
l, _, v = rle(a)
lens = l[v == voi]
else:
if lengths is None:
raise ValueError("One of `a` or `lengths` must be provided.")
lens = asarray(lengths)
if values is not None:
lens = lens[values == voi]
if lens.size == 0:
return nan
return 1 / mean(lens)
def gini_index(a=None, *, lengths=None, values=None, voi=1):
"""
Compute the normalized variability of the state bouts, also known as the GINI
index from economics.
Parameters
----------
    a : array-like, optional
        1D array of binary values. If not provided, `lengths` must be provided,
        and optionally `values`.
    lengths : {numpy.ndarray, list}, optional
        Lengths of runs of the binary values. If not provided, `a` must be. Must
        be the same size as `values`.
    values : {numpy.ndarray, list}, optional
        Values of the runs. If not provided, all `lengths` will be assumed to be
        for the `voi`.
    voi : {int, bool}, optional
        Value of interest, value for which to compute the Gini index.
        Default is `1` (`True`).
    Returns
    -------
    gini : float
        Gini index of the run lengths with value `voi`, bounded between 0 and 1.
Examples
--------
>>> x = [0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1]
>>> gini_index(x, voi=1)
0.333333
>>> gini_index(x, voi=0)
0.375
>>> lengths = [4, 2, 9, 1, 3, 3]
>>> values = [0, 1, 0, 1, 0, 1]
>>> gini_index(lengths=lengths, values=values, voi=1)
0.333333
    >>> gini_index(lengths=[2, 1, 3])
0.333333
References
----------
.. [1] J. Di et al., “Patterns of sedentary and active time accumulation are associated with
mortality in US adults: The NHANES study,” bioRxiv, p. 182337, Aug. 2017,
doi: 10.1101/182337.
Notes
-----
Gini Index values are bounded between 0 and 1, with values near 1 indicating the total
time accumulating due to a small number of longer bouts, whereas values near 0 indicate all
bouts contribute more equally to the total time.
"""
if a is not None:
l, _, v = rle(a)
lens = l[v == voi]
else:
if lengths is None:
raise ValueError("One of `a` or `lengths` must be provided.")
lens = asarray(lengths)
if values is not None:
lens = lens[values == voi]
if lens.size == 0:
return 0.0
return gini(lens, w=None, corr=True)
def average_hazard(a=None, *, lengths=None, values=None, voi=1):
r"""
Compute the average hazard summary of the hazard function, as a function of the
state bout duration. The average hazard represents a summary of the frequency
of transitioning from one state to the other.
Parameters
----------
    a : array-like, optional
        1D array of binary values. If not provided, `lengths` must be provided,
        and optionally `values`.
    lengths : {numpy.ndarray, list}, optional
        Lengths of runs of the binary values. If not provided, `a` must be. Must
        be the same size as `values`.
    values : {numpy.ndarray, list}, optional
        Values of the runs. If not provided, all `lengths` will be assumed to be
        for the `voi`.
    voi : {int, bool}, optional
        Value of interest, value for which to compute the average hazard.
        Default is `1` (`True`).
    Returns
    -------
    avg_haz : float
        Average hazard of the bout durations for the runs with value `voi`.
Examples
--------
>>> x = [0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1]
>>> average_hazard(x, voi=1)
0.61111111
>>> average_hazard(x, voi=0)
0.61111111
>>> lengths = [4, 2, 9, 1, 3, 3]
>>> values = [0, 1, 0, 1, 0, 1]
>>> average_hazard(lengths=lengths, values=values, voi=1)
0.61111111
    >>> average_hazard(lengths=[2, 1, 3])
0.61111111
References
----------
.. [1] J. Di et al., “Patterns of sedentary and active time accumulation are
associated with mortality in US adults: The NHANES study,” bioRxiv,
p. 182337, Aug. 2017, doi: 10.1101/182337.
Notes
-----
Higher values indicate higher frequency in switching from sleep to awake states.
The average hazard is computed per [1]_:
    .. math::
        h(t_{n_i}) = \frac{n\left(t_{n_i}\right)}{n - n^c\left(t_{n_{i-1}}\right)}
        \bar{h} = \frac{1}{m}\sum_{t\in D}h(t)
    where :math:`h(t_{n_i})` is the hazard for the sleep bout of length :math:`t_{n_i}`,
    :math:`n(t_{n_i})` is the number of bouts of length :math:`t_{n_i}`, :math:`n` is
    the total number of sleep bouts, :math:`n^c(t_{n_i})` is the cumulative number of
    bouts less than or equal to length :math:`t_{n_i}`, and :math:`t\in D` indicates all
    bouts up to the maximum length (:math:`D`).
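    As a worked illustration using the `voi` bout lengths from the Examples above
    ([2, 1, 3]): each length occurs once, so the per-length hazards are 1/3, 1/2,
    and 1/1, and the average hazard is (1/3 + 1/2 + 1) / 3 = 0.6111.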
"""
if a is not None:
l, _, v = rle(a)
lens = l[v == voi]
else:
if lengths is None:
raise ValueError("One of `a` or `lengths` must be provided.")
lens = asarray(lengths)
if values is not None:
lens = lens[values == voi]
if lens.size == 0:
return nan
unq, cnts = unique(lens, return_counts=True)
sidx = argsort(unq)
cnts = cnts[sidx]
cumsum_cnts = insert(cumsum(cnts), 0, 0)
h = cnts / (cumsum_cnts[-1] - cumsum_cnts[:-1])
return sum(h) / unq.size
def state_power_law_distribution(a=None, *, lengths=None, values=None, voi=1):
r"""
Compute the scaling factor for the power law distribution over the desired
state bout lengths.
Parameters
----------
    a : array-like, optional
        1D array of binary values. If not provided, `lengths` must be provided,
        and optionally `values`.
    lengths : {numpy.ndarray, list}, optional
        Lengths of runs of the binary values. If not provided, `a` must be. Must
        be the same size as `values`.
    values : {numpy.ndarray, list}, optional
        Values of the runs. If not provided, all `lengths` will be assumed to be
        for the `voi`.
    voi : {int, bool}, optional
        Value of interest, value for which to compute the power law scaling factor.
        Default is `1` (`True`).
    Returns
    -------
    alpha : float
        Power law distribution scaling factor for the durations of the runs with
        value `voi`.
Examples
--------
>>> x = [0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1]
>>> state_power_law_distribution(x, voi=1)
1.7749533004219864
>>> state_power_law_distribution(x, voi=0)
2.5517837760569524
>>> lengths = [4, 2, 9, 1, 3, 3]
>>> values = [0, 1, 0, 1, 0, 1]
>>> state_power_law_distribution(lengths=lengths, values=values, voi=1)
1.7749533004219864
    >>> state_power_law_distribution(lengths=[2, 1, 3])
1.7749533004219864
References
----------
.. [1] J. Di et al., “Patterns of sedentary and active time accumulation are
associated with mortality in US adults: The NHANES study,” bioRxiv,
p. 182337, Aug. 2017, doi: 10.1101/182337.
Notes
-----
Larger `alpha` values indicate that the total sleeping time is accumulated with
a larger portion of shorter sleep bouts.
    The power law scaling factor is computed per [1]_:
.. math:: 1 + \frac{n_{sleep}}{\sum_{i}\log{t_i / \left(min(t) - 0.5\right)}}
where :math:`n_{sleep}` is the number of sleep bouts, :math:`t_i` is the duration
of the :math:`ith` sleep bout, and :math:`min(t)` is the length of the shortest
sleep bout.
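    As a worked illustration using the `voi` bout lengths from the Examples above
    ([2, 1, 3]): the shortest bout is 1, so
    :math:`\alpha = 1 + 3 / (\ln{(2/0.5)} + \ln{(1/0.5)} + \ln{(3/0.5)}) \approx 1.775`,
    matching the value shown in the Examples.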
"""
if a is not None:
l, _, v = rle(a)
lens = l[v == voi]
else:
if lengths is None:
raise ValueError("One of `a` or `lengths` must be provided.")
lens = asarray(lengths)
if values is not None:
lens = lens[values == voi]
if lens.size == 0:
return 1.0
    return 1 + lens.size / sum(log(lens / (lens.min() - 0.5))) | scikit-digital-health | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/utility/fragmentation_endpoints.py | fragmentation_endpoints.py | 0.929432 | 0.592991 |
from warnings import warn
from numpy import argmax, abs, mean, cos, arcsin, sign, zeros_like
__all__ = ["correct_accelerometer_orientation"]
def correct_accelerometer_orientation(accel, v_axis=None, ap_axis=None):
r"""
Applies the correction for acceleration from [1]_ to better align acceleration with the human
body anatomical axes. This correction requires that the original device measuring accleration
is somewhat closely aligned with the anatomical axes already, due to required assumptions.
Quality of the correction will degrade the farther from aligned the input acceleration is.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration values, in units of "g".
v_axis : {None, int}, optional
Vertical axis for `accel`. If not provided (default of None), this will be guessed as the
axis with the largest mean value.
ap_axis : {None, int}, optional
        Anterior-posterior axis for `accel`. If not provided (default of None), the AP and ML
        axes are assigned in order from the remaining non-vertical axes. This will have a
        slight effect on the correction.
Returns
-------
co_accel : numpy.ndarray
(N, 3) array of acceleration with best alignment to the human anatomical axes
Notes
-----
If `v_axis` is not provided (`None`), it is guessed as the largest mean valued axis (absolute
value). While this should work for most cases, it will fail if there is significant
acceleration in the non-vertical axes. As such, if there are very large accelerations present,
this value should be provided.
    If `ap_axis` is not provided, the remaining non-vertical axes are assigned to AP and ML
    in order; no attempt is made to identify them from the signal itself.
The correction algorithm from [1]_ starts by using simple trigonometric identities to correct
the measured acceleration per
.. math::
a_A = a_a\cos{\theta_a} - sign(a_v)a_v\sin{\theta_a}
a_V' = sign(a_v)a_a\sin{\theta_a} + a_v\cos{\theta_a}
a_M = a_m\cos{\theta_m} - sign(a_v)a_V'\sin{\theta_m}
a_V = sign(a_v)a_m\sin{\theta_m} + a_V'\cos{\theta_m}
where $a_i$ is the measured $i$ direction acceleration, $a_I$ is the corrected $I$ direction
acceleration ($i/I=[a/A, m/M, v/V]$, $a$ is anterior-posterior, $m$ is medial-lateral, and
$v$ is vertical), $a_V'$ is a provisional estimate of the corrected vertical acceleration.
$\theta_{a/m}$ are the angles between the measured AP and ML axes and the horizontal plane.
Through some manipulation, [1]_ arrives at the simplification that best estimates for these
angles per
.. math::
\sin{\theta_a} = \bar{a}_a
\sin{\theta_m} = \bar{a}_m
This is the part of the step that requires acceleration to be in "g", as well as mostly
already aligned. If significantly out of alignment, then this small-angle relationship
with sine starts to fall apart, and the correction will not be as appropriate.
References
----------
.. [1] R. Moe-Nilssen, “A new method for evaluating motor control in gait under real-life
environmental conditions. Part 1: The instrument,” Clinical Biomechanics, vol. 13, no.
4–5, pp. 320–327, Jun. 1998, doi: 10.1016/S0268-0033(98)00089-8.
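    Examples
    --------
    A minimal sketch with synthetic data (not from the reference): a signal that is
    mostly vertical on axis 1, with small constant AP and ML offsets, is corrected
    toward the anatomical axes:
    >>> import numpy as np
    >>> rng = np.random.default_rng(5)
    >>> accel = rng.normal([0.05, 0.99, 0.02], 0.01, size=(500, 3))
    >>> co_accel = correct_accelerometer_orientation(accel, v_axis=1, ap_axis=0)
    >>> co_accel.shape
    (500, 3)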
"""
if v_axis is None:
v_axis = argmax(abs(mean(accel, axis=0)))
else:
if not (0 <= v_axis < 3):
raise ValueError("v_axis must be in {0, 1, 2}")
if ap_axis is None:
ap_axis, ml_axis = [i for i in range(3) if i != v_axis]
else:
if not (0 <= ap_axis < 3):
raise ValueError("ap_axis must be in {0, 1, 2}")
ml_axis = [i for i in range(3) if i not in [v_axis, ap_axis]][0]
s_theta_a = mean(accel[:, ap_axis])
s_theta_m = mean(accel[:, ml_axis])
# make sure the theta values are in range
if s_theta_a < -1 or s_theta_a > 1 or s_theta_m < -1 or s_theta_m > 1:
warn("Accel. correction angles outside possible range [-1, 1]. Not correcting.")
return accel
c_theta_a = cos(arcsin(s_theta_a))
c_theta_m = cos(arcsin(s_theta_m))
v_sign = sign(mean(accel[:, v_axis]))
co_accel = zeros_like(accel)
# correct ap axis acceleration
co_accel[:, ap_axis] = (
accel[:, ap_axis] * c_theta_a - v_sign * accel[:, v_axis] * s_theta_a
)
# provisional correction for vertical axis
co_accel[:, v_axis] = (
v_sign * accel[:, ap_axis] * s_theta_a + accel[:, v_axis] * c_theta_a
)
# correct ml axis acceleration
co_accel[:, ml_axis] = (
accel[:, ml_axis] * c_theta_m - v_sign * co_accel[:, v_axis] * s_theta_m
)
# final correction for vertical axis
co_accel[:, v_axis] = (
v_sign * accel[:, ml_axis] * s_theta_m + co_accel[:, v_axis] * c_theta_m
)
    return co_accel | scikit-digital-health | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/utility/orientation.py | orientation.py | 0.968456 | 0.790692 |
from skdh.activity import metrics
def get_available_cutpoints(name=None):
"""
Print the available cutpoints for activity level segmentation, or the
thresholds for a specific set of cutpoints.
Parameters
----------
name : {None, str}, optional
        The name of the cutpoint values to print. If None, will print all
the available cutpoint options.
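    Examples
    --------
    A brief sketch of typical usage. Calling with no arguments prints the full list
    of available cutpoint names; passing a name prints its thresholds:
    >>> get_available_cutpoints("migueles_wrist_adult")
    migueles_wrist_adult
    ---------------
    Metric: metric_enmo
    sedentary range [g]: -100000.000 -> 0.050
    light range [g]: 0.050 -> 0.110
    moderate range [g]: 0.110 -> 0.440
    vigorous range [g]: 0.440 -> 100000.000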
"""
if name is None:
for k in _base_cutpoints:
print(k)
else:
cuts = _base_cutpoints[name]
print(f"{name}\n{'-' * 15}")
print(f"Metric: {cuts['metric']}")
for level in ["sedentary", "light", "moderate", "vigorous"]:
lthresh, uthresh = get_level_thresholds(level, cuts)
print(f"{level} range [g]: {lthresh:0.3f} -> {uthresh:0.3f}")
def get_level_thresholds(level, cutpoints):
if level.lower() in ["sed", "sedentary"]:
return -1e5, cutpoints["sedentary"]
elif level.lower() == "light":
return cutpoints["sedentary"], cutpoints["light"]
elif level.lower() in ["mod", "moderate"]:
return cutpoints["light"], cutpoints["moderate"]
elif level.lower() in ["vig", "vigorous"]:
return cutpoints["moderate"], 1e5
elif level.lower() == "mvpa":
return cutpoints["light"], 1e5
elif level.lower() == "slpa": # sedentary-light phys. act.
return -1e5, cutpoints["light"]
else:
raise ValueError(f"Activity level label [{level}] not recognized.")
def get_metric(name):
return getattr(metrics, name)
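# Example (illustrative sketch only): the helpers above are typically used together
# to look up the metric callable and the threshold range for a given level, e.g.
#   cuts = _base_cutpoints["migueles_wrist_adult"]
#   metric_fn = get_metric(cuts["metric"])  # -> skdh.activity.metrics.metric_enmo
#   lower, upper = get_level_thresholds("light", cuts)  # (0.050, 0.110)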
# ==========================================================
# Activity cutpoints
_base_cutpoints = {}
_base_cutpoints["esliger_lwrist_adult"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": True},
"sedentary": 217 / 80 / 60, # paper at 80hz, summed for each minute long window
"light": 644 / 80 / 60,
"moderate": 1810 / 80 / 60,
}
_base_cutpoints["esliger_rwirst_adult"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": True},
"sedentary": 386 / 80 / 60, # paper at 80hz, summed for each 1min window
"light": 439 / 80 / 60,
"moderate": 2098 / 80 / 60,
}
_base_cutpoints["esliger_lumbar_adult"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": True},
"sedentary": 77 / 80 / 60, # paper at 80hz, summed for each 1min window
"light": 219 / 80 / 60,
"moderate": 2056 / 80 / 60,
}
_base_cutpoints["schaefer_ndomwrist_child6-11"] = {
"metric": "metric_bfen",
"kwargs": {"low_cutoff": 0.2, "high_cutoff": 15, "trim_zero": False},
"sedentary": 0.190,
"light": 0.314,
"moderate": 0.998,
}
_base_cutpoints["phillips_rwrist_child8-14"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": True},
"sedentary": 6 / 80, # paper at 80hz, summed for each 1s window
"light": 21 / 80,
"moderate": 56 / 80,
}
_base_cutpoints["phillips_lwrist_child8-14"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": True},
"sedentary": 7 / 80,
"light": 19 / 80,
"moderate": 60 / 80,
}
_base_cutpoints["phillips_hip_child8-14"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": True},
"sedentary": 3 / 80,
"light": 16 / 80,
"moderate": 51 / 80,
}
_base_cutpoints["vaha-ypya_hip_adult"] = {
"metric": "metric_mad",
"kwargs": {},
"light": 0.091, # originally presented in mg
"moderate": 0.414,
}
_base_cutpoints["hildebrand_hip_adult_actigraph"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": False, "trim_zero": True},
"sedentary": 0.0474,
"light": 0.0691,
"moderate": 0.2587,
}
_base_cutpoints["hildebrand_hip_adult_geneactv"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": False, "trim_zero": True},
"sedentary": 0.0469,
"light": 0.0687,
"moderate": 0.2668,
}
_base_cutpoints["hildebrand_wrist_adult_actigraph"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": False, "trim_zero": True},
"sedentary": 0.0448,
"light": 0.1006,
"moderate": 0.4288,
}
_base_cutpoints["hildebrand_wrist_adult_geneactiv"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": False, "trim_zero": True},
"sedentary": 0.0458,
"light": 0.0932,
"moderate": 0.4183,
}
_base_cutpoints["hildebrand_hip_child7-11_actigraph"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": False, "trim_zero": True},
"sedentary": 0.0633,
"light": 0.1426,
"moderate": 0.4646,
}
_base_cutpoints["hildebrand_hip_child7-11_geneactiv"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": False, "trim_zero": True},
"sedentary": 0.0641,
"light": 0.1528,
"moderate": 0.5143,
}
_base_cutpoints["hildebrand_wrist_child7-11_actigraph"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": False, "trim_zero": True},
"sedentary": 0.0356,
"light": 0.2014,
"moderate": 0.707,
}
_base_cutpoints["hildebrand_wrist_child7-11_geneactiv"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": False, "trim_zero": True},
"sedentary": 0.0563,
"light": 0.1916,
"moderate": 0.6958,
}
_base_cutpoints["migueles_wrist_adult"] = {
"metric": "metric_enmo",
"kwargs": {"take_abs": False, "trim_zero": True},
"sedentary": 0.050,
"light": 0.110,
"moderate": 0.440,
} | scikit-digital-health | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/activity/cutpoints.py | cutpoints.py | 0.767385 | 0.270453 |
from numpy import maximum, abs, repeat, arctan, sqrt, pi
from numpy.linalg import norm
from scipy.signal import butter, sosfiltfilt
from skdh.utility import moving_mean
__all__ = [
"metric_anglez",
"metric_en",
"metric_enmo",
"metric_bfen",
"metric_hfen",
"metric_hfenplus",
"metric_mad",
]
def metric_anglez(accel, wlen, *args, **kwargs):
"""
Compute the angle between the accelerometer z axis and the horizontal plane.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration values in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
Returns
-------
anglez : numpy.ndarray
        (N // wlen, ) array of angles between the accelerometer z axis and the horizontal
        plane, in degrees, averaged over non-overlapping windows of length `wlen`.
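    Examples
    --------
    A small sketch with synthetic data (values chosen to give an exact angle):
    equal acceleration on the x and z axes corresponds to a 45 degree angle from
    the horizontal plane:
    >>> import numpy as np
    >>> accel = np.zeros((100, 3))
    >>> accel[:, 0] = 1.0
    >>> accel[:, 2] = 1.0
    >>> np.round(metric_anglez(accel, 50), 1)
    array([45., 45.])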
"""
anglez = arctan(accel[:, 2] / sqrt(accel[:, 0] ** 2 + accel[:, 1] ** 2)) * (
180 / pi
)
return moving_mean(anglez, wlen, wlen)
def metric_en(accel, wlen, *args, **kwargs):
"""
Compute the euclidean norm.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration values in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
Returns
-------
en : numpy.ndarray
(N, ) array of euclidean norms.
"""
return moving_mean(norm(accel, axis=1), wlen, wlen)
def metric_enmo(accel, wlen, *args, take_abs=False, trim_zero=True, **kwargs):
"""
Compute the euclidean norm minus 1. Works best when the accelerometer data has been calibrated
so that devices at rest measure acceleration norms of 1g.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration values in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
take_abs : bool, optional
Use the absolute value of the difference between euclidean norm and 1g. Default is False.
trim_zero : bool, optional
Trim values to no less than 0. Default is True.
Returns
-------
enmo : numpy.ndarray
(N, ) array of euclidean norms minus 1.
"""
enmo = norm(accel, axis=1) - 1
if take_abs:
enmo = abs(enmo)
if trim_zero:
return moving_mean(maximum(enmo, 0), wlen, wlen)
else:
return moving_mean(enmo, wlen, wlen)
def metric_bfen(accel, wlen, fs, low_cutoff=0.2, high_cutoff=15, **kwargs):
"""
Compute the band-pass filtered euclidean norm.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration values in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
fs : float
Sampling frequency of `accel` in Hz.
low_cutoff : float, optional
Band-pass low cutoff in Hz. Default is 0.2Hz.
high_cutoff : float, optional
Band-pass high cutoff in Hz. Default is 15Hz
Returns
-------
bfen : numpy.ndarray
(N, ) array of band-pass filtered and euclidean normed accelerations.
"""
sos = butter(
4, [2 * low_cutoff / fs, 2 * high_cutoff / fs], btype="bandpass", output="sos"
)
    # no reason for trimming zeros as the norm after the filter will always
# be positive
return moving_mean(norm(sosfiltfilt(sos, accel, axis=0), axis=1), wlen, wlen)
def metric_hfen(accel, wlen, fs, low_cutoff=0.2, trim_zero=True, **kwargs):
"""
Compute the high-pass filtered euclidean norm.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration values in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
fs : float
Sampling frequency of `accel` in Hz.
low_cutoff : float, optional
High-pass cutoff in Hz. Default is 0.2Hz.
trim_zero : bool, optional
Trim values to no less than 0. Default is True.
Returns
-------
hfen : numpy.ndarray
(N, ) array of high-pass filtered and euclidean normed accelerations.
"""
sos = butter(4, 2 * low_cutoff / fs, btype="high", output="sos")
    # no reason for trimming zeros as the norm after the filter will always
# be positive
return moving_mean(norm(sosfiltfilt(sos, accel, axis=0), axis=1), wlen, wlen)
def metric_hfenplus(accel, wlen, fs, cutoff=0.2, trim_zero=True, **kwargs):
"""
Compute the high-pass filtered euclidean norm plus the low-pass filtered euclidean norm
minus 1g.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration values in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
fs : float
Sampling frequency of `accel` in Hz.
cutoff : float, optional
Cutoff in Hz for both high and low filters. Default is 0.2Hz.
trim_zero : bool, optional
Trim values to no less than 0. Default is True.
Returns
-------
hfenp : numpy.ndarray
(N, ) array of high-pass filtered acceleration norm added to the low-pass filtered
norm minus 1g.
"""
sos_low = butter(4, 2 * cutoff / fs, btype="low", output="sos")
sos_high = butter(4, 2 * cutoff / fs, btype="high", output="sos")
acc_high = norm(sosfiltfilt(sos_high, accel, axis=0), axis=1)
acc_low = norm(sosfiltfilt(sos_low, accel, axis=0), axis=1)
if trim_zero:
return moving_mean(maximum(acc_high + acc_low - 1, 0), wlen, wlen)
else:
return moving_mean(acc_high + acc_low - 1, wlen, wlen)
def metric_mad(accel, wlen, *args, **kwargs):
"""
Compute the Mean Amplitude Deviation metric for acceleration.
Parameters
----------
accel : numpy.ndarray
        (N, 3) array of accelerations measured in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
Returns
-------
mad : numpy.ndarray
(N, ) array of computed MAD values.
"""
acc_norm = norm(accel, axis=1)
r_avg = repeat(moving_mean(acc_norm, wlen, wlen), wlen)
mad = moving_mean(abs(acc_norm[: r_avg.size] - r_avg), wlen, wlen)
return mad | scikit-digital-health | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/activity/metrics.py | metrics.py | from numpy import maximum, abs, repeat, arctan, sqrt, pi
from numpy.linalg import norm
from scipy.signal import butter, sosfiltfilt
from skdh.utility import moving_mean
__all__ = [
"metric_anglez",
"metric_en",
"metric_enmo",
"metric_bfen",
"metric_hfen",
"metric_hfenplus",
"metric_mad",
]
def metric_anglez(accel, wlen, *args, **kwargs):
"""
Compute the angle between the accelerometer z axis and the horizontal plane.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration values in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
Returns
-------
anglez : numpy.ndarray
(N, ) array of angles between accelerometer z axis and horizontal plane in degrees.
"""
anglez = arctan(accel[:, 2] / sqrt(accel[:, 0] ** 2 + accel[:, 1] ** 2)) * (
180 / pi
)
return moving_mean(anglez, wlen, wlen)
def metric_en(accel, wlen, *args, **kwargs):
"""
Compute the euclidean norm.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration values in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
Returns
-------
en : numpy.ndarray
(N, ) array of euclidean norms.
"""
return moving_mean(norm(accel, axis=1), wlen, wlen)
def metric_enmo(accel, wlen, *args, take_abs=False, trim_zero=True, **kwargs):
"""
Compute the euclidean norm minus 1. Works best when the accelerometer data has been calibrated
so that devices at rest measure acceleration norms of 1g.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration values in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
take_abs : bool, optional
Use the absolute value of the difference between euclidean norm and 1g. Default is False.
trim_zero : bool, optional
Trim values to no less than 0. Default is True.
Returns
-------
enmo : numpy.ndarray
(N, ) array of euclidean norms minus 1.
"""
enmo = norm(accel, axis=1) - 1
if take_abs:
enmo = abs(enmo)
if trim_zero:
return moving_mean(maximum(enmo, 0), wlen, wlen)
else:
return moving_mean(enmo, wlen, wlen)
def metric_bfen(accel, wlen, fs, low_cutoff=0.2, high_cutoff=15, **kwargs):
"""
Compute the band-pass filtered euclidean norm.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration values in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
fs : float
Sampling frequency of `accel` in Hz.
low_cutoff : float, optional
Band-pass low cutoff in Hz. Default is 0.2Hz.
high_cutoff : float, optional
Band-pass high cutoff in Hz. Default is 15Hz
Returns
-------
bfen : numpy.ndarray
(N, ) array of band-pass filtered and euclidean normed accelerations.
"""
sos = butter(
4, [2 * low_cutoff / fs, 2 * high_cutoff / fs], btype="bandpass", output="sos"
)
    # no reason for trimming zeros as the norm after the filter will always
# be positive
return moving_mean(norm(sosfiltfilt(sos, accel, axis=0), axis=1), wlen, wlen)
def metric_hfen(accel, wlen, fs, low_cutoff=0.2, trim_zero=True, **kwargs):
"""
Compute the high-pass filtered euclidean norm.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration values in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
fs : float
Sampling frequency of `accel` in Hz.
low_cutoff : float, optional
High-pass cutoff in Hz. Default is 0.2Hz.
trim_zero : bool, optional
Trim values to no less than 0. Default is True.
Returns
-------
hfen : numpy.ndarray
(N, ) array of high-pass filtered and euclidean normed accelerations.
"""
sos = butter(4, 2 * low_cutoff / fs, btype="high", output="sos")
    # no reason for trimming zeros as the norm after the filter will always
# be positive
return moving_mean(norm(sosfiltfilt(sos, accel, axis=0), axis=1), wlen, wlen)
def metric_hfenplus(accel, wlen, fs, cutoff=0.2, trim_zero=True, **kwargs):
"""
Compute the high-pass filtered euclidean norm plus the low-pass filtered euclidean norm
minus 1g.
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration values in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
fs : float
Sampling frequency of `accel` in Hz.
cutoff : float, optional
Cutoff in Hz for both high and low filters. Default is 0.2Hz.
trim_zero : bool, optional
Trim values to no less than 0. Default is True.
Returns
-------
hfenp : numpy.ndarray
(N, ) array of high-pass filtered acceleration norm added to the low-pass filtered
norm minus 1g.
"""
sos_low = butter(4, 2 * cutoff / fs, btype="low", output="sos")
sos_high = butter(4, 2 * cutoff / fs, btype="high", output="sos")
acc_high = norm(sosfiltfilt(sos_high, accel, axis=0), axis=1)
acc_low = norm(sosfiltfilt(sos_low, accel, axis=0), axis=1)
if trim_zero:
return moving_mean(maximum(acc_high + acc_low - 1, 0), wlen, wlen)
else:
return moving_mean(acc_high + acc_low - 1, wlen, wlen)
def metric_mad(accel, wlen, *args, **kwargs):
"""
Compute the Mean Amplitude Deviation metric for acceleration.
Parameters
----------
accel : numpy.ndarray
        (N, 3) array of accelerations measured in g.
wlen : int
Window length (in number of samples) for non-overlapping windows.
Returns
-------
mad : numpy.ndarray
(N, ) array of computed MAD values.
"""
acc_norm = norm(accel, axis=1)
r_avg = repeat(moving_mean(acc_norm, wlen, wlen), wlen)
mad = moving_mean(abs(acc_norm[: r_avg.size] - r_avg), wlen, wlen)
return mad | 0.956166 | 0.623835 |
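A brief usage sketch for the metric functions above on synthetic data; the import path is inferred from the file location shown (skdh/activity/metrics.py) and the window settings are illustrative.

import numpy as np
from skdh.activity.metrics import metric_enmo, metric_mad, metric_bfen

fs = 50.0                # sampling frequency, Hz
wlen = int(fs)           # 1 s non-overlapping windows
rng = np.random.default_rng(0)
# ~1 g on the z axis plus a little noise, 60 s of data
accel = rng.normal([0.0, 0.0, 1.0], 0.05, size=(int(60 * fs), 3))

enmo = metric_enmo(accel, wlen)      # mean ENMO per window, in g
mad = metric_mad(accel, wlen)        # mean amplitude deviation per window
bfen = metric_bfen(accel, wlen, fs)  # band-pass (0.2-15 Hz default) filtered norm
print(enmo.shape, mad.shape, bfen.shape)  # one value per 1 s window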
from warnings import warn
from numpy import vstack, asarray, int_
from skdh.base import BaseProcess
from skdh.io.base import check_input_file
from skdh.io._extensions import read_geneactiv
class ReadBin(BaseProcess):
"""
Read a binary .bin file from a GeneActiv sensor into memory. Acceleration values are returned
in units of `g`. If providing a base and period value, included in the output will be the
indices to create windows starting at the `base` time, with a length of `period`.
Parameters
----------
bases : {None, int, list-like}, optional
Base hours [0, 23] in which to start a window of time. Default is None,
which will not do any windowing. Both `base` and `period` must be defined
in order to window. Can use multiple, but the number of `bases` must match
the number of `periods`.
periods : {None, int, list-like}, optional
Periods for each window, in [1, 24]. Defines the number of hours per window.
Default is None, which will do no windowing. Both `period` and `base` must
be defined to window. Can use multiple but the number of `periods` must
match the number of `bases`.
ext_error : {"warn", "raise", "skip"}, optional
What to do if the file extension does not match the expected extension (.bin).
Default is "warn". "raise" raises a ValueError. "skip" skips the file
reading altogether and attempts to continue with the pipeline.
Examples
========
    Set up a reader with no windowing:
>>> reader = ReadBin()
>>> reader.predict('example.bin')
{'accel': ..., 'time': ...}
    Set up a reader that does windowing between 8:00 AM and 8:00 PM (20:00):
>>> reader = ReadBin(bases=8, periods=12) # 8 + 12 = 20
>>> reader.predict('example.bin')
{'accel': ..., 'time': ..., 'day_ends': [130, 13951, ...]}
"""
def __init__(self, bases=None, periods=None, ext_error="warn"):
super().__init__(
# kwargs
bases=bases,
periods=periods,
ext_error=ext_error,
)
if ext_error.lower() in ["warn", "raise", "skip"]:
self.ext_error = ext_error.lower()
else:
raise ValueError("`ext_error` must be one of 'raise', 'warn', 'skip'.")
if (bases is None) and (periods is None):
self.window = False
self.bases = asarray([0]) # needs to be defined for passing to extensions
self.periods = asarray([12])
elif (bases is None) or (periods is None):
warn("One of base or period is None, not windowing", UserWarning)
self.window = False
self.bases = asarray([0])
self.periods = asarray([12])
else:
if isinstance(bases, int) and isinstance(periods, int):
bases = asarray([bases])
periods = asarray([periods])
else:
bases = asarray(bases, dtype=int_)
periods = asarray(periods, dtype=int_)
if ((0 <= bases) & (bases <= 23)).all() and (
(1 <= periods) & (periods <= 24)
).all():
self.window = True
self.bases = bases
self.periods = periods
else:
raise ValueError(
"Base must be in [0, 23] and period must be in [1, 23]"
)
@check_input_file(".bin")
def predict(self, file=None, **kwargs):
"""
predict(file)
Read the data from the GeneActiv file
Parameters
----------
file : {str, Path}
Path to the file to read. Must either be a string, or be able to be converted by
`str(file)`
Returns
-------
data : dict
Dictionary of the data contained in the file.
Raises
------
ValueError
If the file name is not provided
Notes
-----
The keys in `data` depend on which data the file contained. Potential keys are:
- `accel`: acceleration [g]
- `time`: timestamps [s]
- `light`: light values [unknown]
- `temperature`: temperature [deg C]
- `day_ends`: window indices
"""
super().predict(expect_days=False, expect_wear=False, file=file, **kwargs)
# read the file
n_max, fs, acc, time, light, temp, starts, stops = read_geneactiv(
file, self.bases, self.periods
)
results = {
self._time: time[:n_max],
self._acc: acc[:n_max, :],
self._temp: temp[:n_max],
"light": light[:n_max],
"fs": fs,
"file": file,
}
if self.window:
results[self._days] = {}
for i, data in enumerate(zip(self.bases, self.periods)):
strt = starts[stops[:, i] != 0, i]
stp = stops[stops[:, i] != 0, i]
results[self._days][(data[0], data[1])] = vstack((strt, stp)).T
kwargs.update(results)
return (kwargs, None) if self._in_pipeline else kwargs | scikit-digital-health | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/io/geneactiv.py | geneactiv.py | from warnings import warn
from numpy import vstack, asarray, int_
from skdh.base import BaseProcess
from skdh.io.base import check_input_file
from skdh.io._extensions import read_geneactiv
class ReadBin(BaseProcess):
"""
Read a binary .bin file from a GeneActiv sensor into memory. Acceleration values are returned
in units of `g`. If providing a base and period value, included in the output will be the
indices to create windows starting at the `base` time, with a length of `period`.
Parameters
----------
bases : {None, int, list-like}, optional
Base hours [0, 23] in which to start a window of time. Default is None,
which will not do any windowing. Both `base` and `period` must be defined
in order to window. Can use multiple, but the number of `bases` must match
the number of `periods`.
periods : {None, int, list-like}, optional
Periods for each window, in [1, 24]. Defines the number of hours per window.
Default is None, which will do no windowing. Both `period` and `base` must
be defined to window. Can use multiple but the number of `periods` must
match the number of `bases`.
ext_error : {"warn", "raise", "skip"}, optional
What to do if the file extension does not match the expected extension (.bin).
Default is "warn". "raise" raises a ValueError. "skip" skips the file
reading altogether and attempts to continue with the pipeline.
Examples
========
    Set up a reader with no windowing:
>>> reader = ReadBin()
>>> reader.predict('example.bin')
{'accel': ..., 'time': ...}
    Set up a reader that does windowing between 8:00 AM and 8:00 PM (20:00):
>>> reader = ReadBin(bases=8, periods=12) # 8 + 12 = 20
>>> reader.predict('example.bin')
{'accel': ..., 'time': ..., 'day_ends': [130, 13951, ...]}
"""
def __init__(self, bases=None, periods=None, ext_error="warn"):
super().__init__(
# kwargs
bases=bases,
periods=periods,
ext_error=ext_error,
)
if ext_error.lower() in ["warn", "raise", "skip"]:
self.ext_error = ext_error.lower()
else:
raise ValueError("`ext_error` must be one of 'raise', 'warn', 'skip'.")
if (bases is None) and (periods is None):
self.window = False
self.bases = asarray([0]) # needs to be defined for passing to extensions
self.periods = asarray([12])
elif (bases is None) or (periods is None):
warn("One of base or period is None, not windowing", UserWarning)
self.window = False
self.bases = asarray([0])
self.periods = asarray([12])
else:
if isinstance(bases, int) and isinstance(periods, int):
bases = asarray([bases])
periods = asarray([periods])
else:
bases = asarray(bases, dtype=int_)
periods = asarray(periods, dtype=int_)
if ((0 <= bases) & (bases <= 23)).all() and (
(1 <= periods) & (periods <= 24)
).all():
self.window = True
self.bases = bases
self.periods = periods
else:
raise ValueError(
"Base must be in [0, 23] and period must be in [1, 23]"
)
@check_input_file(".bin")
def predict(self, file=None, **kwargs):
"""
predict(file)
Read the data from the GeneActiv file
Parameters
----------
file : {str, Path}
Path to the file to read. Must either be a string, or be able to be converted by
`str(file)`
Returns
-------
data : dict
Dictionary of the data contained in the file.
Raises
------
ValueError
If the file name is not provided
Notes
-----
The keys in `data` depend on which data the file contained. Potential keys are:
- `accel`: acceleration [g]
- `time`: timestamps [s]
- `light`: light values [unknown]
- `temperature`: temperature [deg C]
- `day_ends`: window indices
"""
super().predict(expect_days=False, expect_wear=False, file=file, **kwargs)
# read the file
n_max, fs, acc, time, light, temp, starts, stops = read_geneactiv(
file, self.bases, self.periods
)
results = {
self._time: time[:n_max],
self._acc: acc[:n_max, :],
self._temp: temp[:n_max],
"light": light[:n_max],
"fs": fs,
"file": file,
}
if self.window:
results[self._days] = {}
for i, data in enumerate(zip(self.bases, self.periods)):
strt = starts[stops[:, i] != 0, i]
stp = stops[stops[:, i] != 0, i]
results[self._days][(data[0], data[1])] = vstack((strt, stp)).T
kwargs.update(results)
return (kwargs, None) if self._in_pipeline else kwargs | 0.920652 | 0.650883 |
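A usage sketch for `ReadBin`. The file path is hypothetical, and the import assumes `ReadBin` is re-exported from `skdh.io`; otherwise import it from `skdh.io.geneactiv`.

from skdh.io import ReadBin

reader = ReadBin(bases=[0, 12], periods=[24, 12], ext_error="raise")
data = reader.predict("subject01.bin")  # hypothetical GeneActiv file
print(data["fs"], data["accel"].shape, data["time"].shape)
# windowing indices are keyed by (base, period) pairs
for (base, period), idx in data["day_ends"].items():
    print(base, period, idx.shape)  # (n_windows, 2) start/stop sample indices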
from pathlib import Path
import functools
from warnings import warn
from skdh.io.utility import FileSizeError
def check_input_file(
extension,
check_size=True,
ext_message="File extension [{}] does not match expected [{}]",
):
"""
Check the input file for existence and suffix.
Parameters
----------
extension : str
        Expected file suffix, e.g. '.abc'.
check_size : bool, optional
Check file size is over 1kb. Default is True.
ext_message : str, optional
Message to print if the suffix does not match. Should take 2 format arguments
('{}'), the first for the actual file suffix, and the second for the
expected suffix.
"""
def decorator_check_input_file(func):
@functools.wraps(func)
def wrapper_check_input_file(self, file=None, **kwargs):
# check if the file is provided
if file is None:
raise ValueError("`file` must not be None.")
# make a path instance for ease of use
pfile = Path(file)
# make sure the file exists
if not pfile.exists():
raise FileNotFoundError(f"File {file} does not exist.")
# check that the file matches the expected extension
if pfile.suffix != extension:
if self.ext_error == "warn":
warn(ext_message.format(pfile.suffix, extension), UserWarning)
elif self.ext_error == "raise":
raise ValueError(ext_message.format(pfile.suffix, extension))
elif self.ext_error == "skip":
kwargs.update({"file": str(file)})
return (kwargs, None) if self._in_pipeline else kwargs
# check file size if desired
if check_size:
if pfile.stat().st_size < 1000:
raise FileSizeError("File is less than 1kb, nothing to read.")
# cast to a string
file = str(file)
return func(self, file=file, **kwargs)
return wrapper_check_input_file
return decorator_check_input_file | scikit-digital-health | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/io/base.py | base.py | from pathlib import Path
import functools
from warnings import warn
from skdh.io.utility import FileSizeError
def check_input_file(
extension,
check_size=True,
ext_message="File extension [{}] does not match expected [{}]",
):
"""
Check the input file for existence and suffix.
Parameters
----------
extension : str
        Expected file suffix, e.g. '.abc'.
check_size : bool, optional
Check file size is over 1kb. Default is True.
ext_message : str, optional
Message to print if the suffix does not match. Should take 2 format arguments
('{}'), the first for the actual file suffix, and the second for the
expected suffix.
"""
def decorator_check_input_file(func):
@functools.wraps(func)
def wrapper_check_input_file(self, file=None, **kwargs):
# check if the file is provided
if file is None:
raise ValueError("`file` must not be None.")
# make a path instance for ease of use
pfile = Path(file)
# make sure the file exists
if not pfile.exists():
raise FileNotFoundError(f"File {file} does not exist.")
# check that the file matches the expected extension
if pfile.suffix != extension:
if self.ext_error == "warn":
warn(ext_message.format(pfile.suffix, extension), UserWarning)
elif self.ext_error == "raise":
raise ValueError(ext_message.format(pfile.suffix, extension))
elif self.ext_error == "skip":
kwargs.update({"file": str(file)})
return (kwargs, None) if self._in_pipeline else kwargs
# check file size if desired
if check_size:
if pfile.stat().st_size < 1000:
raise FileSizeError("File is less than 1kb, nothing to read.")
# cast to a string
file = str(file)
return func(self, file=file, **kwargs)
return wrapper_check_input_file
return decorator_check_input_file | 0.78535 | 0.273797 |
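A self-contained sketch of how `check_input_file` is intended to wrap a reader's `predict` method. `DummyReader` is hypothetical; in skdh the `ext_error` and `_in_pipeline` attributes come from the concrete reader classes and `BaseProcess`.

from skdh.io.base import check_input_file

class DummyReader:
    def __init__(self, ext_error="warn"):
        self.ext_error = ext_error  # consulted by the wrapper on a suffix mismatch
        self._in_pipeline = False   # wrapper returns (kwargs, None) when True

    @check_input_file(".csv", check_size=False)
    def predict(self, file=None, **kwargs):
        # here `file` is guaranteed to exist, to have passed the extension
        # check, and to already be a plain string
        kwargs.update({"file": file})
        return kwargs

reader = DummyReader(ext_error="skip")
# predict(None) raises ValueError, a missing path raises FileNotFoundError, and a
# mismatched suffix with ext_error="skip" returns the kwargs without reading anything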
from numpy import load as np_load
from skdh.base import BaseProcess
from skdh.io.base import check_input_file
class ReadNumpyFile(BaseProcess):
"""
Read a Numpy compressed file into memory. The file should have been
created by `numpy.savez`. The data contained is read in
unprocessed - ie acceleration is already assumed to be in units of
'g' and time in units of seconds. No day windowing is performed. Expected
keys are `time` and `accel`. If `fs` is present, it is used as well.
Parameters
----------
allow_pickle : bool, optional
Allow pickled objects in the NumPy file. Default is False, which is the safer option.
For more information see :py:meth:`numpy.load`.
ext_error : {"warn", "raise", "skip"}, optional
What to do if the file extension does not match the expected extension (.npz).
Default is "warn". "raise" raises a ValueError. "skip" skips the file
reading altogether and attempts to continue with the pipeline.
"""
def __init__(self, allow_pickle=False, ext_error="warn"):
super(ReadNumpyFile, self).__init__(
allow_pickle=allow_pickle, ext_error=ext_error
)
self.allow_pickle = allow_pickle
if ext_error.lower() in ["warn", "raise", "skip"]:
self.ext_error = ext_error.lower()
else:
raise ValueError("`ext_error` must be one of 'raise', 'warn', 'skip'.")
@check_input_file(".npz", check_size=True)
def predict(self, file=None, **kwargs):
"""
predict(file)
Read the data from a numpy compressed file.
Parameters
----------
file : {str, Path}
Path to the file to read. Must either be a string, or be able to be
converted by `str(file)`.
Returns
-------
data : dict
Dictionary of the data contained in the file.
Raises
------
ValueError
If the file name is not provided
Notes
-----
The keys in `data` depend on which data the file contained. Potential keys are:
- `accel`: acceleration [g]
- `time`: timestamps [s]
- `fs`: sampling frequency in Hz.
"""
super().predict(expect_days=False, expect_wear=False, file=file, **kwargs)
with np_load(file, allow_pickle=self.allow_pickle) as data:
kwargs.update(data) # pull everything in
# make sure that fs is saved properly
if "fs" in data:
kwargs["fs"] = data["fs"][()]
# check that time and accel are in the correct names
if self._time not in kwargs or self._acc not in kwargs:
raise ValueError(
f"Missing `{self._time}` or `{self._acc}` arrays in the file"
)
# make sure we return the file
kwargs.update({"file": file})
return (kwargs, None) if self._in_pipeline else kwargs | scikit-digital-health | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/io/numpy_compressed.py | numpy_compressed.py | from numpy import load as np_load
from skdh.base import BaseProcess
from skdh.io.base import check_input_file
class ReadNumpyFile(BaseProcess):
"""
Read a Numpy compressed file into memory. The file should have been
created by `numpy.savez`. The data contained is read in
unprocessed - ie acceleration is already assumed to be in units of
'g' and time in units of seconds. No day windowing is performed. Expected
keys are `time` and `accel`. If `fs` is present, it is used as well.
Parameters
----------
allow_pickle : bool, optional
Allow pickled objects in the NumPy file. Default is False, which is the safer option.
For more information see :py:meth:`numpy.load`.
ext_error : {"warn", "raise", "skip"}, optional
What to do if the file extension does not match the expected extension (.npz).
Default is "warn". "raise" raises a ValueError. "skip" skips the file
reading altogether and attempts to continue with the pipeline.
"""
def __init__(self, allow_pickle=False, ext_error="warn"):
super(ReadNumpyFile, self).__init__(
allow_pickle=allow_pickle, ext_error=ext_error
)
self.allow_pickle = allow_pickle
if ext_error.lower() in ["warn", "raise", "skip"]:
self.ext_error = ext_error.lower()
else:
raise ValueError("`ext_error` must be one of 'raise', 'warn', 'skip'.")
@check_input_file(".npz", check_size=True)
def predict(self, file=None, **kwargs):
"""
predict(file)
Read the data from a numpy compressed file.
Parameters
----------
file : {str, Path}
Path to the file to read. Must either be a string, or be able to be
converted by `str(file)`.
Returns
-------
data : dict
Dictionary of the data contained in the file.
Raises
------
ValueError
If the file name is not provided
Notes
-----
The keys in `data` depend on which data the file contained. Potential keys are:
- `accel`: acceleration [g]
- `time`: timestamps [s]
- `fs`: sampling frequency in Hz.
"""
super().predict(expect_days=False, expect_wear=False, file=file, **kwargs)
with np_load(file, allow_pickle=self.allow_pickle) as data:
kwargs.update(data) # pull everything in
# make sure that fs is saved properly
if "fs" in data:
kwargs["fs"] = data["fs"][()]
# check that time and accel are in the correct names
if self._time not in kwargs or self._acc not in kwargs:
raise ValueError(
f"Missing `{self._time}` or `{self._acc}` arrays in the file"
)
# make sure we return the file
kwargs.update({"file": file})
return (kwargs, None) if self._in_pipeline else kwargs | 0.870542 | 0.500977 |
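A usage sketch for `ReadNumpyFile`: write a compatible `.npz` with `numpy.savez`, then read it back. The file name is illustrative, and the import assumes `ReadNumpyFile` is re-exported from `skdh.io` (otherwise use `skdh.io.numpy_compressed`).

import numpy as np
from skdh.io import ReadNumpyFile

fs = 50.0
time = np.arange(0.0, 60.0, 1 / fs)
accel = np.zeros((time.size, 3))
accel[:, 2] = 1.0  # at rest, ~1 g on the z axis
np.savez("example_data.npz", time=time, accel=accel, fs=fs)

reader = ReadNumpyFile(ext_error="raise")
data = reader.predict("example_data.npz")
print(data["fs"], data["accel"].shape, data["time"].shape)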
from warnings import warn
from numpy import vstack, asarray, ascontiguousarray, minimum, int_
from skdh.base import BaseProcess
from skdh.io.base import check_input_file
from skdh.io._extensions import read_axivity
class UnexpectedAxesError(Exception):
pass
class ReadCwa(BaseProcess):
"""
    Read a binary CWA file from an Axivity sensor into memory. Acceleration is returned in units of
'g' while angular velocity (if available) is returned in units of `deg/s`. If providing a base
and period value, included in the output will be the indices to create windows starting at
the `base` time, with a length of `period`.
Parameters
----------
bases : {None, int}, optional
Base hour [0, 23] in which to start a window of time. Default is None, which
will not do any windowing. Both `base` and `period` must be defined in order
to window.
periods : {None, int}, optional
Period for each window, in [1, 24]. Defines the number of hours per window.
Default is None, which will do no windowing. Both `period` and `base` must
be defined to window
ext_error : {"warn", "raise", "skip"}, optional
What to do if the file extension does not match the expected extension (.cwa).
Default is "warn". "raise" raises a ValueError. "skip" skips the file
reading altogether and attempts to continue with the pipeline.
Examples
--------
    Set up a reader with no windowing:
>>> reader = ReadCwa()
>>> reader.predict('example.cwa')
{'accel': ..., 'time': ..., ...}
    Set up a reader that does windowing between 8:00 AM and 8:00 PM (20:00):
>>> reader = ReadCwa(bases=8, periods=12) # 8 + 12 = 20
>>> reader.predict('example.cwa')
{'accel': ..., 'time': ..., 'day_ends': [130, 13951, ...], ...}
"""
def __init__(self, bases=None, periods=None, ext_error="warn"):
super().__init__(
# kwargs
bases=bases,
periods=periods,
ext_error=ext_error,
)
if ext_error.lower() in ["warn", "raise", "skip"]:
self.ext_error = ext_error.lower()
else:
raise ValueError("`ext_error` must be one of 'raise', 'warn', 'skip'.")
if (bases is None) and (periods is None):
self.window = False
self.bases = asarray([0]) # needs to be defined for passing to extensions
self.periods = asarray([12])
elif (bases is None) or (periods is None):
warn("One of base or period is None, not windowing", UserWarning)
self.window = False
self.bases = asarray([0])
self.periods = asarray([12])
else:
if isinstance(bases, int) and isinstance(periods, int):
bases = asarray([bases])
periods = asarray([periods])
else:
bases = asarray(bases, dtype=int_)
periods = asarray(periods, dtype=int_)
if ((0 <= bases) & (bases <= 23)).all() and (
(1 <= periods) & (periods <= 24)
).all():
self.window = True
self.bases = bases
self.periods = periods
else:
raise ValueError(
"Base must be in [0, 23] and period must be in [1, 23]"
)
@check_input_file(".cwa")
def predict(self, file=None, **kwargs):
"""
predict(file)
Read the data from the axivity file
Parameters
----------
file : {str, Path}
Path to the file to read. Must either be a string, or be able to be converted by
`str(file)`
Returns
-------
data : dict
Dictionary of the data contained in the file.
Raises
------
ValueError
If the file name is not provided
UnexpectedAxesError
If the number of axes returned is not 3, 6 or 9
Notes
-----
The keys in `data` depend on which data the file contained. Potential keys are:
- `accel`: acceleration [g]
- `gyro`: angular velocity [deg/s]
- `magnet`: magnetic field readings [uT]
- `time`: timestamps [s]
- `day_ends`: window indices
"""
super().predict(expect_days=False, expect_wear=False, file=file, **kwargs)
# read the file
fs, n_bad_samples, imudata, ts, temperature, starts, stops = read_axivity(
file, self.bases, self.periods
)
# end = None if n_bad_samples == 0 else -n_bad_samples
end = None
num_axes = imudata.shape[1]
gyr_axes = mag_axes = None
if num_axes == 3:
acc_axes = slice(None)
elif num_axes == 6:
gyr_axes = slice(3)
acc_axes = slice(3, 6)
elif num_axes == 9: # pragma: no cover :: don't have data to test this
gyr_axes = slice(3)
acc_axes = slice(3, 6)
mag_axes = slice(6, 9)
else: # pragma: no cover :: not expected to reach here only if file is corrupt
raise UnexpectedAxesError("Unexpected number of axes in the IMU data")
results = {
self._time: ts[:end],
"file": file,
"fs": fs,
self._temp: temperature[:end],
}
if acc_axes is not None:
results[self._acc] = ascontiguousarray(imudata[:end, acc_axes])
if gyr_axes is not None:
results[self._gyro] = ascontiguousarray(imudata[:end, gyr_axes])
if mag_axes is not None: # pragma: no cover :: don't have data to test this
results[self._mag] = ascontiguousarray(imudata[:end, mag_axes])
if self.window:
results[self._days] = {}
for i, data in enumerate(zip(self.bases, self.periods)):
strt = starts[stops[:, i] != 0, i]
stp = stops[stops[:, i] != 0, i]
results[self._days][(data[0], data[1])] = minimum(
vstack((strt, stp)).T, results[self._time].size - 1
)
kwargs.update(results)
return (kwargs, None) if self._in_pipeline else kwargs | scikit-digital-health | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/io/axivity.py | axivity.py | from warnings import warn
from numpy import vstack, asarray, ascontiguousarray, minimum, int_
from skdh.base import BaseProcess
from skdh.io.base import check_input_file
from skdh.io._extensions import read_axivity
class UnexpectedAxesError(Exception):
pass
class ReadCwa(BaseProcess):
"""
    Read a binary CWA file from an Axivity sensor into memory. Acceleration is returned in units of
'g' while angular velocity (if available) is returned in units of `deg/s`. If providing a base
and period value, included in the output will be the indices to create windows starting at
the `base` time, with a length of `period`.
Parameters
----------
bases : {None, int}, optional
Base hour [0, 23] in which to start a window of time. Default is None, which
will not do any windowing. Both `base` and `period` must be defined in order
to window.
periods : {None, int}, optional
Period for each window, in [1, 24]. Defines the number of hours per window.
Default is None, which will do no windowing. Both `period` and `base` must
be defined to window
ext_error : {"warn", "raise", "skip"}, optional
What to do if the file extension does not match the expected extension (.cwa).
Default is "warn". "raise" raises a ValueError. "skip" skips the file
reading altogether and attempts to continue with the pipeline.
Examples
--------
    Set up a reader with no windowing:
>>> reader = ReadCwa()
>>> reader.predict('example.cwa')
{'accel': ..., 'time': ..., ...}
    Set up a reader that does windowing between 8:00 AM and 8:00 PM (20:00):
>>> reader = ReadCwa(bases=8, periods=12) # 8 + 12 = 20
>>> reader.predict('example.cwa')
{'accel': ..., 'time': ..., 'day_ends': [130, 13951, ...], ...}
"""
def __init__(self, bases=None, periods=None, ext_error="warn"):
super().__init__(
# kwargs
bases=bases,
periods=periods,
ext_error=ext_error,
)
if ext_error.lower() in ["warn", "raise", "skip"]:
self.ext_error = ext_error.lower()
else:
raise ValueError("`ext_error` must be one of 'raise', 'warn', 'skip'.")
if (bases is None) and (periods is None):
self.window = False
self.bases = asarray([0]) # needs to be defined for passing to extensions
self.periods = asarray([12])
elif (bases is None) or (periods is None):
warn("One of base or period is None, not windowing", UserWarning)
self.window = False
self.bases = asarray([0])
self.periods = asarray([12])
else:
if isinstance(bases, int) and isinstance(periods, int):
bases = asarray([bases])
periods = asarray([periods])
else:
bases = asarray(bases, dtype=int_)
periods = asarray(periods, dtype=int_)
if ((0 <= bases) & (bases <= 23)).all() and (
(1 <= periods) & (periods <= 24)
).all():
self.window = True
self.bases = bases
self.periods = periods
else:
raise ValueError(
"Base must be in [0, 23] and period must be in [1, 23]"
)
@check_input_file(".cwa")
def predict(self, file=None, **kwargs):
"""
predict(file)
Read the data from the axivity file
Parameters
----------
file : {str, Path}
Path to the file to read. Must either be a string, or be able to be converted by
`str(file)`
Returns
-------
data : dict
Dictionary of the data contained in the file.
Raises
------
ValueError
If the file name is not provided
UnexpectedAxesError
If the number of axes returned is not 3, 6 or 9
Notes
-----
The keys in `data` depend on which data the file contained. Potential keys are:
- `accel`: acceleration [g]
- `gyro`: angular velocity [deg/s]
- `magnet`: magnetic field readings [uT]
- `time`: timestamps [s]
- `day_ends`: window indices
"""
super().predict(expect_days=False, expect_wear=False, file=file, **kwargs)
# read the file
fs, n_bad_samples, imudata, ts, temperature, starts, stops = read_axivity(
file, self.bases, self.periods
)
# end = None if n_bad_samples == 0 else -n_bad_samples
end = None
num_axes = imudata.shape[1]
gyr_axes = mag_axes = None
if num_axes == 3:
acc_axes = slice(None)
elif num_axes == 6:
gyr_axes = slice(3)
acc_axes = slice(3, 6)
elif num_axes == 9: # pragma: no cover :: don't have data to test this
gyr_axes = slice(3)
acc_axes = slice(3, 6)
mag_axes = slice(6, 9)
else: # pragma: no cover :: not expected to reach here only if file is corrupt
raise UnexpectedAxesError("Unexpected number of axes in the IMU data")
results = {
self._time: ts[:end],
"file": file,
"fs": fs,
self._temp: temperature[:end],
}
if acc_axes is not None:
results[self._acc] = ascontiguousarray(imudata[:end, acc_axes])
if gyr_axes is not None:
results[self._gyro] = ascontiguousarray(imudata[:end, gyr_axes])
if mag_axes is not None: # pragma: no cover :: don't have data to test this
results[self._mag] = ascontiguousarray(imudata[:end, mag_axes])
if self.window:
results[self._days] = {}
for i, data in enumerate(zip(self.bases, self.periods)):
strt = starts[stops[:, i] != 0, i]
stp = stops[stops[:, i] != 0, i]
results[self._days][(data[0], data[1])] = minimum(
vstack((strt, stp)).T, results[self._time].size - 1
)
kwargs.update(results)
return (kwargs, None) if self._in_pipeline else kwargs | 0.89796 | 0.60288 |
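A usage sketch for `ReadCwa` with a hypothetical .cwa file. The import assumes `ReadCwa` is re-exported from `skdh.io`; gyroscope and magnetometer keys appear only for 6- or 9-axis recordings.

from skdh.io import ReadCwa

reader = ReadCwa(bases=8, periods=12)
data = reader.predict("subject01.cwa")  # hypothetical Axivity file
print(data["fs"], data["accel"].shape)
if "gyro" in data:
    print(data["gyro"].shape)  # present only when angular velocity was recorded
print(data["day_ends"][(8, 12)])  # (n_windows, 2) start/stop sample indices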
from sys import version_info
from numpy import isclose, where, diff, insert, append, ascontiguousarray, int_
from numpy.linalg import norm
from scipy.signal import butter, sosfiltfilt
import lightgbm as lgb
from skdh.utility import get_windowed_view
from skdh.utility.internal import rle
from skdh.features import Bank
if version_info >= (3, 7):
from importlib import resources
else: # pragma: no cover
import importlib_resources
def _resolve_path(mod, file):
if version_info >= (3, 7):
with resources.path(mod, file) as file_path:
path = file_path
else: # pragma: no cover
with importlib_resources.path(mod, file) as file_path:
path = file_path
return path
class DimensionMismatchError(Exception):
pass
def get_gait_classification_lgbm(gait_starts, gait_stops, accel, fs):
"""
Get classification of windows of accelerometer data using the LightGBM classifier
Parameters
----------
gait_starts : {None, numpy.ndarray}
Provided gait start indices.
gait_stops : {None, numpy.ndarray}
Provided gait stop indices.
accel : numpy.ndarray
(N, 3) array of acceleration values, in units of "g"
fs : float
Sampling frequency for the data
"""
if gait_starts is not None and gait_stops is not None:
return gait_starts, gait_stops
else:
if not isclose(fs, 50.0) and not isclose(fs, 20.0):
raise ValueError("fs must be either 50hz or 20hz.")
suffix = "50hz" if fs == 50.0 else "20hz"
wlen = int(fs * 3) # window length, 3 seconds
wstep = wlen # non-overlapping windows
thresh = 0.7 # mean + 1 stdev of best threshold for maximizing F1 score.
        # used to try to minimize false positives
# band-pass filter
sos = butter(1, [2 * 0.25 / fs, 2 * 5 / fs], btype="band", output="sos")
accel_filt = ascontiguousarray(sosfiltfilt(sos, norm(accel, axis=1)))
# window, data will already be in c-contiguous layout
accel_w = get_windowed_view(accel_filt, wlen, wstep, ensure_c_contiguity=False)
# get the feature bank
feat_bank = Bank() # data is already windowed
feat_bank.load(_resolve_path("skdh.gait.model", "final_features.json"))
# compute the features
accel_feats = feat_bank.compute(accel_w, fs=fs, axis=1, index_axis=None)
# output shape is (18, 99), need to transpose when passing to classifier
# load the classification model
lgb_file = str(
_resolve_path(
"skdh.gait.model", f"lgbm_gait_classifier_no-stairs_{suffix}.lgbm"
)
)
bst = lgb.Booster(model_file=lgb_file)
# predict
gait_predictions = (
bst.predict(accel_feats.T, raw_score=False) > thresh
).astype(int_)
lengths, starts, vals = rle(gait_predictions)
bout_starts = starts[vals == 1]
bout_stops = bout_starts + lengths[vals == 1]
# convert to actual values that match up with data
bout_starts *= wstep
bout_stops = bout_stops * wstep + (
wlen - wstep
) # account for edges, if windows overlap
return bout_starts.astype("int"), bout_stops.astype("int") | scikit-digital-health | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/gait/get_gait_classification.py | get_gait_classification.py | from sys import version_info
from numpy import isclose, where, diff, insert, append, ascontiguousarray, int_
from numpy.linalg import norm
from scipy.signal import butter, sosfiltfilt
import lightgbm as lgb
from skdh.utility import get_windowed_view
from skdh.utility.internal import rle
from skdh.features import Bank
if version_info >= (3, 7):
from importlib import resources
else: # pragma: no cover
import importlib_resources
def _resolve_path(mod, file):
if version_info >= (3, 7):
with resources.path(mod, file) as file_path:
path = file_path
else: # pragma: no cover
with importlib_resources.path(mod, file) as file_path:
path = file_path
return path
class DimensionMismatchError(Exception):
pass
def get_gait_classification_lgbm(gait_starts, gait_stops, accel, fs):
"""
Get classification of windows of accelerometer data using the LightGBM classifier
Parameters
----------
gait_starts : {None, numpy.ndarray}
Provided gait start indices.
gait_stops : {None, numpy.ndarray}
Provided gait stop indices.
accel : numpy.ndarray
(N, 3) array of acceleration values, in units of "g"
fs : float
Sampling frequency for the data
"""
if gait_starts is not None and gait_stops is not None:
return gait_starts, gait_stops
else:
if not isclose(fs, 50.0) and not isclose(fs, 20.0):
raise ValueError("fs must be either 50hz or 20hz.")
suffix = "50hz" if fs == 50.0 else "20hz"
wlen = int(fs * 3) # window length, 3 seconds
wstep = wlen # non-overlapping windows
thresh = 0.7 # mean + 1 stdev of best threshold for maximizing F1 score.
        # used to try to minimize false positives
# band-pass filter
sos = butter(1, [2 * 0.25 / fs, 2 * 5 / fs], btype="band", output="sos")
accel_filt = ascontiguousarray(sosfiltfilt(sos, norm(accel, axis=1)))
# window, data will already be in c-contiguous layout
accel_w = get_windowed_view(accel_filt, wlen, wstep, ensure_c_contiguity=False)
# get the feature bank
feat_bank = Bank() # data is already windowed
feat_bank.load(_resolve_path("skdh.gait.model", "final_features.json"))
# compute the features
accel_feats = feat_bank.compute(accel_w, fs=fs, axis=1, index_axis=None)
# output shape is (18, 99), need to transpose when passing to classifier
# load the classification model
lgb_file = str(
_resolve_path(
"skdh.gait.model", f"lgbm_gait_classifier_no-stairs_{suffix}.lgbm"
)
)
bst = lgb.Booster(model_file=lgb_file)
# predict
gait_predictions = (
bst.predict(accel_feats.T, raw_score=False) > thresh
).astype(int_)
lengths, starts, vals = rle(gait_predictions)
bout_starts = starts[vals == 1]
bout_stops = bout_starts + lengths[vals == 1]
# convert to actual values that match up with data
bout_starts *= wstep
bout_stops = bout_stops * wstep + (
wlen - wstep
) # account for edges, if windows overlap
return bout_starts.astype("int"), bout_stops.astype("int") | 0.551574 | 0.30767 |
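A sketch of calling the classifier directly on raw acceleration. The data here is synthetic rest data, so few or no gait bouts should be found; `fs` must be 20 or 50 Hz, and the import path is inferred from the file location shown.

import numpy as np
from skdh.gait.get_gait_classification import get_gait_classification_lgbm

fs = 50.0
rng = np.random.default_rng(1)
accel = rng.normal([0.0, 0.0, 1.0], 0.02, size=(int(300 * fs), 3))  # 5 min at rest

starts, stops = get_gait_classification_lgbm(None, None, accel, fs)
print(starts, stops)  # sample indices bounding predicted gait bouts

# pre-computed bout indices are simply passed through unchanged
starts, stops = get_gait_classification_lgbm(np.array([0]), np.array([1000]), accel, fs)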
from numpy import fft, argmax, std, abs, argsort, corrcoef, mean, sign
from scipy.signal import detrend, butter, sosfiltfilt, find_peaks
from scipy.integrate import cumtrapz
from pywt import cwt
from skdh.utility import correct_accelerometer_orientation
from skdh.gait.gait_endpoints import gait_endpoints
def get_cwt_scales(use_optimal_scale, vertical_velocity, original_scale, fs):
"""
Get the CWT scales for the IC and FC events.
Parameters
----------
use_optimal_scale : bool
Use the optimal scale based on step frequency.
vertical_velocity : numpy.ndarray
Vertical velocity, in units of "g".
original_scale : int
The original/default scale for the CWT.
fs : float
Sampling frequency, in Hz.
Returns
-------
scale1 : int
First scale for the CWT. For initial contact events.
scale2 : int
Second scale for the CWT. For final contact events.
"""
if use_optimal_scale:
coef_scale_original, _ = cwt(vertical_velocity, original_scale, "gaus1")
F = abs(fft.rfft(coef_scale_original[0]))
# compute an estimate of the step frequency
step_freq = argmax(F) / vertical_velocity.size * fs
# IC scale: -10 * sf + 56
# FC scale: -52 * sf + 131
        # TODO verify the FC scale equation. This is not in the paper but is a
# guess from the graph
# original fs was 250hz, hence the conversion
scale1 = min(max(round((-10 * step_freq + 56) * (fs / 250)), 1), 90)
scale2 = min(max(round((-52 * step_freq + 131) * (fs / 250)), 1), 90)
# scale range is between 1 and 90
else:
scale1 = scale2 = original_scale
return scale1, scale2
def get_gait_events(
accel,
fs,
ts,
orig_scale,
filter_order,
filter_cutoff,
corr_accel_orient,
use_optimal_scale,
):
"""
Get the bouts of gait from the acceleration during a gait bout
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration during the gait bout.
fs : float
Sampling frequency for the acceleration.
ts : numpy.ndarray
        Array of timestamps (in seconds) corresponding to acceleration sampling times.
orig_scale : int
Original scale for the CWT.
filter_order : int
Low-pass filter order.
filter_cutoff : float
Low-pass filter cutoff in Hz.
corr_accel_orient : bool
Correct the accelerometer orientation.
use_optimal_scale : bool
Use the optimal scale based on step frequency.
Returns
-------
init_contact : numpy.ndarray
Indices of initial contacts
final_contact : numpy.ndarray
Indices of final contacts
vert_accel : numpy.ndarray
Filtered vertical acceleration
v_axis : int
The axis corresponding to the vertical acceleration
"""
    assert accel.shape[0] == ts.size, "`accel` and `ts` size must match"
# figure out vertical axis on a per-bout basis
acc_mean = mean(accel, axis=0)
v_axis = argmax(abs(acc_mean))
va_sign = sign(acc_mean[v_axis]) # sign of the vertical acceleration
# correct acceleration orientation if set
if corr_accel_orient:
# determine AP axis
ac = gait_endpoints._autocovariancefn(
accel, min(accel.shape[0] - 1, 1000), biased=True, axis=0
)
ap_axis = argsort(corrcoef(ac.T)[v_axis])[-2] # last is autocorrelation
accel = correct_accelerometer_orientation(accel, v_axis=v_axis, ap_axis=ap_axis)
vert_accel = detrend(accel[:, v_axis]) # detrend data just in case
# low-pass filter if we can
if 0 < (2 * filter_cutoff / fs) < 1:
sos = butter(filter_order, 2 * filter_cutoff / fs, btype="low", output="sos")
# multiply by 1 to ensure a copy and not a view
filt_vert_accel = sosfiltfilt(sos, vert_accel)
else:
filt_vert_accel = vert_accel * 1
# first integrate the vertical accel to get velocity
vert_velocity = cumtrapz(filt_vert_accel, x=ts - ts[0], initial=0)
# get the CWT scales
scale1, scale2 = get_cwt_scales(use_optimal_scale, vert_velocity, orig_scale, fs)
coef1, _ = cwt(vert_velocity, [scale1, scale2], "gaus1")
"""
Find the local minima in the signal. This should technically always require using
the negative signal in "find_peaks", however the way PyWavelets computes the
CWT results in the opposite signal that we want.
Therefore, if the sign of the acceleration was negative, we need to use the
    positive coefficient signal, and the opposite for a positive acceleration reading.
"""
init_contact, *_ = find_peaks(-va_sign * coef1[0], height=0.5 * std(coef1[0]))
coef2, _ = cwt(coef1[1], scale2, "gaus1")
"""
Peaks are the final contact points
Same issue as above
"""
final_contact, *_ = find_peaks(-va_sign * coef2[0], height=0.5 * std(coef2[0]))
return init_contact, final_contact, filt_vert_accel, v_axis | scikit-digital-health | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/gait/get_gait_events.py | get_gait_events.py | from numpy import fft, argmax, std, abs, argsort, corrcoef, mean, sign
from scipy.signal import detrend, butter, sosfiltfilt, find_peaks
from scipy.integrate import cumtrapz
from pywt import cwt
from skdh.utility import correct_accelerometer_orientation
from skdh.gait.gait_endpoints import gait_endpoints
def get_cwt_scales(use_optimal_scale, vertical_velocity, original_scale, fs):
"""
Get the CWT scales for the IC and FC events.
Parameters
----------
use_optimal_scale : bool
Use the optimal scale based on step frequency.
vertical_velocity : numpy.ndarray
Vertical velocity, in units of "g".
original_scale : int
The original/default scale for the CWT.
fs : float
Sampling frequency, in Hz.
Returns
-------
scale1 : int
First scale for the CWT. For initial contact events.
scale2 : int
Second scale for the CWT. For final contact events.
"""
if use_optimal_scale:
coef_scale_original, _ = cwt(vertical_velocity, original_scale, "gaus1")
F = abs(fft.rfft(coef_scale_original[0]))
# compute an estimate of the step frequency
step_freq = argmax(F) / vertical_velocity.size * fs
# IC scale: -10 * sf + 56
# FC scale: -52 * sf + 131
        # TODO verify the FC scale equation. This is not in the paper but is a
# guess from the graph
# original fs was 250hz, hence the conversion
scale1 = min(max(round((-10 * step_freq + 56) * (fs / 250)), 1), 90)
scale2 = min(max(round((-52 * step_freq + 131) * (fs / 250)), 1), 90)
# scale range is between 1 and 90
else:
scale1 = scale2 = original_scale
return scale1, scale2
def get_gait_events(
accel,
fs,
ts,
orig_scale,
filter_order,
filter_cutoff,
corr_accel_orient,
use_optimal_scale,
):
"""
Get the bouts of gait from the acceleration during a gait bout
Parameters
----------
accel : numpy.ndarray
(N, 3) array of acceleration during the gait bout.
fs : float
Sampling frequency for the acceleration.
ts : numpy.ndarray
        Array of timestamps (in seconds) corresponding to acceleration sampling times.
orig_scale : int
Original scale for the CWT.
filter_order : int
Low-pass filter order.
filter_cutoff : float
Low-pass filter cutoff in Hz.
corr_accel_orient : bool
Correct the accelerometer orientation.
use_optimal_scale : bool
Use the optimal scale based on step frequency.
Returns
-------
init_contact : numpy.ndarray
Indices of initial contacts
final_contact : numpy.ndarray
Indices of final contacts
vert_accel : numpy.ndarray
Filtered vertical acceleration
v_axis : int
The axis corresponding to the vertical acceleration
"""
    assert accel.shape[0] == ts.size, "`accel` and `ts` size must match"
# figure out vertical axis on a per-bout basis
acc_mean = mean(accel, axis=0)
v_axis = argmax(abs(acc_mean))
va_sign = sign(acc_mean[v_axis]) # sign of the vertical acceleration
# correct acceleration orientation if set
if corr_accel_orient:
# determine AP axis
ac = gait_endpoints._autocovariancefn(
accel, min(accel.shape[0] - 1, 1000), biased=True, axis=0
)
ap_axis = argsort(corrcoef(ac.T)[v_axis])[-2] # last is autocorrelation
accel = correct_accelerometer_orientation(accel, v_axis=v_axis, ap_axis=ap_axis)
vert_accel = detrend(accel[:, v_axis]) # detrend data just in case
# low-pass filter if we can
if 0 < (2 * filter_cutoff / fs) < 1:
sos = butter(filter_order, 2 * filter_cutoff / fs, btype="low", output="sos")
# multiply by 1 to ensure a copy and not a view
filt_vert_accel = sosfiltfilt(sos, vert_accel)
else:
filt_vert_accel = vert_accel * 1
# first integrate the vertical accel to get velocity
vert_velocity = cumtrapz(filt_vert_accel, x=ts - ts[0], initial=0)
# get the CWT scales
scale1, scale2 = get_cwt_scales(use_optimal_scale, vert_velocity, orig_scale, fs)
coef1, _ = cwt(vert_velocity, [scale1, scale2], "gaus1")
"""
Find the local minima in the signal. This should technically always require using
the negative signal in "find_peaks", however the way PyWavelets computes the
CWT results in the opposite signal that we want.
Therefore, if the sign of the acceleration was negative, we need to use the
    positive coefficient signal, and the opposite for a positive acceleration reading.
"""
init_contact, *_ = find_peaks(-va_sign * coef1[0], height=0.5 * std(coef1[0]))
coef2, _ = cwt(coef1[1], scale2, "gaus1")
"""
Peaks are the final contact points
Same issue as above
"""
final_contact, *_ = find_peaks(-va_sign * coef2[0], height=0.5 * std(coef2[0]))
return init_contact, final_contact, filt_vert_accel, v_axis | 0.870625 | 0.564399 |
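A sketch of running the event detection on a short synthetic signal. A sinusoid is not real gait, so the detected contacts are only illustrative; the keyword values simply mirror the signature above.

import numpy as np
from skdh.gait.get_gait_events import get_gait_events

fs = 50.0
ts = np.arange(0.0, 30.0, 1 / fs)
accel = np.zeros((ts.size, 3))
accel[:, 1] = 1.0 + 0.3 * np.sin(2 * np.pi * 2.0 * ts)  # ~2 Hz "steps" on the vertical axis

ic, fc, vert_accel, v_axis = get_gait_events(
    accel,
    fs,
    ts,
    orig_scale=8,             # scale for the initial CWT used in the step-frequency estimate
    filter_order=4,
    filter_cutoff=20.0,
    corr_accel_orient=False,
    use_optimal_scale=True,
)
print(v_axis, ic[:5], fc[:5])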
from numpy import (
max,
min,
mean,
arccos,
sum,
array,
sin,
cos,
full,
nan,
arctan2,
unwrap,
pi,
sign,
diff,
abs,
zeros,
cross,
)
from numpy.linalg import norm
from skdh.utility.internal import rle
def get_turns(gait, accel, gyro, fs, n_strides):
"""
Get the location of turns, to indicate if steps occur during a turn.
Parameters
----------
gait : dictionary
Dictionary of gait values needed for computation or the results.
accel : numpy.ndarray
Acceleration in units of 'g', for the current gait bout.
gyro : numpy.ndarray
Angular velocity in units of 'rad/s', for the current gait bout.
fs : float
Sampling frequency, in Hz.
n_strides : int
Number of strides in the current gait bout.
Notes
-----
Values indicate turns as follows:
- -1: Turns not detected (lacking angular velocity data)
- 0: No turn found
- 1: Turn overlaps with either Initial or Final contact
- 2: Turn overlaps with both Initial and Final contact
References
----------
.. [1] M. H. Pham et al., “Algorithm for Turning Detection and Analysis
Validated under Home-Like Conditions in Patients with Parkinson’s Disease
and Older Adults using a 6 Degree-of-Freedom Inertial Measurement Unit at
the Lower Back,” Front. Neurol., vol. 8, Apr. 2017,
doi: 10.3389/fneur.2017.00135.
"""
# first check if we can detect turns
if gyro is None or n_strides < 1:
gait["Turn"].extend([-1] * n_strides)
return
# get the first available still period to start the yaw tracking
n = int(0.05 * fs) # number of samples to use for still period
min_slice = None
for i in range(int(2 * fs)):
tmp = norm(accel[i : i + n], axis=1)
acc_range = max(tmp) - min(tmp)
if acc_range < (0.2 / 9.81): # range defined by the Pham paper
min_slice = accel[i : i + n]
break
if min_slice is None:
min_slice = accel[:n]
# compute the mean value over that time frame
acc_init = mean(min_slice, axis=0)
# compute the initial angle between this vector and global frame
phi = arccos(sum(acc_init * array([0, 0, 1])) / norm(acc_init))
# create the rotation matrix/rotations from sensor frame to global frame
gsZ = array([sin(phi), cos(phi), 0.0])
gsX = array([1.0, 0.0, 0.0])
gsY = cross(gsZ, gsX)
gsY /= norm(gsY)
gsX = cross(gsY, gsZ)
gsX /= norm(gsX)
gsR = array([gsX, gsY, gsZ])
# iterate over the gait bout
alpha = full(gyro.shape[0], nan) # allocate the yaw angle around vertical axis
alpha[0] = arctan2(gsR[2, 0], gsR[1, 0])
for i in range(1, gyro.shape[0]):
theta = norm(gyro[i]) / fs
c = cos(theta)
s = sin(theta)
t = 1 - c
wx = gyro[i, 0]
wy = gyro[i, 1]
wz = gyro[i, 2]
update_R = array(
[
[t * wx**2 + c, t * wx * wy + s * wz, t * wx * wz - s * wy],
[t * wx * wy - s * wz, t * wy**2 + c, t * wy * wz + s * wx],
[t * wx * wz + s * wy, t * wy * wz - s * wx, t * wz**2 + c],
]
)
gsR = update_R @ gsR
alpha[i] = arctan2(gsR[2, 0], gsR[1, 0])
# unwrap the angle so there are no discontinuities
alpha = unwrap(alpha, period=pi)
# get the sign of the difference as initial turn indication
turns = sign(diff(alpha))
# get the angles of the turns
lengths, starts, values = rle(turns == 1)
turn_angles = abs(alpha[starts + lengths] - alpha[starts])
# find hesitations in turns
mask = (lengths / fs) < 0.5 # less than half a second
mask[1:-1] &= turn_angles[:-2] >= (pi / 180 * 10) # adjacent turns > 10 degrees
mask[1:-1] &= turn_angles[2:] >= (pi / 180 * 10)
# one adjacent turn greater than 45 degrees
mask[1:-1] &= (turn_angles[:-2] > pi / 4) | (turn_angles[2:] >= pi / 4)
# magnitude of hesitation less than 10% of turn angle
    mask[1:-1] &= turn_angles[1:-1] < (0.1 * (turn_angles[:-2] + turn_angles[2:]))
# set hesitation turns to match surrounding
for l, s in zip(lengths[mask], starts[mask]):
turns[s : s + l] = turns[s - 1]
# enforce the time limit (0.1 - 10s) and angle limit (90 deg)
lengths, starts, values = rle(turns == 1)
mask = abs(alpha[starts + lengths] - alpha[starts]) < (pi / 2) # exclusion mask
    mask |= ((lengths / fs) < 0.1) | ((lengths / fs) > 10)
for l, s in zip(lengths[mask], starts[mask]):
turns[s : s + l] = 0
# final list of turns
lengths, starts, values = rle(turns != 0)
# mask for strides in turn
in_turn = zeros(n_strides, dtype="int")
for d, s in zip(lengths[values == 1], starts[values == 1]):
in_turn += (gait["IC"][-n_strides:] > s) & (gait["IC"][-n_strides:] < (s + d))
in_turn += (gait["FC"][-n_strides:] > s) & (gait["FC"][-n_strides:] < (s + d))
gait["Turn"].extend(in_turn) | scikit-digital-health | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/gait/get_turns.py | get_turns.py | from numpy import (
max,
min,
mean,
arccos,
sum,
array,
sin,
cos,
full,
nan,
arctan2,
unwrap,
pi,
sign,
diff,
abs,
zeros,
cross,
)
from numpy.linalg import norm
from skdh.utility.internal import rle
def get_turns(gait, accel, gyro, fs, n_strides):
"""
Get the location of turns, to indicate if steps occur during a turn.
Parameters
----------
gait : dictionary
Dictionary of gait values needed for computation or the results.
accel : numpy.ndarray
Acceleration in units of 'g', for the current gait bout.
gyro : numpy.ndarray
Angular velocity in units of 'rad/s', for the current gait bout.
fs : float
Sampling frequency, in Hz.
n_strides : int
Number of strides in the current gait bout.
Notes
-----
Values indicate turns as follows:
- -1: Turns not detected (lacking angular velocity data)
- 0: No turn found
- 1: Turn overlaps with either Initial or Final contact
- 2: Turn overlaps with both Initial and Final contact
References
----------
.. [1] M. H. Pham et al., “Algorithm for Turning Detection and Analysis
Validated under Home-Like Conditions in Patients with Parkinson’s Disease
and Older Adults using a 6 Degree-of-Freedom Inertial Measurement Unit at
the Lower Back,” Front. Neurol., vol. 8, Apr. 2017,
doi: 10.3389/fneur.2017.00135.
"""
# first check if we can detect turns
if gyro is None or n_strides < 1:
gait["Turn"].extend([-1] * n_strides)
return
# get the first available still period to start the yaw tracking
n = int(0.05 * fs) # number of samples to use for still period
min_slice = None
for i in range(int(2 * fs)):
tmp = norm(accel[i : i + n], axis=1)
acc_range = max(tmp) - min(tmp)
if acc_range < (0.2 / 9.81): # range defined by the Pham paper
min_slice = accel[i : i + n]
break
if min_slice is None:
min_slice = accel[:n]
# compute the mean value over that time frame
acc_init = mean(min_slice, axis=0)
# compute the initial angle between this vector and global frame
phi = arccos(sum(acc_init * array([0, 0, 1])) / norm(acc_init))
# create the rotation matrix/rotations from sensor frame to global frame
gsZ = array([sin(phi), cos(phi), 0.0])
gsX = array([1.0, 0.0, 0.0])
gsY = cross(gsZ, gsX)
gsY /= norm(gsY)
gsX = cross(gsY, gsZ)
gsX /= norm(gsX)
gsR = array([gsX, gsY, gsZ])
# iterate over the gait bout
alpha = full(gyro.shape[0], nan) # allocate the yaw angle around vertical axis
alpha[0] = arctan2(gsR[2, 0], gsR[1, 0])
for i in range(1, gyro.shape[0]):
theta = norm(gyro[i]) / fs
c = cos(theta)
s = sin(theta)
t = 1 - c
wx = gyro[i, 0]
wy = gyro[i, 1]
wz = gyro[i, 2]
update_R = array(
[
[t * wx**2 + c, t * wx * wy + s * wz, t * wx * wz - s * wy],
[t * wx * wy - s * wz, t * wy**2 + c, t * wy * wz + s * wx],
[t * wx * wz + s * wy, t * wy * wz - s * wx, t * wz**2 + c],
]
)
gsR = update_R @ gsR
alpha[i] = arctan2(gsR[2, 0], gsR[1, 0])
# unwrap the angle so there are no discontinuities
alpha = unwrap(alpha, period=pi)
# get the sign of the difference as initial turn indication
turns = sign(diff(alpha))
# get the angles of the turns
lengths, starts, values = rle(turns == 1)
turn_angles = abs(alpha[starts + lengths] - alpha[starts])
# find hesitations in turns
mask = (lengths / fs) < 0.5 # less than half a second
mask[1:-1] &= turn_angles[:-2] >= (pi / 180 * 10) # adjacent turns > 10 degrees
mask[1:-1] &= turn_angles[2:] >= (pi / 180 * 10)
# one adjacent turn greater than 45 degrees
mask[1:-1] &= (turn_angles[:-2] > pi / 4) | (turn_angles[2:] >= pi / 4)
# magnitude of hesitation less than 10% of turn angle
    mask[1:-1] &= turn_angles[1:-1] < (0.1 * (turn_angles[:-2] + turn_angles[2:]))
# set hesitation turns to match surrounding
for l, s in zip(lengths[mask], starts[mask]):
turns[s : s + l] = turns[s - 1]
# enforce the time limit (0.1 - 10s) and angle limit (90 deg)
lengths, starts, values = rle(turns == 1)
mask = abs(alpha[starts + lengths] - alpha[starts]) < (pi / 2) # exclusion mask
    mask |= ((lengths / fs) < 0.1) | ((lengths / fs) > 10)
for l, s in zip(lengths[mask], starts[mask]):
turns[s : s + l] = 0
# final list of turns
lengths, starts, values = rle(turns != 0)
# mask for strides in turn
in_turn = zeros(n_strides, dtype="int")
for d, s in zip(lengths[values == 1], starts[values == 1]):
in_turn += (gait["IC"][-n_strides:] > s) & (gait["IC"][-n_strides:] < (s + d))
in_turn += (gait["FC"][-n_strides:] > s) & (gait["FC"][-n_strides:] < (s + d))
gait["Turn"].extend(in_turn) | 0.835651 | 0.590455 |
import functools
import logging
from numpy import zeros, roll, full, nan, bool_, float_
def basic_asymmetry(f):
@functools.wraps(f)
def run_basic_asymmetry(self, *args, **kwargs):
f(self, *args, **kwargs)
self._predict_asymmetry(*args, **kwargs)
return run_basic_asymmetry
class GaitBoutEndpoint:
def __str__(self):
return self.name
def __repr__(self):
return self.name
def __init__(self, name, logname, depends=None):
"""
Bout level endpoint base class
Parameters
----------
        name : str
            Name of the endpoint
        logname : str
            Name of the logger to use
        depends : Iterable, optional
            Any other endpoints that are required to be computed beforehand
"""
self.name = name
self.logger = logging.getLogger(logname)
self.k_ = f"BOUTPARAM:{self.name}"
self._depends = depends
def predict(self, fs, leg_length, gait, gait_aux):
"""
Predict the bout level gait endpoint
Parameters
----------
fs : float
Sampling frequency in Hz
leg_length : {None, float}
Leg length in meters
gait : dict
Dictionary of gait items and results. Modified in place to add the endpoint being
calculated
gait_aux : dict
Dictionary of acceleration, velocity, and position data for bouts, and the mapping
from step to bout and inertial data
"""
if self.k_ in gait:
return
if self._depends is not None:
for param in self._depends:
param().predict(fs, leg_length, gait, gait_aux)
self._predict(fs, leg_length, gait, gait_aux)
class GaitEventEndpoint:
def __str__(self):
return self.name
def __repr__(self):
return self.name
def __init__(self, name, logname, depends=None):
"""
Gait endpoint base class
Parameters
----------
        name : str
            Name of the endpoint
        logname : str
            Name of the logger to use
        depends : Iterable, optional
            Any other endpoints that are required to be computed beforehand
"""
self.name = name
self.logger = logging.getLogger(logname)
self.k_ = f"PARAM:{self.name}"
self._depends = depends
@staticmethod
def _get_mask(gait, offset):
if offset not in [1, 2]:
raise ValueError("invalid offset")
mask = zeros(gait["IC"].size, dtype=bool_)
mask[:-offset] = (gait["Bout N"][offset:] - gait["Bout N"][:-offset]) == 0
# account for non-continuous gait bouts
mask &= gait["forward cycles"] >= offset
return mask
def predict(self, fs, leg_length, gait, gait_aux):
"""
Predict the gait event-level endpoint
Parameters
----------
fs : float
Sampling frequency in Hz
leg_length : {None, float}
Leg length in meters
gait : dict
Dictionary of gait items and results. Modified in place to add the endpoint being
calculated
gait_aux : dict
Dictionary of acceleration, velocity, and position data for bouts, and the mapping
from step to bout and inertial data
"""
if self.k_ in gait:
return
if self._depends is not None:
for param in self._depends:
param().predict(fs, leg_length, gait, gait_aux)
self._predict(fs, leg_length, gait, gait_aux)
def _predict_asymmetry(self, dt, leg_length, gait, gait_aux):
asy_name = f"{self.k_} asymmetry"
gait[asy_name] = full(gait["IC"].size, nan, dtype=float_)
mask = self._get_mask(gait, 1)
mask_ofst = roll(mask, 1)
gait[asy_name][mask] = gait[self.k_][mask_ofst] - gait[self.k_][mask]
def _predict_init(self, gait, init=True, offset=None):
if init:
gait[self.k_] = full(gait["IC"].size, nan, dtype=float_)
if offset is not None:
mask = self._get_mask(gait, offset)
mask_ofst = roll(mask, offset)
return mask, mask_ofst | scikit-digital-health | /scikit_digital_health-0.11.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/skdh/gait/gait_endpoints/base.py | base.py | import functools
import logging
from numpy import zeros, roll, full, nan, bool_, float_
def basic_asymmetry(f):
@functools.wraps(f)
def run_basic_asymmetry(self, *args, **kwargs):
f(self, *args, **kwargs)
self._predict_asymmetry(*args, **kwargs)
return run_basic_asymmetry
class GaitBoutEndpoint:
def __str__(self):
return self.name
def __repr__(self):
return self.name
def __init__(self, name, logname, depends=None):
"""
Bout level endpoint base class
Parameters
----------
        name : str
            Name of the endpoint
        logname : str
            Name of the logger to use
        depends : Iterable, optional
            Any other endpoints that are required to be computed beforehand
"""
self.name = name
self.logger = logging.getLogger(logname)
self.k_ = f"BOUTPARAM:{self.name}"
self._depends = depends
def predict(self, fs, leg_length, gait, gait_aux):
"""
Predict the bout level gait endpoint
Parameters
----------
fs : float
Sampling frequency in Hz
leg_length : {None, float}
Leg length in meters
gait : dict
Dictionary of gait items and results. Modified in place to add the endpoint being
calculated
gait_aux : dict
Dictionary of acceleration, velocity, and position data for bouts, and the mapping
from step to bout and inertial data
"""
if self.k_ in gait:
return
if self._depends is not None:
for param in self._depends:
param().predict(fs, leg_length, gait, gait_aux)
self._predict(fs, leg_length, gait, gait_aux)
class GaitEventEndpoint:
def __str__(self):
return self.name
def __repr__(self):
return self.name
def __init__(self, name, logname, depends=None):
"""
Gait endpoint base class
Parameters
----------
        name : str
            Name of the endpoint
        logname : str
            Name of the logger to use
        depends : Iterable, optional
            Any other endpoints that are required to be computed beforehand
"""
self.name = name
self.logger = logging.getLogger(logname)
self.k_ = f"PARAM:{self.name}"
self._depends = depends
@staticmethod
def _get_mask(gait, offset):
if offset not in [1, 2]:
raise ValueError("invalid offset")
mask = zeros(gait["IC"].size, dtype=bool_)
mask[:-offset] = (gait["Bout N"][offset:] - gait["Bout N"][:-offset]) == 0
# account for non-continuous gait bouts
mask &= gait["forward cycles"] >= offset
return mask
def predict(self, fs, leg_length, gait, gait_aux):
"""
Predict the gait event-level endpoint
Parameters
----------
fs : float
Sampling frequency in Hz
leg_length : {None, float}
Leg length in meters
gait : dict
Dictionary of gait items and results. Modified in place to add the endpoint being
calculated
gait_aux : dict
Dictionary of acceleration, velocity, and position data for bouts, and the mapping
from step to bout and inertial data
"""
if self.k_ in gait:
return
if self._depends is not None:
for param in self._depends:
param().predict(fs, leg_length, gait, gait_aux)
self._predict(fs, leg_length, gait, gait_aux)
def _predict_asymmetry(self, dt, leg_length, gait, gait_aux):
asy_name = f"{self.k_} asymmetry"
gait[asy_name] = full(gait["IC"].size, nan, dtype=float_)
mask = self._get_mask(gait, 1)
mask_ofst = roll(mask, 1)
gait[asy_name][mask] = gait[self.k_][mask_ofst] - gait[self.k_][mask]
def _predict_init(self, gait, init=True, offset=None):
if init:
gait[self.k_] = full(gait["IC"].size, nan, dtype=float_)
if offset is not None:
mask = self._get_mask(gait, offset)
mask_ofst = roll(mask, offset)
return mask, mask_ofst | 0.899953 | 0.286144 |
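A hypothetical endpoint, not one shipped with the package, sketching how the base classes above are meant to be subclassed: `_predict` fills `gait["PARAM:<name>"]`, `_predict_init`/`_get_mask` restrict the computation to consecutive strides within a bout, and the `basic_asymmetry` decorator adds the matching asymmetry series. The gait dictionary values are made up.

```python
# Hypothetical subclass for illustration; field values are synthetic.
import numpy as np
from skdh.gait.gait_endpoints.base import GaitEventEndpoint, basic_asymmetry


class StepTime(GaitEventEndpoint):
    def __init__(self):
        super().__init__("step time", __name__)

    @basic_asymmetry
    def _predict(self, fs, leg_length, gait, gait_aux):
        mask, mask_ofst = self._predict_init(gait, init=True, offset=1)
        # time between consecutive initial contacts within the same bout
        gait[self.k_][mask] = (gait["IC"][mask_ofst] - gait["IC"][mask]) / fs


gait = {
    "IC": np.array([100, 150, 210, 400, 455]),    # initial contacts (samples)
    "Bout N": np.array([1, 1, 1, 2, 2]),          # bout membership per stride
    "forward cycles": np.array([2, 1, 0, 1, 0]),  # cycles remaining in the bout
}
StepTime().predict(fs=100.0, leg_length=None, gait=gait, gait_aux=None)
print(gait["PARAM:step time"])            # [0.5, 0.6, nan, 0.55, nan]
print(gait["PARAM:step time asymmetry"])  # added by basic_asymmetry
```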
[![Build status](https://ci.appveyor.com/api/projects/status/tvumlfad69g6ap3u/branch/master?svg=true)](https://ci.appveyor.com/project/j-bac/scikit-dimension/branch/master)
[![CircleCI](https://circleci.com/gh/scikit-learn-contrib/scikit-dimension/tree/master.svg?style=shield)](https://app.circleci.com/pipelines/github/scikit-learn-contrib/scikit-dimension)
[![Documentation Status](https://readthedocs.org/projects/scikit-dimension/badge/?version=latest)](https://scikit-dimension.readthedocs.io/en/latest/?badge=latest)
[![codecov](https://codecov.io/gh/j-bac/scikit-dimension/branch/master/graph/badge.svg)](https://codecov.io/gh/j-bac/scikit-dimension)
[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/j-bac/scikit-dimension.svg?logo=lgtm&logoWidth=18&label=%20code%20quality)](https://lgtm.com/projects/g/j-bac/scikit-dimension/context:python)
[![GitHub license](https://img.shields.io/github/license/j-bac/scikit-dimension)](https://github.com/j-bac/scikit-dimension/blob/master/LICENSE)
[![Downloads](https://pepy.tech/badge/scikit-dimension)](https://pepy.tech/project/scikit-dimension)
# scikit-dimension
scikit-dimension is a Python module for intrinsic dimension estimation built according to the [scikit-learn](https://github.com/scikit-learn/scikit-learn) API and distributed under the 3-Clause BSD license.
Please refer to the [documentation](https://scikit-dimension.readthedocs.io) and the [paper](https://www.mdpi.com/1099-4300/23/10/1368) for detailed API, examples and references
### Installation
Using pip:
```bash
pip install scikit-dimension
```
From source:
```bash
git clone https://github.com/j-bac/scikit-dimension
cd scikit-dimension
pip install .
```
### Quick start
Local and global estimators can be used in this way:
```python
import skdim
import numpy as np
#generate data : np.array (n_points x n_dim). Here a uniformly sampled 5-ball embedded in 10 dimensions
data = np.zeros((1000,10))
data[:,:5] = skdim.datasets.hyperBall(n = 1000, d = 5, radius = 1, random_state = 0)
#estimate global intrinsic dimension
danco = skdim.id.DANCo().fit(data)
#estimate local intrinsic dimension (dimension in k-nearest-neighborhoods around each point):
lpca = skdim.id.lPCA().fit_pw(data,
n_neighbors = 100,
n_jobs = 1)
#get estimated intrinsic dimension
print(danco.dimension_, np.mean(lpca.dimension_pw_))
```
| scikit-dimension | /scikit-dimension-0.3.3.tar.gz/scikit-dimension-0.3.3/README.md | README.md | pip install scikit-dimension
git clone https://github.com/j-bac/scikit-dimension
cd scikit-dimension
pip install .
import skdim
import numpy as np
#generate data : np.array (n_points x n_dim). Here a uniformly sampled 5-ball embedded in 10 dimensions
data = np.zeros((1000,10))
data[:,:5] = skdim.datasets.hyperBall(n = 1000, d = 5, radius = 1, random_state = 0)
#estimate global intrinsic dimension
danco = skdim.id.DANCo().fit(data)
#estimate local intrinsic dimension (dimension in k-nearest-neighborhoods around each point):
lpca = skdim.id.lPCA().fit_pw(data,
n_neighbors = 100,
n_jobs = 1)
#get estimated intrinsic dimension
print(danco.dimension_, np.mean(lpca.dimension_pw_)) | 0.659734 | 0.869548 |
import numpy as np
from .._commonfuncs import LocalEstimator
from scipy.spatial.distance import pdist, squareform
class TLE(LocalEstimator):
"""Intrinsic dimension estimation using the Tight Local intrinsic dimensionality Estimator algorithm. [Amsaleg2019]_ [IDRadovanović]_
Parameters
----------
    epsilon: float
        Numerical tolerance; pairwise measurements smaller than this are treated as zero and dropped.
"""
_N_NEIGHBORS = 20
def __init__(
self, epsilon=1e-4,
):
self.epsilon = epsilon
def _fit(self, X, dists, knnidx):
self.dimension_pw_ = np.zeros(len(X))
for i in range(len(X)):
self.dimension_pw_[i] = self._idtle(X[knnidx[i, :]], dists[[i], :])
def _idtle(self, nn, dists):
# nn - matrix of nearest neighbors (n_neighbors x d), sorted by distance
# dists - nearest-neighbor distances (1 x n_neighbors), sorted
r = dists[0, -1] # distance to n_neighbors-th neighbor
# Boundary case 1: If $r = 0$, this is fatal, since the neighborhood would be degenerate.
if r == 0:
raise ValueError("All k-NN distances are zero!")
# Main computation
n_neighbors = dists.shape[1]
V = squareform(pdist(nn))
Di = np.tile(dists.T, (1, n_neighbors))
Dj = Di.T
Z2 = 2 * Di ** 2 + 2 * Dj ** 2 - V ** 2
S = (
r
* (
((Di ** 2 + V ** 2 - Dj ** 2) ** 2 + 4 * V ** 2 * (r ** 2 - Di ** 2))
** 0.5
- (Di ** 2 + V ** 2 - Dj ** 2)
)
/ (2 * (r ** 2 - Di ** 2))
)
T = (
r
* (
((Di ** 2 + Z2 - Dj ** 2) ** 2 + 4 * Z2 * (r ** 2 - Di ** 2)) ** 0.5
- (Di ** 2 + Z2 - Dj ** 2)
)
/ (2 * (r ** 2 - Di ** 2))
)
# handle case of repeating k-NN distances
Dr = (dists == r).squeeze()
S[Dr, :] = r * V[Dr, :] ** 2 / (r ** 2 + V[Dr, :] ** 2 - Dj[Dr, :] ** 2)
T[Dr, :] = r * Z2[Dr, :] / (r ** 2 + Z2[Dr, :] - Dj[Dr, :] ** 2)
# Boundary case 2: If $u_i = 0$, then for all $1\leq j\leq n_neighbors$ the measurements $s_{ij}$ and $t_{ij}$ reduce to $u_j$.
Di0 = (Di == 0).squeeze()
T[Di0] = Dj[Di0]
S[Di0] = Dj[Di0]
# Boundary case 3: If $u_j = 0$, then for all $1\leq j\leq n_neighbors$ the measurements $s_{ij}$ and $t_{ij}$ reduce to $\frac{r v_{ij}}{r + v_{ij}}$.
Dj0 = (Dj == 0).squeeze()
T[Dj0] = r * V[Dj0] / (r + V[Dj0])
S[Dj0] = r * V[Dj0] / (r + V[Dj0])
# Boundary case 4: If $v_{ij} = 0$, then the measurement $s_{ij}$ is zero and must be dropped. The measurement $t_{ij}$ should be dropped as well.
V0 = (V == 0).squeeze()
np.fill_diagonal(V0, False)
# by setting to r, $t_{ij}$ will not contribute to the sum s1t
T[V0] = r
# by setting to r, $s_{ij}$ will not contribute to the sum s1s
S[V0] = r
# will subtract twice this number during ID computation below
nV0 = np.sum(V0)
# Drop T & S measurements below epsilon (V4: If $s_{ij}$ is thrown out, then for the sake of balance, $t_{ij}$ should be thrown out as well (or vice versa).)
TSeps = (T < self.epsilon) | (S < self.epsilon)
np.fill_diagonal(TSeps, 0)
nTSeps = np.sum(TSeps)
T[TSeps] = r
T = np.log(T / r)
S[TSeps] = r
S = np.log(S / r)
np.fill_diagonal(T, 0) # delete diagonal elements
np.fill_diagonal(S, 0)
# Sum over the whole matrices
s1t = np.sum(T)
s1s = np.sum(S)
# Drop distances below epsilon and compute sum
Deps = dists < self.epsilon
nDeps = np.sum(Deps, dtype=int)
        dists = dists[:, nDeps:]  # drop the first nDeps (sub-epsilon) neighbor distances
s2 = np.sum(np.log(dists / r))
# Compute ID, subtracting numbers of dropped measurements
ID = -2 * (n_neighbors ** 2 - nTSeps - nDeps - nV0) / (s1t + s1s + 2 * s2)
return ID | scikit-dimension | /scikit-dimension-0.3.3.tar.gz/scikit-dimension-0.3.3/skdim/id/_TLE.py | _TLE.py | import numpy as np
from .._commonfuncs import LocalEstimator
from scipy.spatial.distance import pdist, squareform
class TLE(LocalEstimator):
"""Intrinsic dimension estimation using the Tight Local intrinsic dimensionality Estimator algorithm. [Amsaleg2019]_ [IDRadovanović]_
Parameters
----------
    epsilon: float
        Numerical tolerance; pairwise measurements smaller than this are treated as zero and dropped.
"""
_N_NEIGHBORS = 20
def __init__(
self, epsilon=1e-4,
):
self.epsilon = epsilon
def _fit(self, X, dists, knnidx):
self.dimension_pw_ = np.zeros(len(X))
for i in range(len(X)):
self.dimension_pw_[i] = self._idtle(X[knnidx[i, :]], dists[[i], :])
def _idtle(self, nn, dists):
# nn - matrix of nearest neighbors (n_neighbors x d), sorted by distance
# dists - nearest-neighbor distances (1 x n_neighbors), sorted
r = dists[0, -1] # distance to n_neighbors-th neighbor
# Boundary case 1: If $r = 0$, this is fatal, since the neighborhood would be degenerate.
if r == 0:
raise ValueError("All k-NN distances are zero!")
# Main computation
n_neighbors = dists.shape[1]
V = squareform(pdist(nn))
Di = np.tile(dists.T, (1, n_neighbors))
Dj = Di.T
Z2 = 2 * Di ** 2 + 2 * Dj ** 2 - V ** 2
S = (
r
* (
((Di ** 2 + V ** 2 - Dj ** 2) ** 2 + 4 * V ** 2 * (r ** 2 - Di ** 2))
** 0.5
- (Di ** 2 + V ** 2 - Dj ** 2)
)
/ (2 * (r ** 2 - Di ** 2))
)
T = (
r
* (
((Di ** 2 + Z2 - Dj ** 2) ** 2 + 4 * Z2 * (r ** 2 - Di ** 2)) ** 0.5
- (Di ** 2 + Z2 - Dj ** 2)
)
/ (2 * (r ** 2 - Di ** 2))
)
# handle case of repeating k-NN distances
Dr = (dists == r).squeeze()
S[Dr, :] = r * V[Dr, :] ** 2 / (r ** 2 + V[Dr, :] ** 2 - Dj[Dr, :] ** 2)
T[Dr, :] = r * Z2[Dr, :] / (r ** 2 + Z2[Dr, :] - Dj[Dr, :] ** 2)
# Boundary case 2: If $u_i = 0$, then for all $1\leq j\leq n_neighbors$ the measurements $s_{ij}$ and $t_{ij}$ reduce to $u_j$.
Di0 = (Di == 0).squeeze()
T[Di0] = Dj[Di0]
S[Di0] = Dj[Di0]
# Boundary case 3: If $u_j = 0$, then for all $1\leq j\leq n_neighbors$ the measurements $s_{ij}$ and $t_{ij}$ reduce to $\frac{r v_{ij}}{r + v_{ij}}$.
Dj0 = (Dj == 0).squeeze()
T[Dj0] = r * V[Dj0] / (r + V[Dj0])
S[Dj0] = r * V[Dj0] / (r + V[Dj0])
# Boundary case 4: If $v_{ij} = 0$, then the measurement $s_{ij}$ is zero and must be dropped. The measurement $t_{ij}$ should be dropped as well.
V0 = (V == 0).squeeze()
np.fill_diagonal(V0, False)
# by setting to r, $t_{ij}$ will not contribute to the sum s1t
T[V0] = r
# by setting to r, $s_{ij}$ will not contribute to the sum s1s
S[V0] = r
# will subtract twice this number during ID computation below
nV0 = np.sum(V0)
# Drop T & S measurements below epsilon (V4: If $s_{ij}$ is thrown out, then for the sake of balance, $t_{ij}$ should be thrown out as well (or vice versa).)
TSeps = (T < self.epsilon) | (S < self.epsilon)
np.fill_diagonal(TSeps, 0)
nTSeps = np.sum(TSeps)
T[TSeps] = r
T = np.log(T / r)
S[TSeps] = r
S = np.log(S / r)
np.fill_diagonal(T, 0) # delete diagonal elements
np.fill_diagonal(S, 0)
# Sum over the whole matrices
s1t = np.sum(T)
s1s = np.sum(S)
# Drop distances below epsilon and compute sum
Deps = dists < self.epsilon
nDeps = np.sum(Deps, dtype=int)
        dists = dists[:, nDeps:]  # drop the first nDeps (sub-epsilon) neighbor distances
s2 = np.sum(np.log(dists / r))
# Compute ID, subtracting numbers of dropped measurements
ID = -2 * (n_neighbors ** 2 - nTSeps - nDeps - nV0) / (s1t + s1s + 2 * s2)
return ID | 0.898555 | 0.875734 |
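A usage sketch for the TLE estimator above, following the quick-start pattern from the scikit-dimension README. It assumes TLE exposes the same `fit_pw` pointwise interface shown there for the other local estimators; the dataset is the README's uniformly sampled 5-ball embedded in 10 dimensions.

```python
# Pointwise local ID with TLE; estimates should concentrate near 5.
import numpy as np
import skdim

X = np.zeros((1000, 10))
X[:, :5] = skdim.datasets.hyperBall(n=1000, d=5, radius=1, random_state=0)

tle = skdim.id.TLE().fit_pw(X, n_neighbors=100, n_jobs=1)
print(np.median(tle.dimension_pw_))
```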
import numpy as np
import warnings
from .._commonfuncs import get_nn, GlobalEstimator
from scipy.optimize import minimize
from sklearn.utils.validation import check_array
class MiND_ML(GlobalEstimator):
# SPDX-License-Identifier: MIT, 2017 Kerstin Johnsson [IDJohnsson]_
"""Intrinsic dimension estimation using the MiND_MLk and MiND_MLi algorithms. [Rozza2012]_ [IDJohnsson]_
Parameters
----------
    k: int, default=20
        Neighborhood parameter for ver='MLk' or ver='MLi'.
    D: int, default=10
        Maximum dimension considered in the likelihood search.
    ver: str
        'MLk' or 'MLi'. See the reference paper
"""
def __init__(self, k=20, D=10, ver="MLk"):
self.k = k
self.D = D
self.ver = ver
def fit(self, X, y=None):
"""A reference implementation of a fitting function.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
The training input samples.
y : dummy parameter to respect the sklearn API
Returns
-------
self : object
Returns self.
"""
X = check_array(X, ensure_min_samples=2, ensure_min_features=2)
if self.k + 1 >= len(X):
warnings.warn("k+1 >= len(X), using k+1 = len(X)-1")
self.dimension_ = self._MiND_MLx(X)
self.is_fitted_ = True
# `fit` should always return `self`
return self
def _MiND_MLx(self, X):
nbh_data, idx = get_nn(X, min(self.k + 1, len(X) - 1))
# if (self.ver == 'ML1'):
# return self._MiND_ML1(nbh_data)
rhos = nbh_data[:, 0] / nbh_data[:, -1]
d_MIND_MLi = self._MiND_MLi(rhos)
if self.ver == "MLi":
return d_MIND_MLi
d_MIND_MLk = self._MiND_MLk(rhos, d_MIND_MLi)
if self.ver == "MLk":
return d_MIND_MLk
else:
raise ValueError("Unknown version: ", self.ver)
# @staticmethod
# def _MiND_ML1(nbh_data):
# n = len(nbh_data)
# #need only squared dists to first 2 neighbors
# dists2 = nbh_data[:, :2]**2
# s = np.sum(np.log(dists2[:, 0]/dists2[:, 1]))
# ID = -2/(s/n)
# return ID
def _MiND_MLi(self, rhos):
# MiND MLi MLk REVERSED COMPARED TO R TO CORRESPOND TO PAPER
N = len(rhos)
d_lik = np.array([np.nan] * self.D)
for d in range(self.D):
d_lik[d] = self._lld(d + 1, rhos, N)
return np.argmax(d_lik) + 1
def _MiND_MLk(self, rhos, dinit):
# MiND MLi MLk REVERSED COMPARED TO R TO CORRESPOND TO PAPER
res = minimize(
fun=self._nlld,
x0=np.array([dinit]),
jac=self._nlld_gr,
args=(rhos, len(rhos)),
method="L-BFGS-B",
bounds=[(0, self.D)],
)
return res["x"][0]
def _nlld(self, d, rhos, N):
return -self._lld(d, rhos, N)
def _lld(self, d, rhos, N):
if d == 0:
return np.array([-1e30])
else:
return (
N * np.log(self.k * d)
+ (d - 1) * np.sum(np.log(rhos))
+ (self.k - 1) * np.sum(np.log(1 - rhos ** d))
)
def _nlld_gr(self, d, rhos, N):
if d == 0:
return np.array([-1e30])
else:
return -(
N / d
+ np.sum(
np.log(rhos)
- (self.k - 1) * (rhos ** d) * np.log(rhos) / (1 - rhos ** d)
)
) | scikit-dimension | /scikit-dimension-0.3.3.tar.gz/scikit-dimension-0.3.3/skdim/id/_MiND_ML.py | _MiND_ML.py | import numpy as np
import warnings
from .._commonfuncs import get_nn, GlobalEstimator
from scipy.optimize import minimize
from sklearn.utils.validation import check_array
class MiND_ML(GlobalEstimator):
# SPDX-License-Identifier: MIT, 2017 Kerstin Johnsson [IDJohnsson]_
"""Intrinsic dimension estimation using the MiND_MLk and MiND_MLi algorithms. [Rozza2012]_ [IDJohnsson]_
Parameters
----------
    k: int, default=20
        Neighborhood parameter for ver='MLk' or ver='MLi'.
    D: int, default=10
        Maximum dimension considered in the likelihood search.
    ver: str
        'MLk' or 'MLi'. See the reference paper
"""
def __init__(self, k=20, D=10, ver="MLk"):
self.k = k
self.D = D
self.ver = ver
def fit(self, X, y=None):
"""A reference implementation of a fitting function.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
The training input samples.
y : dummy parameter to respect the sklearn API
Returns
-------
self : object
Returns self.
"""
X = check_array(X, ensure_min_samples=2, ensure_min_features=2)
if self.k + 1 >= len(X):
warnings.warn("k+1 >= len(X), using k+1 = len(X)-1")
self.dimension_ = self._MiND_MLx(X)
self.is_fitted_ = True
# `fit` should always return `self`
return self
def _MiND_MLx(self, X):
nbh_data, idx = get_nn(X, min(self.k + 1, len(X) - 1))
# if (self.ver == 'ML1'):
# return self._MiND_ML1(nbh_data)
rhos = nbh_data[:, 0] / nbh_data[:, -1]
d_MIND_MLi = self._MiND_MLi(rhos)
if self.ver == "MLi":
return d_MIND_MLi
d_MIND_MLk = self._MiND_MLk(rhos, d_MIND_MLi)
if self.ver == "MLk":
return d_MIND_MLk
else:
raise ValueError("Unknown version: ", self.ver)
# @staticmethod
# def _MiND_ML1(nbh_data):
# n = len(nbh_data)
# #need only squared dists to first 2 neighbors
# dists2 = nbh_data[:, :2]**2
# s = np.sum(np.log(dists2[:, 0]/dists2[:, 1]))
# ID = -2/(s/n)
# return ID
def _MiND_MLi(self, rhos):
# MiND MLi MLk REVERSED COMPARED TO R TO CORRESPOND TO PAPER
N = len(rhos)
d_lik = np.array([np.nan] * self.D)
for d in range(self.D):
d_lik[d] = self._lld(d + 1, rhos, N)
return np.argmax(d_lik) + 1
def _MiND_MLk(self, rhos, dinit):
# MiND MLi MLk REVERSED COMPARED TO R TO CORRESPOND TO PAPER
res = minimize(
fun=self._nlld,
x0=np.array([dinit]),
jac=self._nlld_gr,
args=(rhos, len(rhos)),
method="L-BFGS-B",
bounds=[(0, self.D)],
)
return res["x"][0]
def _nlld(self, d, rhos, N):
return -self._lld(d, rhos, N)
def _lld(self, d, rhos, N):
if d == 0:
return np.array([-1e30])
else:
return (
N * np.log(self.k * d)
+ (d - 1) * np.sum(np.log(rhos))
+ (self.k - 1) * np.sum(np.log(1 - rhos ** d))
)
def _nlld_gr(self, d, rhos, N):
if d == 0:
return np.array([-1e30])
else:
return -(
N / d
+ np.sum(
np.log(rhos)
- (self.k - 1) * (rhos ** d) * np.log(rhos) / (1 - rhos ** d)
)
) | 0.824638 | 0.49939 |
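A usage sketch for the global estimator above, reusing the README's 5-ball dataset; parameter values simply mirror the class defaults. It compares the integer likelihood search ('MLi') with the continuous refinement ('MLk').

```python
# MiND_ML on a 5-ball embedded in 10 dimensions; both variants should return ~5.
import numpy as np
import skdim

X = np.zeros((1000, 10))
X[:, :5] = skdim.datasets.hyperBall(n=1000, d=5, radius=1, random_state=0)

d_mli = skdim.id.MiND_ML(k=20, D=10, ver="MLi").fit(X).dimension_  # integer search
d_mlk = skdim.id.MiND_ML(k=20, D=10, ver="MLk").fit(X).dimension_  # continuous refinement
print(d_mli, d_mlk)
```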
import numpy as np
from scipy.spatial.distance import pdist, squareform
from sklearn.utils.validation import check_array
from .._commonfuncs import GlobalEstimator
class KNN(GlobalEstimator):
# SPDX-License-Identifier: MIT, 2017 Kerstin Johnsson [IDJohnsson]_
"""Intrinsic dimension estimation using the kNN algorithm. [Carter2010]_ [IDJohnsson]_
This is a simplified version of the kNN dimension estimation method described by Carter et al. (2010),
the difference being that block bootstrapping is not used.
Parameters
----------
X: 2D numeric array
A 2D data set with each row describing a data point.
k: int
Number of distances to neighbors used at a time.
ps: 1D numeric array
Vector with sample sizes; each sample size has to be larger than k and smaller than nrow(data).
M: int, default=1
Number of bootstrap samples for each sample size.
gamma: int, default=2
Weighting constant.
"""
def __init__(self, k=None, ps=None, M=1, gamma=2):
self.k = k
self.ps = ps
self.M = M
self.gamma = gamma
def fit(self, X, y=None):
"""A reference implementation of a fitting function.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
The training input samples.
y : dummy parameter to respect the sklearn API
Returns
-------
self: object
Returns self.
self.dimension_: float
The estimated intrinsic dimension
self.residual_: float
Residuals
"""
self._k = 2 if self.k is None else self.k
self._ps = np.arange(self._k + 1, self._k + 5) if self.ps is None else self.ps
X = check_array(X, ensure_min_samples=self._k + 1, ensure_min_features=2)
self.dimension_, self.residual_ = self._knnDimEst(X)
self.is_fitted_ = True
# `fit` should always return `self`
return self
def _knnDimEst(self, X):
n = len(X)
Q = len(self._ps)
if min(self._ps) <= self._k or max(self._ps) > n:
raise ValueError("ps must satisfy k<ps<len(X)")
# Compute the distance between any two points in the X set
dist = squareform(pdist(X))
# Compute weighted graph length for each sample
L = np.zeros((Q, self.M))
for i in range(Q):
for j in range(self.M):
samp_ind = np.random.randint(0, n, self._ps[i])
for l in samp_ind:
L[i, j] += np.sum(
np.sort(dist[l, samp_ind])[1 : (self._k + 1)] ** self.gamma
)
# Add the weighted sum of the distances to the k nearest neighbors.
# We should not include the sample itself, to which the distance is
# zero.
# Least squares solution for m
d = X.shape[1]
epsilon = np.repeat(np.nan, d)
for m0, m in enumerate(np.arange(1, d + 1)):
alpha = (m - self.gamma) / m
ps_alpha = self._ps ** alpha
hat_c = np.sum(ps_alpha * np.sum(L, axis=1)) / (
np.sum(ps_alpha ** 2) * self.M
)
epsilon[m0] = np.sum(
(L - np.tile((hat_c * ps_alpha)[:, None], self.M)) ** 2
)
# matrix(vec, nrow = length(vec), ncol = b) is a matrix with b
# identical columns equal to vec
# sum(matr) is the sum of all elements in the matrix matr
de = np.argmin(epsilon) + 1 # Missing values are discarded
return de, epsilon[de - 1] | scikit-dimension | /scikit-dimension-0.3.3.tar.gz/scikit-dimension-0.3.3/skdim/id/_KNN.py | _KNN.py | import numpy as np
from scipy.spatial.distance import pdist, squareform
from sklearn.utils.validation import check_array
from .._commonfuncs import GlobalEstimator
class KNN(GlobalEstimator):
# SPDX-License-Identifier: MIT, 2017 Kerstin Johnsson [IDJohnsson]_
"""Intrinsic dimension estimation using the kNN algorithm. [Carter2010]_ [IDJohnsson]_
This is a simplified version of the kNN dimension estimation method described by Carter et al. (2010),
the difference being that block bootstrapping is not used.
Parameters
----------
X: 2D numeric array
A 2D data set with each row describing a data point.
k: int
Number of distances to neighbors used at a time.
ps: 1D numeric array
Vector with sample sizes; each sample size has to be larger than k and smaller than nrow(data).
M: int, default=1
Number of bootstrap samples for each sample size.
gamma: int, default=2
Weighting constant.
"""
def __init__(self, k=None, ps=None, M=1, gamma=2):
self.k = k
self.ps = ps
self.M = M
self.gamma = gamma
def fit(self, X, y=None):
"""A reference implementation of a fitting function.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
The training input samples.
y : dummy parameter to respect the sklearn API
Returns
-------
self: object
Returns self.
self.dimension_: float
The estimated intrinsic dimension
self.residual_: float
Residuals
"""
self._k = 2 if self.k is None else self.k
self._ps = np.arange(self._k + 1, self._k + 5) if self.ps is None else self.ps
X = check_array(X, ensure_min_samples=self._k + 1, ensure_min_features=2)
self.dimension_, self.residual_ = self._knnDimEst(X)
self.is_fitted_ = True
# `fit` should always return `self`
return self
def _knnDimEst(self, X):
n = len(X)
Q = len(self._ps)
if min(self._ps) <= self._k or max(self._ps) > n:
raise ValueError("ps must satisfy k<ps<len(X)")
# Compute the distance between any two points in the X set
dist = squareform(pdist(X))
# Compute weighted graph length for each sample
L = np.zeros((Q, self.M))
for i in range(Q):
for j in range(self.M):
samp_ind = np.random.randint(0, n, self._ps[i])
for l in samp_ind:
L[i, j] += np.sum(
np.sort(dist[l, samp_ind])[1 : (self._k + 1)] ** self.gamma
)
# Add the weighted sum of the distances to the k nearest neighbors.
# We should not include the sample itself, to which the distance is
# zero.
# Least squares solution for m
d = X.shape[1]
epsilon = np.repeat(np.nan, d)
for m0, m in enumerate(np.arange(1, d + 1)):
alpha = (m - self.gamma) / m
ps_alpha = self._ps ** alpha
hat_c = np.sum(ps_alpha * np.sum(L, axis=1)) / (
np.sum(ps_alpha ** 2) * self.M
)
epsilon[m0] = np.sum(
(L - np.tile((hat_c * ps_alpha)[:, None], self.M)) ** 2
)
# matrix(vec, nrow = length(vec), ncol = b) is a matrix with b
# identical columns equal to vec
# sum(matr) is the sum of all elements in the matrix matr
de = np.argmin(epsilon) + 1 # Missing values are discarded
return de, epsilon[de - 1] | 0.923846 | 0.760384 |
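A usage sketch for the KNN estimator above. The sample sizes are arbitrary but respect the constraint checked in `fit()`: every entry of `ps` must be larger than `k` and no larger than the number of samples.

```python
# kNN graph-length estimator; dimension_ and the least-squares residual_ are set by fit().
import numpy as np
import skdim

X = np.zeros((1000, 10))
X[:, :7] = skdim.datasets.hyperBall(n=1000, d=7, radius=1, random_state=0)

knn = skdim.id.KNN(k=5, ps=np.arange(30, 80, 10), M=5, gamma=2).fit(X)
print(knn.dimension_, knn.residual_)
```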
from sklearn.utils.validation import check_array
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances_chunked
from sklearn.linear_model import LinearRegression
from .._commonfuncs import get_nn, GlobalEstimator
class TwoNN(GlobalEstimator):
# SPDX-License-Identifier: MIT, 2019 Francesco Mottes [IDMottes]_
"""Intrinsic dimension estimation using the TwoNN algorithm. [Facco2019]_ [IDFacco]_ [IDMottes]_
Parameters
----------
discard_fraction: float
Fraction (between 0 and 1) of largest distances to discard (heuristic from the paper)
dist: bool
Whether data is a precomputed distance matrix
Attributes
----------
x_: 1d array
np.array with the -log(mu) values.
y_: 1d array
np.array with the -log(F(mu_{sigma(i)})) values.
"""
def __init__(self, discard_fraction: float = 0.1, dist: bool = False):
self.discard_fraction = discard_fraction
self.dist = dist
def fit(self, X, y=None):
"""A reference implementation of a fitting function.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
A data set for which the intrinsic dimension is estimated.
y : dummy parameter to respect the sklearn API
Returns
-------
self : object
Returns self.
"""
X = check_array(X, ensure_min_samples=2, ensure_min_features=2)
self.dimension_, self.x_, self.y_ = self._twonn(X)
self.is_fitted_ = True
# `fit` should always return `self`
return self
def _twonn(self, X):
"""
Calculates intrinsic dimension of the provided data points with the TWO-NN algorithm.
-----------
Parameters:
X : 2d array-like
2d data matrix. Samples on rows and features on columns.
return_xy : bool (default=False)
Whether to return also the coordinate vectors used for the linear fit.
discard_fraction : float between 0 and 1
Fraction of largest distances to discard (heuristic from the paper)
dist : bool (default=False)
Whether data is a precomputed distance matrix
-----------
Returns:
d : float
Intrinsic dimension of the dataset according to TWO-NN.
x : 1d np.array (optional)
Array with the -log(mu) values.
y : 1d np.array (optional)
Array with the -log(F(mu_{sigma(i)})) values.
-----------
References:
E. Facco, M. d’Errico, A. Rodriguez & A. Laio
Estimating the intrinsic dimension of datasets by a minimal neighborhood information (https://doi.org/10.1038/s41598-017-11873-y)
"""
N = len(X)
if self.dist:
r1, r2 = X[:, 0], X[:, 1]
_mu = r2 / r1
# discard the largest distances
mu = _mu[np.argsort(_mu)[: int(N * (1 - self.discard_fraction))]]
else:
# mu = r2/r1 for each data point
# relatively high dimensional data, use distance matrix generator
if X.shape[1] > 25:
distmat_chunks = pairwise_distances_chunked(X)
_mu = np.zeros((len(X)))
i = 0
for x in distmat_chunks:
x = np.sort(x, axis=1)
r1, r2 = x[:, 1], x[:, 2]
_mu[i : i + len(x)] = r2 / r1
i += len(x)
# discard the largest distances
mu = _mu[np.argsort(_mu)[: int(N * (1 - self.discard_fraction))]]
else: # relatively low dimensional data, search nearest neighbors directly
dists, _ = get_nn(X, k=2)
r1, r2 = dists[:, 0], dists[:, 1]
_mu = r2 / r1
# discard the largest distances
mu = _mu[np.argsort(_mu)[: int(N * (1 - self.discard_fraction))]]
# Empirical cumulate
Femp = np.arange(int(N * (1 - self.discard_fraction))) / N
# Fit line
lr = LinearRegression(fit_intercept=False)
lr.fit(np.log(mu).reshape(-1, 1), -np.log(1 - Femp).reshape(-1, 1))
d = lr.coef_[0][0] # extract slope
return (
d,
np.log(mu).reshape(-1, 1),
-np.log(1 - Femp).reshape(-1, 1),
) | scikit-dimension | /scikit-dimension-0.3.3.tar.gz/scikit-dimension-0.3.3/skdim/id/_TwoNN.py | _TwoNN.py |
from sklearn.utils.validation import check_array
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances_chunked
from sklearn.linear_model import LinearRegression
from .._commonfuncs import get_nn, GlobalEstimator
class TwoNN(GlobalEstimator):
# SPDX-License-Identifier: MIT, 2019 Francesco Mottes [IDMottes]_
"""Intrinsic dimension estimation using the TwoNN algorithm. [Facco2019]_ [IDFacco]_ [IDMottes]_
Parameters
----------
discard_fraction: float
Fraction (between 0 and 1) of largest distances to discard (heuristic from the paper)
dist: bool
Whether data is a precomputed distance matrix
Attributes
----------
x_: 1d array
np.array with the -log(mu) values.
y_: 1d array
np.array with the -log(F(mu_{sigma(i)})) values.
"""
def __init__(self, discard_fraction: float = 0.1, dist: bool = False):
self.discard_fraction = discard_fraction
self.dist = dist
def fit(self, X, y=None):
"""A reference implementation of a fitting function.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
A data set for which the intrinsic dimension is estimated.
y : dummy parameter to respect the sklearn API
Returns
-------
self : object
Returns self.
"""
X = check_array(X, ensure_min_samples=2, ensure_min_features=2)
self.dimension_, self.x_, self.y_ = self._twonn(X)
self.is_fitted_ = True
# `fit` should always return `self`
return self
def _twonn(self, X):
"""
Calculates intrinsic dimension of the provided data points with the TWO-NN algorithm.
-----------
Parameters:
X : 2d array-like
2d data matrix. Samples on rows and features on columns.
return_xy : bool (default=False)
Whether to return also the coordinate vectors used for the linear fit.
discard_fraction : float between 0 and 1
Fraction of largest distances to discard (heuristic from the paper)
dist : bool (default=False)
Whether data is a precomputed distance matrix
-----------
Returns:
d : float
Intrinsic dimension of the dataset according to TWO-NN.
x : 1d np.array (optional)
Array with the -log(mu) values.
y : 1d np.array (optional)
Array with the -log(F(mu_{sigma(i)})) values.
-----------
References:
E. Facco, M. d’Errico, A. Rodriguez & A. Laio
Estimating the intrinsic dimension of datasets by a minimal neighborhood information (https://doi.org/10.1038/s41598-017-11873-y)
"""
N = len(X)
if self.dist:
r1, r2 = X[:, 0], X[:, 1]
_mu = r2 / r1
# discard the largest distances
mu = _mu[np.argsort(_mu)[: int(N * (1 - self.discard_fraction))]]
else:
# mu = r2/r1 for each data point
# relatively high dimensional data, use distance matrix generator
if X.shape[1] > 25:
distmat_chunks = pairwise_distances_chunked(X)
_mu = np.zeros((len(X)))
i = 0
for x in distmat_chunks:
x = np.sort(x, axis=1)
r1, r2 = x[:, 1], x[:, 2]
_mu[i : i + len(x)] = r2 / r1
i += len(x)
# discard the largest distances
mu = _mu[np.argsort(_mu)[: int(N * (1 - self.discard_fraction))]]
else: # relatively low dimensional data, search nearest neighbors directly
dists, _ = get_nn(X, k=2)
r1, r2 = dists[:, 0], dists[:, 1]
_mu = r2 / r1
# discard the largest distances
mu = _mu[np.argsort(_mu)[: int(N * (1 - self.discard_fraction))]]
# Empirical cumulate
Femp = np.arange(int(N * (1 - self.discard_fraction))) / N
# Fit line
lr = LinearRegression(fit_intercept=False)
lr.fit(np.log(mu).reshape(-1, 1), -np.log(1 - Femp).reshape(-1, 1))
d = lr.coef_[0][0] # extract slope
return (
d,
np.log(mu).reshape(-1, 1),
-np.log(1 - Femp).reshape(-1, 1),
) | 0.963857 | 0.798344 |
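A usage sketch for the TwoNN estimator above, again on the README's 5-ball data. Besides `dimension_`, the fitted object keeps the `x_`/`y_` coordinates used for the straight-line fit, which can be plotted to check how well the estimate is supported.

```python
# TwoNN global estimate; the slope of the x_/y_ fit is the returned dimension.
import numpy as np
import skdim

X = np.zeros((1000, 10))
X[:, :5] = skdim.datasets.hyperBall(n=1000, d=5, radius=1, random_state=0)

twonn = skdim.id.TwoNN(discard_fraction=0.1).fit(X)
print(twonn.dimension_)                 # ~5 expected
print(twonn.x_.shape, twonn.y_.shape)   # coordinates of the linear fit
```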
import warnings
import numpy as np
from sklearn.metrics import pairwise_distances_chunked
from .._commonfuncs import get_nn, GlobalEstimator
from sklearn.utils.validation import check_array
class CorrInt(GlobalEstimator):
"""Intrinsic dimension estimation using the Correlation Dimension. [Grassberger1983]_ [IDHino]_
Parameters
----------
k1: int
First neighborhood size considered
k2: int
Last neighborhood size considered
DM: bool, default=False
Is the input a precomputed distance matrix (dense)
"""
def __init__(self, k1=10, k2=20, DM=False):
self.k1 = k1
self.k2 = k2
self.DM = DM
def fit(self, X, y=None):
"""A reference implementation of a fitting function.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
The training input samples.
y : dummy parameter to respect the sklearn API
Returns
-------
self : object
Returns self.
"""
X = check_array(X, ensure_min_samples=2, ensure_min_features=2)
if self.k2 >= len(X):
warnings.warn("k2 larger or equal to len(X), using len(X)-1")
self.k2 = len(X) - 1
if self.k1 >= len(X) or self.k1 > self.k2:
warnings.warn("k1 larger than k2 or len(X), using k2-1")
self.k1 = self.k2 - 1
self.dimension_ = self._corrint(X)
self.is_fitted_ = True
# `fit` should always return `self`
return self
def _corrint(self, X):
n_elements = len(X) ** 2 # number of elements
dists, _ = get_nn(X, self.k2)
if self.DM is False:
chunked_distmat = pairwise_distances_chunked(X)
else:
chunked_distmat = X
r1 = np.median(dists[:, self.k1 - 1])
r2 = np.median(dists[:, self.k2 - 1])
n_diagonal_entries = len(X) # remove diagonal from sum count
s1 = -n_diagonal_entries
s2 = -n_diagonal_entries
for chunk in chunked_distmat:
s1 += (chunk < r1).sum()
s2 += (chunk < r2).sum()
Cr = np.array([s1 / n_elements, s2 / n_elements])
estq = np.diff(np.log(Cr)) / np.log(r2 / r1)
return estq[0] | scikit-dimension | /scikit-dimension-0.3.3.tar.gz/scikit-dimension-0.3.3/skdim/id/_CorrInt.py | _CorrInt.py | import warnings
import numpy as np
from sklearn.metrics import pairwise_distances_chunked
from .._commonfuncs import get_nn, GlobalEstimator
from sklearn.utils.validation import check_array
class CorrInt(GlobalEstimator):
"""Intrinsic dimension estimation using the Correlation Dimension. [Grassberger1983]_ [IDHino]_
Parameters
----------
k1: int
First neighborhood size considered
k2: int
Last neighborhood size considered
DM: bool, default=False
Is the input a precomputed distance matrix (dense)
"""
def __init__(self, k1=10, k2=20, DM=False):
self.k1 = k1
self.k2 = k2
self.DM = DM
def fit(self, X, y=None):
"""A reference implementation of a fitting function.
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
The training input samples.
y : dummy parameter to respect the sklearn API
Returns
-------
self : object
Returns self.
"""
X = check_array(X, ensure_min_samples=2, ensure_min_features=2)
if self.k2 >= len(X):
warnings.warn("k2 larger or equal to len(X), using len(X)-1")
self.k2 = len(X) - 1
if self.k1 >= len(X) or self.k1 > self.k2:
warnings.warn("k1 larger than k2 or len(X), using k2-1")
self.k1 = self.k2 - 1
self.dimension_ = self._corrint(X)
self.is_fitted_ = True
# `fit` should always return `self`
return self
def _corrint(self, X):
n_elements = len(X) ** 2 # number of elements
dists, _ = get_nn(X, self.k2)
if self.DM is False:
chunked_distmat = pairwise_distances_chunked(X)
else:
chunked_distmat = X
r1 = np.median(dists[:, self.k1 - 1])
r2 = np.median(dists[:, self.k2 - 1])
n_diagonal_entries = len(X) # remove diagonal from sum count
s1 = -n_diagonal_entries
s2 = -n_diagonal_entries
for chunk in chunked_distmat:
s1 += (chunk < r1).sum()
s2 += (chunk < r2).sum()
Cr = np.array([s1 / n_elements, s2 / n_elements])
estq = np.diff(np.log(Cr)) / np.log(r2 / r1)
return estq[0] | 0.882453 | 0.662309 |
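A usage sketch for the correlation-dimension estimator above; `k1` and `k2` set the two neighborhood sizes whose median radii define the correlation-sum ratio.

```python
# Correlation dimension of a 3-ball embedded in 10 dimensions; ~3 expected.
import numpy as np
import skdim

X = np.zeros((1000, 10))
X[:, :3] = skdim.datasets.hyperBall(n=1000, d=3, radius=1, random_state=0)

ci = skdim.id.CorrInt(k1=10, k2=20).fit(X)
print(ci.dimension_)
```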
<p align="left">
<img alt="Scikit Discovery" src="https://github.com/MITHaystack/scikit-discovery/raw/master/skdiscovery/docs/images/skdiscovery_logo360x100.png"/>
</p>
- Explore scientific data with a set of tools for human-guided or automated discovery
- Design & configure data processing pipelines
- Define the parameter ranges for your algorithms, available algorithmic choices, and the framework will generate pipeline instances for you
- Use automatically perturbed data processing pipelines to create different data products.
- Easy to use with [scikit-dataaccess](https://github.com/MITHaystack/scikit-dataaccess) for integration of a variety of scientific data sets
<p align="center">
<img alt="Scikit Discovery Overview" src="https://github.com/MITHaystack/scikit-discovery/raw/master/skdiscovery/docs/images/skdiscovery_overviewdiag.png"/>
</p>
### Install
```
pip install scikit-discovery
```
### Documentation
See <https://github.com/MITHaystack/scikit-discovery/tree/master/skdiscovery/docs>
### Contributors
Project lead: [Victor Pankratius (MIT)](http://www.victorpankratius.com)<br>
Contributors: Cody M. Rude, Justin D. Li, David M. Blair, Michael G. Gowanlock, Evan Wojciechowski, Victor Pankratius
### Acknowledgements
We acknowledge support from NASA AIST14-NNX15AG84G, NASA AIST16-80NSSC17K0125, NSF ACI-1442997, NSF AGS-1343967, and Amazon AWS computing access support.
## Examples
Example code with complete science case studies are available as Jupyter Notebooks at:
[/MITHaystack/science-casestudies](https://github.com/MITHaystack/science-casestudies)
| scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/README.md | README.md | pip install scikit-discovery | 0.637821 | 0.929184 |
The MIT License (MIT)<br>
Copyright (c) 2017 Massachusetts Institute of Technology<br>
Author: Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
Framework - Offloading Pipeline Processing to Amazon Demo
=====================
This notebook demonstrates offloading work to an amazon server
Initial imports
```
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (14.0, 3.0)
import re
```
skdaccess imports
```
from skdaccess.framework.param_class import *
from skdaccess.geo.groundwater import DataFetcher as GWDF
```
skdiscovery imports
```
from skdiscovery.data_structure.framework import DiscoveryPipeline
from skdiscovery.data_structure.framework.stagecontainers import *
from skdiscovery.data_structure.table.filters import MedianFilter
from skdiscovery.data_structure.generic.accumulators import DataAccumulator
```
Configure groundwater data fetcher
```
# Setup time range
start_date = '2000-01-01'
end_date = '2015-12-31'
# Select station
station_id = 340503117104104
# Create Data Fetcher
gwdf = GWDF([AutoList([station_id])],start_date,end_date)
```
Create Pipeline
```
ap_window = AutoParamListCycle([1,
15,
40,
70,
150,
300])
fl_median = MedianFilter('Median Filter',[ap_window],interpolate=False)
sc_median = StageContainer(fl_median)
acc_data = DataAccumulator('Data Accumulator',[])
sc_data = StageContainer(acc_data)
pipeline = DiscoveryPipeline(gwdf,[sc_median, sc_data])
```
Display pipeline
```
pipeline.plotPipelineInstance()
```
Run the pipeline, offloading the processing to a node on Amazon.
While running, the amazon node can display the jobs:
![Amazon Node](images/amazon_run.png)
```
pipeline.run(num_runs=6,amazon=True)
```
Plot the results
```
# Get the results
results = pipeline.getResults()
metadata = pipeline.getMetadataHistory()
# Loop over each pipeline run
for index,(run,info) in enumerate(zip(results,metadata)):
# Plot the depth to water level
plt.plot(run['Data Accumulator'][340503117104104].loc[:,'Median Depth to Water']);
# Set xlabel
plt.xlabel('Date');
# Set ylabel
plt.ylabel("Depth to Water Level");
# Set title
    plt.title('Median Filter Window: ' + re.search(r"\['(.*)'\]", info[1]).group(1) + ' Days');
#Create new figure
plt.figure();
```
| scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/examples/Amazon_Offload.ipynb | Amazon_Offload.ipynb | %matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (14.0, 3.0)
import re
from skdaccess.framework.param_class import *
from skdaccess.geo.groundwater import DataFetcher as GWDF
from skdiscovery.data_structure.framework import DiscoveryPipeline
from skdiscovery.data_structure.framework.stagecontainers import *
from skdiscovery.data_structure.table.filters import MedianFilter
from skdiscovery.data_structure.generic.accumulators import DataAccumulator
# Setup time range
start_date = '2000-01-01'
end_date = '2015-12-31'
# Select station
station_id = 340503117104104
# Create Data Fetcher
gwdf = GWDF([AutoList([station_id])],start_date,end_date)
ap_window = AutoParamListCycle([1,
15,
40,
70,
150,
300])
fl_median = MedianFilter('Median Filter',[ap_window],interpolate=False)
sc_median = StageContainer(fl_median)
acc_data = DataAccumulator('Data Accumulator',[])
sc_data = StageContainer(acc_data)
pipeline = DiscoveryPipeline(gwdf,[sc_median, sc_data])
pipeline.plotPipelineInstance()
pipeline.run(num_runs=6,amazon=True)
# Get the results
results = pipeline.getResults()
metadata = pipeline.getMetadataHistory()
# Loop over each pipeline run
for index,(run,info) in enumerate(zip(results,metadata)):
# Plot the depth to water level
plt.plot(run['Data Accumulator'][340503117104104].loc[:,'Median Depth to Water']);
# Set xlabel
plt.xlabel('Date');
# Set ylabel
plt.ylabel("Depth to Water Level");
# Set title
    plt.title('Median Filter Window: ' + re.search(r"\['(.*)'\]", info[1]).group(1) + ' Days');
#Create new figure
plt.figure(); | 0.555918 | 0.699036 |
The MIT License (MIT)<br>
Copyright (c) 2017 Massachusetts Institute of Technology<br>
Author: Cody Rude<br>
This software has been created in projects supported by the US National<br>
Science Foundation and NASA (PI: Pankratius)<br>
Permission is hereby granted, free of charge, to any person obtaining a copy<br>
of this software and associated documentation files (the "Software"), to deal<br>
in the Software without restriction, including without limitation the rights<br>
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell<br>
copies of the Software, and to permit persons to whom the Software is<br>
furnished to do so, subject to the following conditions:<br>
The above copyright notice and this permission notice shall be included in<br>
all copies or substantial portions of the Software.<br>
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR<br>
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,<br>
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE<br>
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER<br>
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,<br>
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN<br>
THE SOFTWARE.<br>
```
from skdiscovery.utilities.cloud import amazon_gui as ag
ag.init()
```
| scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/examples/Amazon_GUI.ipynb | Amazon_GUI.ipynb | from skdiscovery.utilities.cloud import amazon_gui as ag
ag.init() | 0.396886 | 0.432363 |
from skdiscovery.data_structure.framework import PipelineItem
import numpy as np
from sklearn.decomposition import PCA
from sklearn.decomposition import FastICA
class General_Component_Analysis(PipelineItem):
'''
Performs either ICA or PCA analysis on series data
'''
def __init__(self, str_description, ap_paramList):
'''
Initialize Analysis object.
@param str_description: String description of analysis
@param ap_paramList[num_components]: Number of components
@param ap_paramList[component_type]: Type of component analysis (CA); either PCA or ICA
@param ap_paramList[start_time]: Starting time for CA
@param ap_paramList[end_time]: ending time for CA
@param ap_paramList[labels]: Optional list of label names
'''
self.str_description = str_description
self.ap_paramList = ap_paramList
self.ap_paramNames = ['n_components','component_type','start_time','end_time','label_names']
self.results = dict()
def process(self, obj_data):
'''
Perform component analysis on data:
Results are added to the data wrapper as a dictionary with
        results['CA'] = Eigenvectors
results['Projection'] = Projection on to the eigenvectors
@param obj_data: Data wrapper containing the data
'''
num_components = self.ap_paramList[0]()
component_type = self.ap_paramList[1]()
start_time = self.ap_paramList[2]()
end_time = self.ap_paramList[3]()
results = dict()
results['start_date'] = start_time
results['end_date'] = end_time
if len(self.ap_paramList) >= 5:
label_names = self.ap_paramList[4]()
else:
label_names = None
cut_data = []
for label, data, err in obj_data.getIterator():
            if label_names is None or label in label_names:
cut_data.append(data[start_time:end_time])
cut_data = np.array(cut_data)
if len(cut_data) > 0:
if component_type == 'ICA' :
ca = FastICA(n_components = num_components)
else:
ca = PCA(n_components = num_components)
time_projection = ca.fit_transform(cut_data.T)
results['CA'] = ca
results['Projection'] = time_projection
else:
results['CA'] = None
results['Projection'] = None
obj_data.addResult(self.str_description, results) | scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/series/analysis/gca.py | gca.py |
from skdiscovery.data_structure.framework import PipelineItem
import numpy as np
from sklearn.decomposition import PCA
from sklearn.decomposition import FastICA
class General_Component_Analysis(PipelineItem):
'''
Performs either ICA or PCA analysis on series data
'''
def __init__(self, str_description, ap_paramList):
'''
Initialize Analysis object.
@param str_description: String description of analysis
@param ap_paramList[num_components]: Number of components
@param ap_paramList[component_type]: Type of component analysis (CA); either PCA or ICA
@param ap_paramList[start_time]: Starting time for CA
@param ap_paramList[end_time]: ending time for CA
@param ap_paramList[labels]: Optional list of label names
'''
self.str_description = str_description
self.ap_paramList = ap_paramList
self.ap_paramNames = ['n_components','component_type','start_time','end_time','label_names']
self.results = dict()
def process(self, obj_data):
'''
Perform component analysis on data:
Results are added to the data wrapper as a dictionary with
        results['CA'] = Eigenvectors
results['Projection'] = Projection on to the eigenvectors
@param obj_data: Data wrapper containing the data
'''
num_components = self.ap_paramList[0]()
component_type = self.ap_paramList[1]()
start_time = self.ap_paramList[2]()
end_time = self.ap_paramList[3]()
results = dict()
results['start_date'] = start_time
results['end_date'] = end_time
if len(self.ap_paramList) >= 5:
label_names = self.ap_paramList[4]()
else:
label_names = None
cut_data = []
for label, data, err in obj_data.getIterator():
            if label_names is None or label in label_names:
cut_data.append(data[start_time:end_time])
cut_data = np.array(cut_data)
if len(cut_data) > 0:
if component_type == 'ICA' :
ca = FastICA(n_components = num_components)
else:
ca = PCA(n_components = num_components)
time_projection = ca.fit_transform(cut_data.T)
results['CA'] = ca
results['Projection'] = time_projection
else:
results['CA'] = None
results['Projection'] = None
obj_data.addResult(self.str_description, results) | 0.705278 | 0.32142 |
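A sketch of how the analysis stage above might be wired into a scikit-discovery pipeline, mirroring the StageContainer/DiscoveryPipeline pattern from the notebook earlier in this document. The `AutoParam` wrapper (assumed from skdaccess; any callable returning its value satisfies `ap_paramList`), the parameter values, and the result-access comments are all illustrative, not taken from the package documentation.

```python
# Hypothetical wiring only; parameter values are placeholders.
from skdaccess.framework.param_class import AutoParam          # assumed wrapper
from skdiscovery.data_structure.framework.stagecontainers import StageContainer
from skdiscovery.data_structure.series.analysis.gca import General_Component_Analysis

ana_gca = General_Component_Analysis(
    'CA',
    [AutoParam(3),              # number of components
     AutoParam('PCA'),          # component type: 'PCA' or 'ICA'
     AutoParam('2006-01-01'),   # start time of the analysis window
     AutoParam('2008-01-01')]   # end time of the analysis window
)
sc_gca = StageContainer(ana_gca)
# pipeline = DiscoveryPipeline(data_fetcher, [sc_gca, ...]); pipeline.run()
# each run's results would then expose results['CA']['CA'] (the fitted estimator)
# and results['CA']['Projection'] (the projection onto the components).
```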
import collections
import numpy as np
import scipy.optimize as optimize
import skdaccess.utilities.pbo_util as pbo_utils
from skdiscovery.data_structure.framework import PipelineItem
from skdiscovery.utilities.patterns import pbo_tools
from skdiscovery.utilities.patterns.pbo_tools import SourceWrapper, MogiVectors
class Mogi_Inversion(PipelineItem):
'''
Perform a Mogi source inversion on a set of gps series data
The source is assumed to be a Mogi source (point source), but other source models can be selected.
Assumes directions are named ('dN', 'dE', 'dU').
'''
def __init__(self, str_description, ap_paramList):
'''
Initialize Mogi analysis item
@param str_description: Description of the item
@param ap_paramList[h_pca_name]: Name of the pca computed by General_Component_Analysis. Gets start and end date from the PCA fit.
@param ap_paramList[source_type]: Type of magma chamber source model to use (mogi [default],finite_sphere,closed_pipe,constant_open_pipe,rising_open_pipe,sill)
'''
super(Mogi_Inversion, self).__init__(str_description, ap_paramList)
self.ap_paramNames = ['pca_name','source_type']
def FitPCA(self, hPCA_Proj):
'''
Determine the timing of the inflation event.
Uses the first component of the pca projection and
fits A * arctan( (t - t0) / c ) + B to the first pca projection.
@param hPCA_Proj: The sklearn PCA projection
@return [t0, c]
'''
fitfunc = lambda p,t: p[0]*np.arctan((t-p[1])/p[2])+p[3]
errfunc = lambda p,x,y: fitfunc(p,x) - y
dLen = len(hPCA_Proj[:,0])
pA, success = optimize.leastsq(errfunc,[1.,dLen/2.,1.,0.],args=(np.arange(dLen),hPCA_Proj[:,0]))
ct = pA[1:3]
return ct, pA[0]
def FitTimeSeries(self, pd_series, ct):
'''
Fits the amplitude and offset of an inflation event given the time and length of the event.
Fits A and B in A * arctan( (t - t0) / c) + B
@param pd_series: Time series to be fit
@param ct: [t0, c]
@return Amplitude of fit
'''
fitfunc2 = lambda p,c,t: p[0]*np.arctan((t-c[0])/c[1])+p[1]
errfunc2 = lambda p,c,x,y: fitfunc2(p,c,x) - y
dLen = len(pd_series)
pA, pcov = optimize.leastsq(errfunc2,[1.,0.],args=(ct,np.arange(dLen),pd_series))
# res = fitfunc2(pA,ct,np.arange(dLen))[-1]-fitfunc2(pA,ct,np.arange(dLen))[0]
res = pA[0]*np.pi
s_sq = (errfunc2(pA,ct,np.arange(dLen),pd_series)**2).sum()/(len(pd_series)-2)
pcov = pcov * s_sq
error = []
for i in range(len(pA)):
try:
error.append(np.absolute(pcov[i][i])**0.5)
except:
error.append( 0.00 )
perr_leastsq = np.array(error)
return res, perr_leastsq
def process(self, obj_data):
'''
Finds the magma source (default-mogi) from PBO GPS data.
Assumes time series columns are named ('dN', 'dE', 'dU'). Predicts location of the
magma source using scipy.optimize.curve_fit
        The fitted source is stored in the data wrapper as an ordered dictionary with keys:
        'lat'           = latitude
        'lon'           = longitude
        'depth'         = source depth (km)
        'amplitude'     = volume change (meters^3)
        'ex_params'     = extra parameters (depends on the selected source type)
        'pca_amplitude' = amplitude of the arctan fit to the first PCA component
        'source_type'   = name of the source model used
@param obj_data: Data object containing the results from the PCA stage
'''
h_pca_name = self.ap_paramList[0]()
if len(self.ap_paramList)>=2:
exN = {'mogi':0,'finite_sphere':1,'closed_pipe':1,'constant_open_pipe':1,'rising_open_pipe':2,'sill':0}
try:
mag_source = getattr(pbo_tools,self.ap_paramList[1]().lower())
ExScParams = tuple(np.ones((exN[self.ap_paramList[1]().lower()],)))
            except (AttributeError, KeyError):
mag_source = pbo_tools.mogi
ExScParams = ()
print('No source type called '+self.ap_paramList[1]()+', defaulting to a Mogi source.')
else:
mag_source = pbo_tools.mogi
ExScParams = ()
mag_source = SourceWrapper(mag_source)
projection = obj_data.getResults()[h_pca_name]['Projection']
start_date = obj_data.getResults()[h_pca_name]['start_date']
end_date = obj_data.getResults()[h_pca_name]['end_date']
ct, pca_amp = self.FitPCA(projection)
pca_amp *= np.pi
tp_directions = ('dN', 'dE', 'dU')
xvs = []
yvs = []
zvs = []
for label, data, err in obj_data.getIterator():
if label in tp_directions:
distance,f_error = self.FitTimeSeries(data.loc[start_date:end_date], ct)
if label == tp_directions[1]:
xvs.append(distance)
elif label == tp_directions[0]:
yvs.append(distance)
elif label == tp_directions[2]:
zvs.append(distance)
else:
print('Ignoring column: ', label)
xvs = np.array(xvs)*1e-6
yvs = np.array(yvs)*1e-6
zvs = np.array(zvs)*1e-6
ydata = np.hstack((xvs, yvs,zvs)).T
station_list = obj_data.get().minor_axis
meta_data = obj_data.info()
station_coords = pbo_utils.getStationCoords(meta_data, station_list)
dimensions = ('x','y','z')
xdata = []
for dim in dimensions:
for coord in station_coords:
xdata.append((dim, coord[0], coord[1]))
coord_range = np.array(pbo_utils.getLatLonRange(meta_data, station_list))
lat_guess = np.mean(coord_range[0,:])
lon_guess = np.mean(coord_range[1,:])
fit = optimize.curve_fit(mag_source, xdata, ydata, (lat_guess, lon_guess, 5, 1e-4)+ExScParams)
res = collections.OrderedDict()
res['lat'] = fit[0][0]
res['lon'] = fit[0][1]
res['depth'] = fit[0][2]
res['amplitude'] = fit[0][3]
if len(fit[0])>4:
res['ex_params'] = fit[0][4:]
else:
res['ex_params'] = np.nan
res['pca_amplitude'] = pca_amp
if len(self.ap_paramList)>=2:
res['source_type'] = self.ap_paramList[1]().lower()
else:
res['source_type'] = 'mogi'
obj_data.addResult(self.str_description, res)
# lat_fit_range = (np.min(lat_list)-0.15, np.max(lat_list)+0.15)
# lon_fit_range = (np.min(lon_list)-0.15, np.max(lon_list)+0.15)
# res = optimize.brute(self.mogi, (lat_fit_range, lon_fit_range,
# (1,10), (1e-5, 1e-3)),
# args = (xvs*1e-6, yvs*1e-6, zvs*1e-6,
        #                                  station_list, meta_data))
| scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/series/analysis/mogi.py | mogi.py |
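# --- Editor's sketch (not part of the original module) -----------------------
# Mogi_Inversion.FitPCA/FitTimeSeries (above) model an inflation event as
# A*arctan((t - t0)/c) + B.  The self-contained numpy/scipy sketch below builds
# a synthetic series with a known step and recovers it with leastsq; the
# reported A*pi is the full rise of the arctan term.
import numpy as np
from scipy import optimize

t = np.arange(500)
A_true, t0_true, c_true, B_true = 2.0, 250.0, 20.0, 1.0
y = A_true * np.arctan((t - t0_true) / c_true) + B_true
y += np.random.normal(scale=0.1, size=t.size)          # measurement noise

fitfunc = lambda p, t: p[0] * np.arctan((t - p[1]) / p[2]) + p[3]
errfunc = lambda p, t, y: fitfunc(p, t) - y
p_fit, _ = optimize.leastsq(errfunc, [1.0, len(t) / 2.0, 1.0, 0.0], args=(t, y))

print('recovered [A, t0, c, B]:', p_fit)
print('full step height (A*pi):', p_fit[0] * np.pi)    # compare to A_true*pi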
from skdiscovery.data_structure.framework import PipelineItem
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
class OffsetDetrend(PipelineItem):
'''
Trend filter that fits a stepwise function to linearly detrended series data
On detrended data this filter fits a stepwise function (number of
steps provided by the user) to correct the linear fit by
accounting for discontinuous offsets, such as due to a change in
the antenna or from an earthquake. The final linear fit handles
each portion of the offset independently. If the number of
discontinuities is not provided as an autoparam, the filter
assumes a single discontinuity.
'''
def __init__(self, str_description, ap_paramList = [], labels=None, column_names=None, time_point=None, time_interval=None):
'''
Initialize OffsetDetrend filter
@param str_description: String describing filter
@param ap_paramList[step_count]: Number of steps to remove from data (Default: 1)
@param labels: List of labels used to select data to be removed (using None will apply to all labels)
@param column_names: List of column names to select data to be removed (using None will apply to all columns)
@param time_point: Time of offset
@param time_interval: Interval within which the offset occurs
'''
self.labels = labels
self.column_names = column_names
self.time_point = time_point
        if time_interval is None:
            self.time_interval = [-500, 500]
        elif isinstance(time_interval, int):
            self.time_interval = [-time_interval, time_interval]
        else:
            self.time_interval = time_interval
self.ap_paramNames = ['step_count']
super(OffsetDetrend, self).__init__(str_description, ap_paramList)
def process(self, obj_data):
'''
Apply offset estimation and detrending filter to data set.
@param obj_data: Input data. Changes are made in place.
'''
labels = self.labels
column_names = self.column_names
# user provided number of steps/offsets in the data
step_count = 1
if len(self.ap_paramList) != 0:
step_count = self.ap_paramList[0]()
for label, data, err in obj_data.getIterator():
if (labels is None or label in labels) and \
(column_names is None or data.name in column_names):
# keep track of the time index and the location of nan's
tindex = data.index
reind = np.array(np.isnan(data))
# a temporary time index and data array without nan's
nts = np.arange(len(data))
nts = np.delete(nts,nts[reind])
nys = data[reind==False]
# Decision Tree Regressor for finding the discontinuities
regr_1 = DecisionTreeRegressor(max_depth=step_count)
                if self.time_point is None:
regr_1.fit(nts[:,np.newaxis], nys)
else:
# make time_point (a string) into an index
time_point = np.where(tindex==self.time_point)[0][0]
regr_1.fit(nts[(time_point+self.time_interval[0]):(time_point+self.time_interval[1]),np.newaxis],
nys[(time_point+self.time_interval[0]):(time_point+self.time_interval[1])])
r1 = regr_1.predict(nts[:,np.newaxis])
                # offset the discontinuity to be continuous and fit a single line
# (using median of 5 points on either side of discontinuity)
nys[r1==r1[-1]] += np.median(nys[r1==r1[0]][-5:-1]) - np.median(nys[r1==r1[-1]][0:5])
z3 = np.polyfit(nts, nys, 1)
# make the data into a pd series and correctly index
x3 = pd.Series(data=nys-(z3[0]*nts+z3[1]),index=tindex[reind==False])
x3 = x3.reindex(tindex)
# and then use that to update in place
                data.update(x3)
| scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/series/filters/offset_detrend.py | offset_detrend.py |
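# --- Editor's sketch (not part of the original module) -----------------------
# OffsetDetrend locates the discontinuity with a depth-limited decision tree: a
# DecisionTreeRegressor with max_depth=n fits a piecewise-constant function with
# at most 2**n levels, so its split lands on the offset.  Minimal illustration
# on a synthetic series with a single 5-unit jump:
import numpy as np
from sklearn.tree import DecisionTreeRegressor

t = np.arange(200)
y = np.random.normal(scale=0.1, size=t.size)
y[120:] += 5.0                                    # artificial antenna-style offset

tree = DecisionTreeRegressor(max_depth=1)         # one step -> one split
step = tree.fit(t[:, np.newaxis], y).predict(t[:, np.newaxis])
jump = np.argmax(np.abs(np.diff(step))) + 1       # index where the fit changes level
print('offset detected near t =', t[jump], ', size ~', step[-1] - step[0])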
from collections import OrderedDict
from skdiscovery.data_structure.framework.base import PipelineItem
from skdiscovery.utilities.patterns.image_tools import divideIntoSquares
import numpy as np
class TileImage(PipelineItem):
'''
Create several smaller images from a larger image
'''
def __init__(self, str_description, ap_paramList, size, min_deviation=None, min_fraction=None, deviation_as_percent=False):
'''
Initialize TileImage item
@param str_description: String description of item
@param ap_paramList[stride]: Distance between neighboring tiles
@param size: Size of tile (length of one side of a square)
        @param min_deviation: Minimum deviation to use when determining whether to keep a tile
@param min_fraction: Minimum fraction of pixels above min_deviation needed to keep tile
@param deviation_as_percent: Treat min_deviation as a percentage of the max value of the original image
'''
if deviation_as_percent and min_deviation is None:
raise RuntimeError('Must supply min_deviation when deviation_as_percent is True')
self.size = size
self._min_deviation = min_deviation
self._min_fraction = min_fraction
self._deviation_as_percent = deviation_as_percent
super(TileImage, self).__init__(str_description, ap_paramList)
def process(self, obj_data):
'''
        Generate new images by tiling input images
@param obj_data: Input image wrapper
'''
stride = self.ap_paramList[0]()
if len(self.ap_paramList) > 1:
threshold_function = self.ap_paramList[1]()
else:
threshold_function = None
if threshold_function is not None and self._min_fraction is None:
raise RuntimeError('Must supply min_fraction with threshold function')
if threshold_function is not None and self._min_deviation is not None:
raise RuntimeError('Cannot supply both min_deviation and threshold function')
results = OrderedDict()
metadata = OrderedDict()
for label, data in obj_data.getIterator():
extents, patches = divideIntoSquares(data, self.size, stride)
if self._deviation_as_percent:
min_deviation = self._min_deviation * np.max(data)
else:
min_deviation = self._min_deviation
if self._min_fraction is not None:
if min_deviation is not None:
                    # keep tiles where the fraction of pixels at or above min_deviation exceeds min_fraction
                    valid_index = np.count_nonzero(np.abs(patches) >= min_deviation, axis=(1,2)) / np.prod(patches.shape[1:]) > self._min_fraction
else:
threshold = threshold_function(np.abs(data))
threshold_data = np.abs(patches.copy())
threshold_data[threshold_data < threshold] = np.nan
valid_index = np.count_nonzero(~np.isnan(threshold_data), axis=(1,2)) / np.prod(patches.shape[1:]) > self._min_fraction
patches = patches[valid_index]
extents = extents[valid_index]
try:
metadata[label] = obj_data.info(label)
except TypeError:
pass
for index in range(0,patches.shape[0]):
new_label = label + ', Square: ' + str(index)
results[new_label] = patches[index, ...]
metadata[new_label] = OrderedDict()
metadata[new_label]['extent'] = extents[index,...]
obj_data.update(results)
        obj_data.updateMetadata(metadata)
| scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/image/generate/tile_image.py | tile_image.py |
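# --- Editor's sketch (not part of the original module) -----------------------
# tile_squares below is a hypothetical stand-in for divideIntoSquares, inferred
# only from how TileImage uses it: slide a size x size window over a 2-D array
# with a given stride and return the pixel extents plus the stacked patches.
# The real utility lives in skdiscovery.utilities.patterns.image_tools and may
# differ in detail.
import numpy as np

def tile_squares(image, size, stride):
    extents, patches = [], []
    for y in range(0, image.shape[0] - size + 1, stride):
        for x in range(0, image.shape[1] - size + 1, stride):
            extents.append((x, x + size, y, y + size))     # assumed extent layout
            patches.append(image[y:y + size, x:x + size])
    return np.array(extents), np.array(patches)

image = np.random.rand(100, 100)
extents, patches = tile_squares(image, size=32, stride=16)
print(patches.shape)   # (n_tiles, 32, 32)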
from skdiscovery.data_structure.framework.base import PipelineItem
from skdiscovery.data_structure.framework import DiscoveryPipeline
from skdiscovery.data_structure.generic.accumulators import DataAccumulator
from skdiscovery.data_structure.table.filters import CalibrateGRACE, Resample, CalibrateGRACEMascon
from skdiscovery.data_structure.framework.stagecontainers import *
from skdaccess.framework.param_class import *
from skdaccess.geo.grace import DataFetcher as GDF
from skdaccess.geo.grace.mascon.cache import DataFetcher as MasconDF
from skdaccess.geo.gldas import DataFetcher as GLDASDF
import numpy as np
class GraceFusion(PipelineItem):
'''
    Fuses GRACE equivalent water depth time series
Works on table data (original data from http://grace.jpl.nasa.gov/data/get-data/monthly-mass-grids-land/)
'''
def __init__(self, str_description, ap_paramList, metadata, column_data_name = 'Grace', column_error_name = 'Grace_Uncertainty'):
'''
Initialize Grace Fusion item
@param str_description: String describing item
        @param ap_paramList[gldas]: How to use the GLDAS (global land data assimilation) water model: 'off', 'remove', or 'only'
        @param ap_paramList[mascons]: Boolean indicating if the mascon solution should be used
        @param ap_paramList[apply_scale_factor]: Boolean indicating if the scaling factors should be applied
@param metadata: Metadata that contains lat,lon coordinates based on data labels
@param column_data_name: Name of column for GRACE data
@param column_error_name: Grace Uncertainty column name
'''
super(GraceFusion, self).__init__(str_description, ap_paramList)
self.metadata = metadata.copy()
self.column_data_name = column_data_name
self.column_error_name = column_error_name
# remove_sm_and_snow
self._tileCache = None
def process(self, obj_data):
'''
Adds columns for GRACE data and uncertainties
@param obj_data: Input DataWrapper, will be modified in place
'''
# Only perform fusion if data exists
gldas = self.ap_paramList[0]()
use_mascons = self.ap_paramList[1]()
apply_scale_factor = self.ap_paramList[2]()
if obj_data.getLength() > 0:
start_date = None
end_date = None
for label, data in obj_data.getIterator():
try:
lat = self.metadata[label]['Lat']
lon = self.metadata[label]['Lon']
                except Exception:
lat = self.metadata.loc[label,'Lat']
lon = self.metadata.loc[label,'Lon']
locations = [(lat,lon)]
                if start_date is None:
start_date = data.index[0]
end_date = data.index[-1]
else:
if start_date != data.index[0] \
or end_date != data.index[-1]:
raise RuntimeError("Varying starting and ending dates not supported")
al_locations = AutoList(locations)
al_locations_gldas = AutoList(locations)
                if not use_mascons:
graceDF = GDF([al_locations], start_date, end_date)
else:
graceDF = MasconDF([al_locations], start_date, end_date)
gldasDF = GLDASDF([al_locations_gldas], start_date, end_date)
def getData(datafetcher, pipe_type):
ac_data = DataAccumulator('Data',[])
sc_data = StageContainer(ac_data)
fl_grace = CalibrateGRACE('Calibrate', apply_scale_factor = apply_scale_factor)
sc_grace = StageContainer(fl_grace)
fl_mascon = CalibrateGRACEMascon('CalibrateMascon', apply_scale_factor = apply_scale_factor)
sc_mascon = StageContainer(fl_mascon)
fl_resample = Resample('Resample',start_date, end_date)
sc_resample = StageContainer(fl_resample)
if pipe_type == 'grace':
pipeline = DiscoveryPipeline(datafetcher, [sc_grace, sc_resample, sc_data])
elif pipe_type == 'mascon':
pipeline = DiscoveryPipeline(datafetcher, [sc_mascon, sc_resample, sc_data])
elif pipe_type == 'gldas':
pipeline = DiscoveryPipeline(datafetcher, [sc_resample, sc_data])
else:
raise RuntimeError('pipe_type: ' + str(pipe_type) + ' not understood')
pipeline.run(num_cores=1)
key = list(pipeline.getResults(0)['Data'].keys())[0]
return pipeline.getResults(0)['Data'][key]
# Load GRACE data
                if not use_mascons:
grace_data = getData(graceDF, 'grace')
else:
grace_data = getData(graceDF, 'mascon')
if gldas.lower() == 'off':
# We are not removing sm and snow
obj_data.addColumn(label, self.column_data_name, grace_data['EWD'])
obj_data.addColumn(label, self.column_error_name, grace_data['EWD_Error'])
elif gldas.lower() == 'remove':
# If we are removing sm and snow
gldas_data = getData(gldasDF, 'gldas')
grace = grace_data['Data']
gldas = gldas_data['Data']
grace_index = grace.index
grace.dropna(inplace=True)
# If no grace data available, no need to remove gldas
if len(grace) == 0:
continue
# Get matching start and end
start_grace = grace.index[0]
end_grace = grace.index[-1]
start_gldas = gldas.index[0]
end_gldas = gldas.index[-1]
start_month = np.max([start_gldas,start_grace]).strftime('%Y-%m')
end_month = np.min([end_gldas,end_grace]).strftime('%Y-%m')
# Convert gldas to a data frame
# and save index
# gldas = gldas.loc[:,:,'GLDAS'].copy()
gldas.loc[:,'Date'] = gldas.index
# Index GLDAS data by month
new_index = [date.strftime('%Y-%m') for date in gldas.index]
gldas.loc[:,'Month'] = new_index
gldas.set_index('Month',inplace=True)
# select only months that are also in GRACE
cut_gldas = gldas.loc[[date.strftime('%Y-%m') for date in grace.loc[start_month:end_month,:].index],:]
# index GLDAS data to GRACE dates
cut_gldas.loc[:,'Grace Index'] = grace.loc[start_month:end_month,:].index
cut_gldas.set_index('Grace Index', inplace=True)
# Calculate distance between days
offset_days = cut_gldas.index - cut_gldas.loc[:,'Date']
offset_days = offset_days.apply(lambda t: t.days)
cut_gldas.loc[:,'Offset'] = offset_days
# Remove any data where the difference between gldas and grace are > 10 days
cut_gldas = cut_gldas[np.abs(cut_gldas.loc[:,'Offset']) < 10].copy()
# Select appropriate Grace Data
cut_grace = grace.loc[cut_gldas.index,:]
# Remove contribution of snow + sm to GRACE
cut_grace.loc[:,'Grace'] = cut_grace.loc[:,'Grace'] - cut_gldas['GLDAS']
                    # Restore the original index (missing months become NaN)
                    # and place the corrected series back into the GRACE results
                    grace = cut_grace.reindex(grace_index)
                    grace_data['Data'] = grace
# All the snow and sm contribution has been removed,
# so the dictionary can now be returned
obj_data.addColumn(label, self.column_data_name, grace_data['EWD'])
obj_data.addColumn(label, self.column_error_name, grace_data['EWD_Error'])
elif gldas.lower() == 'only':
obj_data.addColumn(label, self.column_data_name, ['EWD'])
else:
                    raise ValueError('Did not understand gldas option: ' + gldas.lower())
| scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/fusion/grace.py | grace.py |
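# --- Editor's sketch (not part of the original module) -----------------------
# GraceFusion's 'remove' branch aligns monthly GLDAS values to GRACE epochs by
# matching on 'YYYY-MM' keys and then dropping pairs whose observation dates
# differ by more than 10 days.  A self-contained pandas illustration of that
# alignment on synthetic series (values are random placeholders):
import numpy as np
import pandas as pd

grace = pd.Series(np.random.rand(6), name='Grace',
                  index=pd.date_range('2010-01-05', periods=6, freq='30D'))
gldas = pd.Series(np.random.rand(6), name='GLDAS',
                  index=pd.date_range('2010-01-01', periods=6, freq='MS'))

gldas_df = pd.DataFrame({'GLDAS': gldas.values, 'Date': gldas.index},
                        index=gldas.index.strftime('%Y-%m'))
cut = gldas_df.reindex(grace.index.strftime('%Y-%m'))     # match by month
cut.index = grace.index                                    # re-index onto GRACE epochs
offset_days = (cut.index.to_series() - cut['Date']).dt.days
keep = offset_days.abs() < 10
corrected = grace[keep] - cut.loc[keep, 'GLDAS']           # snow/soil-moisture removed
print(corrected)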
from skdiscovery.data_structure.framework import PipelineItem
import numpy as np
from sklearn.decomposition import PCA
from sklearn.decomposition import FastICA
class General_Component_Analysis(PipelineItem):
'''
Performs a general component analysis on table data.
Currently, the two built-in types of analysis are either ICA or PCA.
'''
def __init__(self, str_description, ap_paramList, n_components, column_names, **kwargs):
'''
Initialize Analysis object
@param str_description: String description of analysis
@param ap_paramList[component_type]: Type of CA; either PCA or ICA
@param ap_paramList[start_time]: Starting time for CA
@param ap_paramList[end_time]: ending time for CA
@param n_components: Number of components to compute
@param column_names: Columns names to use
@param kwargs: Extra keyword arguments to pass on to ICA (ignored for PCA)
'''
self.str_description = str_description
self.ap_paramList = ap_paramList
self.ap_paramNames = ['component_type','start_time','end_time']
self.n_components = n_components
self.column_names = column_names
self.kwargs = kwargs
self.results = dict()
def process(self, obj_data):
'''
Perform component analysis on data
Results are added to the data wrapper as a dictionary with
        results['CA'] = Fitted PCA/ICA object (eigenvectors available as CA.components_)
results['Projection'] = Projection on to the eigenvectors
@param obj_data: Data wrapper
'''
component_type = self.ap_paramList[0]()
start_time = self.ap_paramList[1]()
end_time = self.ap_paramList[2]()
num_components = self.n_components
results = dict()
results['start_date'] = start_time
results['end_date'] = end_time
cut_data = []
label_list = []
for label, data in obj_data.getIterator():
for column in self.column_names:
cut_data.append(data.loc[start_time:end_time, column])
label_list.append(label)
cut_data = np.array(cut_data)
if len(cut_data) > 0:
if component_type == 'ICA' :
ca = FastICA(n_components = num_components, **self.kwargs)
else:
ca = PCA(n_components = num_components)
time_projection = ca.fit_transform(cut_data.T)
results['CA'] = ca
results['Projection'] = time_projection
else:
results['CA'] = None
results['Projection'] = None
results['labels'] = label_list
        obj_data.addResult(self.str_description, results)
| scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/analysis/gca.py | gca.py |
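# --- Editor's sketch (not part of the original module) -----------------------
# The result stored by General_Component_Analysis is a plain dict, so a
# downstream step can rebuild a time-indexed DataFrame of the projections.
# summarize_gca is a hypothetical helper; it assumes, as RotatePCA does below,
# that the projection is daily between start_date and end_date, and prints the
# explained variance when the fitted object is a PCA.
import pandas as pd

def summarize_gca(results):
    dates = pd.date_range(results['start_date'], results['end_date'])
    columns = ['PC%d' % (i + 1) for i in range(results['Projection'].shape[1])]
    projection = pd.DataFrame(results['Projection'], index=dates, columns=columns)
    if hasattr(results['CA'], 'explained_variance_ratio_'):   # PCA only
        print('explained variance:', results['CA'].explained_variance_ratio_)
    return projection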
# 3rd party imports
import numpy as np
import pandas as pd
from scipy.optimize import brute
from fastdtw import fastdtw
# scikit discovery imports
from skdiscovery.data_structure.framework import PipelineItem
from skdiscovery.utilities.patterns import trend_tools as tt
# Standard library imports
from collections import OrderedDict
class RotatePCA(PipelineItem):
"""
    *** In Development *** Class for rotating PCA to separate superimposed signals
"""
def __init__(self, str_description, ap_paramList, pca_name, model, norm=None, num_components=3):
'''
@param str_description: String description of this item
@param ap_paramList[fit_type]: Fitness test to use (either 'dtw' or 'remove')
@param ap_paramList[resolution]: Fitting resolution when using brute force
@param pca_name: Name of pca results
@param model: Model to compare to (used in dtw)
@param norm: Normalization to use when comparing data and model (if None, absolute differences are used)
@param num_components: Number of pca components to use
'''
self._pca_name = pca_name
self._model = tt.normalize(model)
self.norm = norm
if num_components not in (3,4):
raise NotImplementedError('Only 3 or 4 components implemented')
self.num_components = num_components
super(RotatePCA, self).__init__(str_description, ap_paramList)
def _rotate(self, col_vector, az, ay, ax):
'''
Rotate column vectors in three dimensions
Rx * Ry * Rz * col_vectors
@param col_vector: Data as a column vector
@param az: Z angle
@param ay: Y angle
@param ax: X angle
@return rotated column vectors
'''
rz = np.array([[np.cos(az), -np.sin(az), 0], [np.sin(az), np.cos(az), 0], [0, 0, 1]])
ry = np.array([[np.cos(ay), 0, np.sin(ay)], [0, 1, 0], [-np.sin(ay), 0, np.cos(ay)]])
rx = np.array([[ 1, 0, 0], [0, np.cos(ax), -np.sin(ax)], [0, np.sin(ax), np.cos(ax)]])
rot = rx @ ry @ rz
return rot @ col_vector
def _rotate4d(self, col_vector, rot_angles):
'''
Rotate column vectors in four dimensions
@param col_vector: Data as a column vector
@param rot_angles: Rotation angles ('xy', 'yz', 'zx', 'xw', 'yw', 'zw')
@return rotated column vectors
'''
index_list = []
index_list.append([0,1])
index_list.append([1,2])
index_list.append([0,2])
index_list.append([0,3])
index_list.append([1,3])
index_list.append([2,3])
# Two different types:
# left sine is negative: Type 0
# right sine is negative: Type 1
type_list = [0, 0, 1, 0, 1, 1]
rotation_dict = OrderedDict()
# The order of the rotation matrix is as follows:
# (see https://hollasch.github.io/ray4/Four-Space_Visualization_of_4D_Objects.html#s2.2)
label_list = ['xy', 'yz', 'zx', 'xw', 'yw', 'zw']
for angle, label, index, negative_type in zip(rot_angles, label_list, index_list, type_list):
ct = np.cos(angle)
st = np.sin(angle)
rotation_matrix = np.eye(4)
rotation_matrix[index[0], index[0]] = ct
rotation_matrix[index[1], index[1]] = ct
rotation_matrix[index[0], index[1]] = st
rotation_matrix[index[1], index[0]] = st
if negative_type == 0:
rotation_matrix[index[1], index[0]] *= -1
elif negative_type == 1:
rotation_matrix[index[0], index[1]] *= -1
else:
raise RuntimeError('Invalid value of negative_type')
rotation_dict[label]=rotation_matrix
rot_matrix = np.eye(4)
for label, matrix in rotation_dict.items():
rot_matrix = rot_matrix @ matrix
return rot_matrix @ col_vector
def _rollFastDTW(self, data, centered_tiled_model, model_size):
'''
Compute minimum fastdtw distance for a model to match length of real data at all possible phases
@param data: Real input data
        @param centered_tiled_model: Model after being tiled to the appropriate length and normalized (mean removed and scaled by standard deviation)
@param model_size: Size of the original model (before tiling)
@return Index of minimum distance, minimum distance
'''
centered_data = tt.normalize(data)
fitness_values = [fastdtw(centered_data, np.roll(centered_tiled_model, i), dist=self.norm)[0] for i in range(model_size)]
min_index = np.argmin(fitness_values)
return min_index, fitness_values[min_index]
def _tileModel(self, in_model, new_size):
'''
Tile a model to increase its length
@param in_model: Input model
@param new_size: Size of tiled model
@return Tiled model
'''
num_models = int(np.ceil(new_size / len(in_model)))
return np.tile(in_model, num_models)[:new_size]
def _fitness(self, z, data, model, fit_type = 'dtw', num_components=3):
'''
Compute fitness of data given a model and rotation
@param z: Rotation angles
@param data: Input data
@param model: Input model
@param fit_type: Choose fitness computation between dynamic time warping ('dtw') or
                         by comparing to a seasonal and linear signal ('remove')
@param num_components: Number of pca components to use. Can be 3 or 4 for fit_type='dtw'
or 3 for fit_type='remove'
@return fitness value
'''
        if num_components == 3:
            new_data = self._rotate(data.values.T, *z)
        elif num_components == 4:
            new_data = self._rotate4d(data.values.T, z)
if fit_type == 'dtw':
return self._fitnessDTW(new_data, model, num_components)
elif fit_type == 'remove' and num_components == 3:
return self._fitnessRemove(pd.DataFrame(new_data.T, columns=['PC1','PC2','PC3'],
index=data.index))
elif fit_type == 'remove':
raise NotImplementedError("The 'remove' fitness type only works with 3 components")
else:
raise NotImplementedError('Only "dtw" and "remove" fitness types implemented')
def _fitnessDTW(self, new_data, model, num_components=3):
'''
Compute fitness value using dynamic time warping
@param new_data: Input data
@param model: Input model
        @param num_components: Number of PCA components to use (3 or 4)
@return fitness value using dynamic time warping
'''
tiled_model = tt.normalize(self._tileModel(model, new_data.shape[1]))
roll, primary_results = self._rollFastDTW(new_data[num_components-1,:], tiled_model, len(model))
# pc1_results = np.min([fastdtw(tt.normalize(new_data[0,:]), np.roll(tiled_model, roll))[0],
# fastdtw(-tt.normalize(-new_data[0,:]), np.roll(tiled_model, roll))[0]])
# pc2_results = np.min([fastdtw(tt.normalize(new_data[1,:]), np.roll(tiled_model, roll))[0],
# fastdtw(tt.normalize(-new_data[1,:]), np.roll(tiled_model, roll))[0]])
other_pc_results = 0
for i in range(num_components-1):
other_pc_results += self._rollFastDTW(new_data[i,:], tiled_model, len(model))[1]
return primary_results - other_pc_results
def _fitnessRemove(self, new_data):
'''
        Fitness value determined by how well seasonal and linear signals can be removed from the first two components
@param new_data: Input data
@return fitness value determined by comparison of first two components to seasonal and linear signals
'''
linear_removed = tt.getTrend(new_data['PC1'].asfreq('D'))[0]
annual_removed = tt.sinuFits(new_data['PC2'].asfreq('D'), 1, 1)
return linear_removed.var() + annual_removed.var()
def process(self, obj_data):
'''
Compute rotation angles for PCA
@param obj_data: Input table data wrapper
'''
fit_type = self.ap_paramList[0]()
resolution = self.ap_paramList[1]()
pca_results = obj_data.getResults()[self._pca_name]
date_range = pd.date_range(pca_results['start_date'], pca_results['end_date'])
column_names = ['PC' + str(i+1) for i in range(pca_results['CA'].n_components)]
pca = pd.DataFrame(data = pca_results['Projection'], index = date_range, columns=column_names)
pca.index.name='Date'
pca = pca.loc[:,['PC' + str(i+1) for i in range(self.num_components)]]
end_point = 360 - (360/resolution)
if self.num_components == 3:
num_ranges = 3
elif self.num_components == 4:
num_ranges = 4
else:
raise ValueError('Wrong number of components')
ranges = []
for i in range(num_ranges):
ranges.append((0, np.deg2rad(end_point)))
new_angles = brute(func=self._fitness,
ranges=ranges,
Ns=resolution,
args=(pca, self._model, fit_type, self.num_components))
final_score = self._fitness(new_angles, pca, self._model, fit_type, self.num_components)
rotated_pcs = pd.DataFrame(self._rotate(pca.T, *new_angles).T, index=pca.index, columns = pca.columns)
results = OrderedDict()
results['rotation_angles'] = new_angles
results['rotated_pcs'] = rotated_pcs
results['final_score'] = final_score
results['rotated_components'] = self._rotate(pca_results['CA'].components_, *new_angles)
        obj_data.addResult(self.str_description, results)
| scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/analysis/rotate_pca.py | rotate_pca.py |
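# --- Editor's sketch (not part of the original module) -----------------------
# The rotation RotatePCA applies to the principal components is orthonormal, so
# it only re-mixes the components without changing their total power.  A quick
# check of that property for the 3-D rotation used above:
import numpy as np

def rotation_matrix(az, ay, ax):
    rz = np.array([[np.cos(az), -np.sin(az), 0], [np.sin(az), np.cos(az), 0], [0, 0, 1]])
    ry = np.array([[np.cos(ay), 0, np.sin(ay)], [0, 1, 0], [-np.sin(ay), 0, np.cos(ay)]])
    rx = np.array([[1, 0, 0], [0, np.cos(ax), -np.sin(ax)], [0, np.sin(ax), np.cos(ax)]])
    return rx @ ry @ rz

rot = rotation_matrix(0.3, 1.1, 2.0)
pcs = np.random.rand(3, 500)                              # 3 components x 500 time steps
rotated = rot @ pcs
print(np.allclose(rot @ rot.T, np.eye(3)))                # True: orthonormal
print(np.allclose((pcs**2).sum(), (rotated**2).sum()))    # total power preserved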
from skdiscovery.data_structure.framework.base import PipelineItem
import numpy as np
import pandas as pd
from statsmodels.robust import mad
class MIDAS(PipelineItem):
'''
*In Development* A basic MIDAS trend estimator
See http://onlinelibrary.wiley.com/doi/10.1002/2015JB012552/full
'''
def __init__(self, str_description,column_names = None):
'''
        Initialize the MIDAS filtering item
@param str_description: String description of filter
@param column_names: List of column names to analyze
'''
super(MIDAS, self).__init__(str_description, [])
self.column_names = column_names
def process(self, obj_data):
'''
Apply the MIDAS estimator to generate velocity estimates
Adds the result to the data wrapper
@param obj_data: Data wrapper
'''
        if self.column_names is None:
column_names = obj_data.getDefaultColumns()
else:
column_names = self.column_names
time_diff = pd.to_timedelta('365d')
results = dict()
for label, data in obj_data.getIterator():
start_date = data.index[0]
end_date = data.index[-1]
for column in column_names:
start_data = data.loc[start_date:(end_date-time_diff), column]
end_data = data.loc[start_date+time_diff:end_date, column]
offsets = end_data.values - start_data.values
offsets = offsets[~np.isnan(offsets)]
med_off = np.median(offsets)
mad_off = mad(offsets)
cut_offsets = offsets[np.logical_and(offsets < med_off + 2*mad_off,
offsets > med_off - 2*mad_off)]
final_vel = np.median(cut_offsets)
final_unc = np.sqrt(np.pi/2) * mad(cut_offsets) / np.sqrt(len(cut_offsets))
results[label] = pd.DataFrame([final_vel,final_unc], ['velocity', 'uncertainty'] ,[column])
        obj_data.addResult(self.str_description, pd.Panel.fromDict(results,orient='minor'))
| scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/analysis/midas.py | midas.py |
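# --- Editor's sketch (not part of the original module) -----------------------
# The core MIDAS idea: take pairs of samples exactly one year apart, so any
# annual seasonal term cancels, and use the median of the year-to-year
# differences as the velocity (the module above additionally trims offsets
# beyond 2 MAD of the median).  A stripped-down check on synthetic daily data:
import numpy as np
import pandas as pd

dates = pd.date_range('2010-01-01', '2014-12-31', freq='D')
t_years = np.arange(len(dates)) / 365.25
series = 3.0 * t_years + 0.5 * np.sin(2 * np.pi * t_years)     # 3 units/yr + seasonal
series += np.random.normal(scale=0.05, size=len(dates))
data = pd.Series(series, index=dates)

one_year = pd.to_timedelta('365d')
start = data.loc[:data.index[-1] - one_year]
end = data.loc[data.index[0] + one_year:]
offsets = end.values - start.values               # pairs 365 days apart
print('velocity estimate:', np.median(offsets))   # ~3 units per 365 days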
from skdiscovery.data_structure.framework import PipelineItem
import pandas as pd
import numpy as np
class Correlate(PipelineItem):
'''
Computes the correlation for table data and stores the result as a matrix.
'''
def __init__(self, str_description, column_names = None, local_match = False, correlation_type = 'pearson'):
'''
Initialize Correlate analysis item for use on tables
@param str_description: String describing analysis item
@param column_names: List of column names to correlate
@param local_match: Only correlate data on the same frames
@param correlation_type: Type of correlation to be passed to pandas ('pearson', 'kendall', 'spearman')
'''
super(Correlate, self).__init__(str_description,[])
self.column_names = column_names
self.local_match = local_match
self.corr_type = correlation_type
def process(self, obj_data):
'''
Computes the correlation between columns and stores the results in obj_data
@param obj_data: Data wrapper
'''
if self.column_names == None:
column_names = obj_data.getDefaultColumns()
else:
column_names = self.column_names
if self.local_match == False:
data = []
index = []
for label, data_in in obj_data.getIterator():
for column in column_names:
data.append(data_in[column])
index.append(label + '.' + column)
index = np.array(index)
result = []
for s1 in data:
row = []
for s2 in data:
row.append(s1.corr(s2, method=self.corr_type))
result.append(row)
obj_data.addResult(self.str_description, pd.DataFrame(result, index=index, columns=index))
else:
full_results = dict()
for label, data_in in obj_data.getIterator():
data = []
index = []
for column in column_names:
data.append(data_in[column])
index.append(column)
result = []
for s1 in data:
row = []
for s2 in data:
row.append(s1.corr(s2, method=self.corr_type))
result.append(row)
full_results[label] = pd.DataFrame(result,index=index,columns=index)
obj_data.addResult(self.str_description, pd.Panel.from_dict(full_results))
scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/analysis/correlate.py | correlate.py
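# --- Toy illustration of the all-pairs correlation matrix the Correlate item
# builds when local_match is False (station labels and columns here are
# hypothetical, not the pipeline API).
import numpy as np
import pandas as pd

idx = pd.date_range('2015-01-01', periods=200, freq='D')
rng = np.random.default_rng(1)
frames = {'STA1': pd.DataFrame(rng.normal(size=(200, 2)), index=idx, columns=['dN', 'dE']),
          'STA2': pd.DataFrame(rng.normal(size=(200, 2)), index=idx, columns=['dN', 'dE'])}

series, labels = [], []
for label, df in frames.items():
    for column in ['dN', 'dE']:
        series.append(df[column])
        labels.append(label + '.' + column)

corr_matrix = pd.DataFrame([[s1.corr(s2, method='pearson') for s2 in series] for s1 in series],
                           index=labels, columns=labels)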
import collections
import numpy as np
import scipy.optimize as optimize
import skdaccess.utilities.pbo_util as pbo_utils
from skdiscovery.data_structure.framework import PipelineItem
import skdiscovery.utilities.patterns.pbo_tools as pbo_tools
from skdiscovery.utilities.patterns.pbo_tools import SourceWrapper, MogiVectors
class Mogi_Inversion(PipelineItem):
'''
Perform a mogi source inversion on a set of gps table data
The source is assumed to be a mogi source (point source), but other source models can be selected.
Assumes directions are named ('dN', 'dE', 'dU').
'''
def __init__(self, str_description, ap_paramList, pca_name, column_names=['dN', 'dE', 'dU']):
'''
Initialize Mogi analysis item
@param str_description: Description of item
@param ap_paramList[source_type]: Type of magma chamber source model to use (default-mogi,finite_sphere,closed_pipe,constant_open_pipe,rising_open_pipe,sill)
@param pca_name: Name of pca result
@param column_names: The data direction column names
'''
self.pca_name = pca_name
self.column_names = column_names
super(Mogi_Inversion, self).__init__(str_description, ap_paramList)
self.ap_paramNames = ['source_type']
def FitPCA(self, hPCA_Proj):
'''
Determine the timing of the inflation event from the first component of the pca projection
fits A * arctan( (t - t0) / c ) + B to the first pca projection, in order to estimate
source amplitude parameters
@param hPCA_Proj: The sklearn PCA projection
@return ct: the t0 and c parameters from the arctan fit
@return pA[0]: the fitted amplitude parameter
'''
fitfunc = lambda p,t: p[0]*np.arctan((t-p[1])/p[2])+p[3]
errfunc = lambda p,x,y: fitfunc(p,x) - y
dLen = len(hPCA_Proj[:,0])
pA, success = optimize.leastsq(errfunc,[1.,dLen/2.,1.,0.],args=(np.arange(dLen),hPCA_Proj[:,0]))
ct = pA[1:3]
return ct, pA[0]
def FitTimeSeries(self, pd_series, ct):
'''
Fits the amplitude and offset of an inflation event given the time and length of the event
Fits A and B in A * arctan( (t - t0) / c) + B
@param pd_series: Time series to be fit
@param ct: the time constants for the arctan
@return res: Amplitude of the fit
@return perr_leastsq: Error of the fit
'''
fitfunc2 = lambda p,c,t: p[0]*np.arctan((t-c[0])/c[1])+p[1]
errfunc2 = lambda p,c,x,y: fitfunc2(p,c,x) - y
dLen = len(pd_series)
pA, pcov = optimize.leastsq(errfunc2,[1.,0.],args=(ct,np.arange(dLen),pd_series))
# res = fitfunc2(pA,ct,np.arange(dLen))[-1]-fitfunc2(pA,ct,np.arange(dLen))[0]
res = pA[0]*np.pi
s_sq = (errfunc2(pA,ct,np.arange(dLen),pd_series)**2).sum()/(len(pd_series)-2)
pcov = pcov * s_sq
error = []
for i in range(len(pA)):
try:
error.append(np.absolute(pcov[i][i])**0.5)
except:
error.append( 0.00 )
perr_leastsq = np.array(error)
return res, perr_leastsq
def process(self, obj_data):
'''
Finds the magma source (default-mogi) from PBO GPS data.
Assumes time series columns are named ('dN', 'dE', 'dU'). Predicts the
location of the magma source using scipy.optimize.curve_fit
The result is added to the data wrapper as a list, with the four
elements describing the location of the magma source:
res[0] = latitude
res[1] = longitude
res[2] = source depth (km)
res[3] = volume change (meters^3)
@param obj_data: Input table data wrapper
'''
h_pca_name = self.pca_name
exN = {'mogi':0,'finite_sphere':1,'closed_pipe':1,'constant_open_pipe':1,'rising_open_pipe':1,'sill':0}
try:
mag_source = getattr(pbo_tools,self.ap_paramList[0]().lower())
ExScParams = tuple(np.ones((exN[self.ap_paramList[0]().lower()],)))
except:
mag_source = pbo_tools.mogi
ExScParams = ()
print('No source type called '+self.ap_paramList[0]()+', defaulting to a Mogi source.')
wrapped_mag_source = SourceWrapper(mag_source)
projection = obj_data.getResults()[h_pca_name]['Projection']
start_date = obj_data.getResults()[h_pca_name]['start_date']
end_date = obj_data.getResults()[h_pca_name]['end_date']
ct, pca_amp = self.FitPCA(projection)
pca_amp *= np.pi
xvs = []
yvs = []
zvs = []
label_list = []
for label, data in obj_data.getIterator():
label_list.append(label)
for column in self.column_names:
distance,f_error = self.FitTimeSeries(data.loc[start_date:end_date,column], ct)
if column == self.column_names[1]:
xvs.append(distance)
elif column == self.column_names[0]:
yvs.append(distance)
elif column == self.column_names[2]:
zvs.append(distance)
else:
print('Ignoring column: ', column)
xvs = np.array(xvs)*1e-6
yvs = np.array(yvs)*1e-6
zvs = np.array(zvs)*1e-6
ydata = np.hstack((xvs, yvs,zvs)).T
station_list = obj_data.get().keys()
meta_data = obj_data.info()
station_coords = pbo_utils.getStationCoords(meta_data, station_list)
dimensions = ('x','y','z')
xdata = []
for dim in dimensions:
for coord in station_coords:
xdata.append((dim, coord[0], coord[1]))
coord_range = np.array(pbo_utils.getLatLonRange(meta_data, station_list))
lat_guess = np.mean(coord_range[0,:])
lon_guess = np.mean(coord_range[1,:])
fit = optimize.curve_fit(wrapped_mag_source, xdata, ydata, (lat_guess, lon_guess, 5, 1e-4)+ExScParams)
res = collections.OrderedDict()
res['lat'] = fit[0][0]
res['lon'] = fit[0][1]
res['depth'] = fit[0][2]
res['amplitude'] = fit[0][3]
res['labels'] = label_list
if len(fit[0])>4:
res['ex_params'] = fit[0][4:]
else:
res['ex_params'] = np.nan
res['pca_amplitude'] = pca_amp
res['source_type'] = self.ap_paramList[0]().lower()
obj_data.addResult(self.str_description, res)
# lat_fit_range = (np.min(lat_list)-0.15, np.max(lat_list)+0.15)
# lon_fit_range = (np.min(lon_list)-0.15, np.max(lon_list)+0.15)
# res = optimize.brute(self.mogi, (lat_fit_range, lon_fit_range,
# (1,10), (1e-5, 1e-3)),
# args = (xvs*1e-6, yvs*1e-6, zvs*1e-6,
# station_list, meta_data))
scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/analysis/mogi.py | mogi.py
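# --- Minimal sketch of the arctan timing fit performed by FitPCA/FitTimeSeries
# above, applied to a synthetic inflation-like signal (synthetic data, not PBO
# GPS; parameter values are assumptions for illustration).
import numpy as np
from scipy import optimize

t = np.arange(500.)
rng = np.random.default_rng(2)
y = 3.0 * np.arctan((t - 250.) / 30.) + 1.0 + rng.normal(0, 0.3, t.size)

fitfunc = lambda p, t: p[0] * np.arctan((t - p[1]) / p[2]) + p[3]
errfunc = lambda p, t, y: fitfunc(p, t) - y
pA, success = optimize.leastsq(errfunc, [1., t.size / 2., 1., 0.], args=(t, y))
ct = pA[1:3]                    # fitted t0 and c (timing and width of the event)
amplitude = pA[0] * np.pi       # full swing of A*arctan as t goes from -inf to +inf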
from skdiscovery.data_structure.framework import PipelineItem
import numpy as np
import matplotlib.pyplot as plt
import math
class Plotter(PipelineItem):
'''
Make a plot of table data
'''
def __init__(self, str_description, column_names=None, error_column_names = None, num_columns = 3, width=13, height=4, columns_together=False,
annotate_column = None, annotate_data = None, xlim = None, ylim = None, **kwargs):
'''
Initialize Plotter
@param str_description: String describing accumulator
@param column_names: Columns to be plotted
@param error_column_names: Columns containing uncertainties to be plotted; no errorbars if None
@param num_columns: Number of columns to use when plotting data
@param width: Total width of all columns combined
@param height: Height of single row of plots
@param columns_together: If true, plot the columns on the same graph
@param annotate_column: Column of annotation data to use for annotation
@param annotate_data: Annotation data
@param xlim: The x limit
@param ylim: The y limit
@param **kwargs: Any additional keyword arguments are passed on to matplotlib
'''
self.xlim = xlim
self.ylim = ylim
self.kwargs = kwargs
self.num_columns = num_columns
self.height = height
self.width = width
self.column_names = column_names
self.annotate_column = annotate_column
self.annotate_data = annotate_data
self.error_column_names = error_column_names
self.columns_together = columns_together
super(Plotter, self).__init__(str_description, [])
def process(self, obj_data):
'''
Plot each column in obj_data
@param obj_data: Data Wrapper
'''
if self.column_names == None:
column_names = obj_data.getDefaultColumns()
else:
column_names = self.column_names
width = self.width
height = self.height
# Determine total number of figures needed
if self.columns_together == False:
num_figures = obj_data.getLength() * len(column_names)
else:
num_figures = obj_data.getLength()
if num_figures > 0:
# Determine number of rows and height needed to plot all figures
rows = math.ceil( num_figures / self.num_columns)
height *= rows
figure = plt.figure()
figure.set_size_inches(width, height, True)
if self.xlim != None:
plt.xlim(*self.xlim)
if self.ylim != None:
plt.ylim(*self.ylim)
num = 0
# Main loop that iterates over all data
for label, data in obj_data.getIterator():
if self.columns_together == True:
num += 1
# Plotting with errorbars
if self.error_column_names != None:
for column, err_column in zip(column_names, self.error_column_names):
if self.columns_together == False:
num += 1
plt.subplot(rows, self.num_columns, num)
plt.title(label)
plt.ylabel(column)
plt.xticks(rotation=45)
plt.errorbar(np.array(data.index),np.array(data[column]), yerr=np.array(data[err_column]), **self.kwargs)
if self.annotate_column is not None:
try:
for vline in self.annotate_data[label][self.annotate_column]:
plt.axvline(vline,color='black',linewidth=3,alpha=0.5)
except KeyError:
pass
# print('cannot find info')
elif self.annotate_data is not None:
try:
for vline in self.annotate_data[label]:
plt.axvline(vline,color='black',linewidth=3,alpha=0.5)
except KeyError:
pass
# print('cannot find info')
# Plotting without errorbars
else:
for column in column_names:
if self.columns_together == False:
num += 1
plt.subplot(rows, self.num_columns, num)
plt.title(label)
plt.ylabel(column)
plt.xticks(rotation=45)
plt.plot(data[column], **self.kwargs)
if self.annotate_column is not None:
try:
for vline in self.annotate_data[label][self.annotate_column]:
plt.axvline(vline,color='black',linewidth=3,alpha=0.5)
except KeyError:
pass
elif self.annotate_data is not None:
try:
for vline in self.annotate_data[label]:
plt.axvline(vline,color='black',linewidth=3,alpha=0.5)
except KeyError:
pass
# Tight layout usually displays nicer
plt.tight_layout()
# If run_id is > -1, display run number on figure
if(obj_data.run_id > -1):
figure.suptitle( "Run: " + str(obj_data.run_id), y=1.02) | scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/accumulators/plotter.py | plotter.py |
# Framework import
from skdiscovery.data_structure.framework.base import PipelineItem
# 3rd party libraries import
import pandas as pd
class CombineColumns(PipelineItem):
'''
Create a new column by selecting data from a column
Fills in any missing values using a second column
'''
def __init__(self, str_description, column_1, column_2, new_column_name):
'''
Initialize a CombineColumns object
@param str_description: String describing filter
@param column_1: Name of primary column
@param column_2: Name of secondary column to be used
when data from the primary column is not available
@param new_column_name: Name of resulting column
'''
self.column_1 = column_1
self.column_2 = column_2
self.new_column_name = new_column_name
super(CombineColumns,self).__init__(str_description)
def process(self, obj_data):
'''
Apply combine column filter to data set, operating on the data_obj
@param obj_data: Table data wrapper.
'''
for label, data in obj_data.getIterator():
if self.column_1 in data.columns and self.column_2 in data.columns:
# replace missing values in the primary column with values from the secondary column
col1_null_index = pd.isnull(data.loc[:,self.column_1])
data.loc[:,self.new_column_name] = data.loc[:,self.column_1]
# Check if there is any replacement data available
if (~pd.isnull(data.loc[col1_null_index, self.column_2])).sum() > 0:
data.loc[col1_null_index, self.new_column_name] = data.loc[col1_null_index, self.column_2]
elif self.column_2 in data.columns and self.column_1 not in data.columns:
data.loc[:,self.new_column_name] = data.loc[:,self.column_2]
elif self.column_2 not in data.columns and self.column_1 in data.columns:
data.loc[:,self.new_column_name] = data.loc[:,self.column_1]
else:
raise KeyError('data needs either "' + self.column_2 + '" or "' + self.column_1 + '" or both')
scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/filters/combine_columns.py | combine_columns.py
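# --- Toy illustration of the combining rule above: take the primary column and
# fill its missing entries from the secondary column (hypothetical column names).
import numpy as np
import pandas as pd

df = pd.DataFrame({'median_col': [1.0, np.nan, 3.0, np.nan],
                   'mean_col':   [1.1, 2.2,    3.3, np.nan]})
null_index = pd.isnull(df['median_col'])
df['combined'] = df['median_col']
df.loc[null_index, 'combined'] = df.loc[null_index, 'mean_col']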
import numpy as np
import pandas as pd
from skdiscovery.data_structure.framework import PipelineItem
from skdiscovery.utilities.patterns import kalman_smoother
class KalmanFilter(PipelineItem):
'''
Runs a forward and backward Kalman Smoother with a FOGM state on table data
For more information see: Ji, K. H. 2011, PhD thesis, MIT, and
Fraser, D. C., and Potter, J. E. 1969, IEEE Trans. Automat. Contr., AC-14, 4, 387-390
'''
def __init__(self, str_description, ap_paramList, uncertainty_clip=5, column_names=None,
error_column_names = None, fillna=True):
'''
Initialize Kalman Smoother
@param str_description: String describing filter
@param ap_paramList[ap_tau]: the correlation time
@param ap_paramList[ap_sigmaSq]: the data noise
@param ap_paramList[ap_R]: the process noise
@param uncertainty_clip: Clip data with uncertainties greater than uncertainty_clip * median uncertainty
@param column_names: List of column names to smooth (using None will apply to all columns)
@param error_column_names: List of error column names to smooth (using None will use default error columns)
@param fillna: Fill in missing values
'''
super(KalmanFilter, self).__init__(str_description, ap_paramList)
self.uncertainty_clip = uncertainty_clip
self.ap_paramNames = ['Tau','SigmaSq','R']
self.column_names = column_names
self.error_column_names = error_column_names
self.fillna = fillna
def process(self, obj_data):
'''
Apply kalman smoother to data set
@param obj_data: Input data. Changes are made in place.
'''
uncertainty_clip = self.uncertainty_clip
ap_tau = self.ap_paramList[0]()
ap_sigmaSq = self.ap_paramList[1]()
ap_R = self.ap_paramList[2]()
if self.column_names is None:
column_names = obj_data.getDefaultColumns()
else:
column_names = self.column_names
if self.error_column_names is None:
error_column_names = obj_data.getDefaultErrorColumns()
else:
error_column_names = self.error_column_names
for label, dataframe in obj_data.getIterator():
for column, error_column in zip(column_names, error_column_names):
data = dataframe.loc[:,column].copy()
err = dataframe.loc[:,error_column].copy()
# Clip data with high uncertainties
data.loc[np.logical_and(~pd.isnull(err), err > np.nanmedian(err) * uncertainty_clip)] = np.nan
# clip = np.nanmedian(err) * uncertainty_clip
err.loc[np.logical_and(~pd.isnull(err), err > np.nanmedian(err) * uncertainty_clip)] = np.nan
# If the beginning is missing data, the smoother will diverge
if np.sum(~np.isnan(data.iloc[:20])) == 0:
data.iloc[:2] = np.nanmedian(data)
if ap_R == 'formal':
R = err
else:
R = ap_R
# Smooth the data
smoothed, variance, t, sigma_sq, R = kalman_smoother.KalmanSmoother(data,
t = ap_tau,
sigma_sq = ap_sigmaSq,
R = R)
# Set uncertainties for missing data to those estimated from
# the filter.
err.loc[pd.isnull(err)] = variance[pd.isnull(err)]
# Calculate the sample variance
T = len(data)
r = np.exp(-1 / t)
sample_var = sigma_sq * (T / (T - 1)) * ( 1 - ((1+r) / (T * (1-r))) + ((2*r*(1-r**T)) / (T**2 * (1-r)**2)))
if self.fillna == True:
obj_data.updateData(label, data.index, error_column, np.sqrt(err**2 + sample_var))
obj_data.updateData(label, data.index, column, smoothed)
else:
obj_data.updateData(label, dataframe.loc[:,error_column].dropna().index, error_column, np.sqrt(err**2 + sample_var))
obj_data.updateData(label, dataframe.loc[:,column].dropna().index, column, smoothed)
def _applyKalman(self,label_dataframe,obj_data,run_Params):
column_names = run_Params[0]
error_column_names = run_Params[1]
uncertainty_clip = run_Params[2]
ap_tau = run_Params[3]
ap_sigmaSq = run_Params[4]
ap_R = run_Params[5]
label = label_dataframe[0]
dataframe = label_dataframe[1]
result = {label:dict()}
for column, error_column in zip(column_names, error_column_names):
data = dataframe.loc[:,column].copy()
err = dataframe.loc[:,error_column].copy()
# Clip data with high uncertainties
data.loc[np.logical_and(~pd.isnull(err), err > np.nanmedian(err) * uncertainty_clip)] = np.nan
# clip = np.nanmedian(err) * uncertainty_clip
err.loc[np.logical_and(~pd.isnull(err), err > np.nanmedian(err) * uncertainty_clip)] = np.nan
# If the beginning is missing data, the smoother will diverge
if np.sum(~np.isnan(data.iloc[:20])) == 0:
data.iloc[:2] = np.nanmedian(data)
if ap_R == 'formal':
R = err
else:
R = ap_R
# Smooth the data
smoothed, variance, t, sigma_sq, R = kalman_smoother.KalmanSmoother(data,
t = ap_tau,
sigma_sq = ap_sigmaSq,
R = R)
# Set uncertainties for missing data to those estimated from
# the filter.
err.loc[pd.isnull(err)] = variance[pd.isnull(err)]
# Calculate the sample variance
T = len(data)
r = np.exp(-1 / t)
sample_var = sigma_sq * (T / (T - 1)) * ( 1 - ((1+r) / (T * (1-r))) + ((2*r*(1-r**T)) / (T**2 * (1-r)**2)))
if obj_data != None:
obj_data.updateData(label, data.index, error_column, np.sqrt(err**2 + sample_var))
obj_data.updateData(label, data.index, column, smoothed)
if obj_data == None:
result[label]['index'] = data.index
result[label][error_column] = np.sqrt(err**2 + sample_var)
result[label][column] = smoothed
if obj_data == None:
return result, label
scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/filters/kalman.py | kalman.py
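# --- The FOGM sample-variance correction computed inside KalmanFilter above,
# pulled out as a standalone helper (same algebra; the numbers passed in are
# arbitrary assumptions).
import numpy as np

def fogm_sample_variance(sigma_sq, tau, T):
    # Sample variance of T evenly spaced draws of a first-order Gauss-Markov
    # process with variance sigma_sq and correlation time tau (in samples).
    r = np.exp(-1.0 / tau)
    return sigma_sq * (T / (T - 1)) * (1 - ((1 + r) / (T * (1 - r)))
                                       + ((2 * r * (1 - r**T)) / (T**2 * (1 - r)**2)))

example_var = fogm_sample_variance(sigma_sq=4.0, tau=30.0, T=1000)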
from skdiscovery.data_structure.framework import PipelineItem
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
class OffsetDetrend(PipelineItem):
'''
Trend filter that fits a stepwise function to linearly detrended table data
On detrended data this filter fits a stepwise function (number of
steps provided by the user) to correct the linear fit by
accounting for discontinuous offsets, such as due to a change in
the antenna or from an earthquake. The final linear fit handles
each portion of the offset independently. If the number of
discontinuities is not provided as an autoparam, the filter
assumes a single discontinuity.
'''
def __init__(self, str_description, column_names, ap_paramList = [], labels=None, time_point=None, time_interval=None):
'''
Initialize OffsetDetrend filter for use on table data
@param str_description: String describing filter
@param column_names: List of column names to select data to be removed (using None will apply to all columns)
@param ap_paramList[step_count]: Number of steps to remove from data (Default: 1)
@param labels: List of labels used to select data to be removed (using None will apply to all labels)
@param time_point: Time of offset
@param time_interval: Interval within which the offset occurs
'''
self.labels = labels
self.column_names = column_names
self.time_point = time_point
if time_interval == None:
self.time_interval = [-500,500]
else:
if type(time_interval) == int:
self.time_interval = [-time_interval,time_interval]
else:
self.time_interval = time_interval
self.ap_paramNames = ['step_count']
super(OffsetDetrend, self).__init__(str_description, ap_paramList)
def process(self, obj_data):
'''
Apply offset estimation and detrending filter to data set.
@param obj_data: Input data. Changes are made in place.
'''
labels = self.labels
column_names = self.column_names
# user provided number of steps/offsets in the data
step_count = 1
if len(self.ap_paramList) != 0:
step_count = self.ap_paramList[0]()
for label, data in obj_data.getIterator():
for column in column_names:
if (labels is None or label in labels):
# keep track of the time index and the location of nan's
tindex = data.index
reind = np.array(np.isnan(data))
# a temporary time index and data array without nan's
nts = np.arange(len(data))
nts = np.delete(nts,nts[reind])
nys = data[reind==False]
# Decision Tree Regressor for finding the discontinuities
regr_1 = DecisionTreeRegressor(max_depth=step_count)
if self.time_point == None:
regr_1.fit(nts[:,np.newaxis], nys)
else:
# make time_point (a string) into an index
time_point = np.where(tindex==self.time_point)[0][0]
regr_1.fit(nts[(time_point+self.time_interval[0]):(time_point+self.time_interval[1]),np.newaxis],
nys[(time_point+self.time_interval[0]):(time_point+self.time_interval[1])])
r1 = regr_1.predict(nts[:,np.newaxis])
# offset the discontinuity to be continuous and fit a single line
# (using median of 5 points on either side of discontinuity)
nys[r1==r1[-1]] += np.median(nys[r1==r1[0]][-5:-1]) - np.median(nys[r1==r1[-1]][0:5])
z3 = np.polyfit(nts, nys, 1)
# make the data into a pd series and correctly index
x3 = pd.Series(data=nys-(z3[0]*nts+z3[1]),index=tindex[reind==False])
x3 = x3.reindex(tindex)
# and then use that to update in place
obj_data.updateData(label, x3.index, column, x3)
scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/filters/offset_detrend.py | offset_detrend.py
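# --- Synthetic illustration of the offset-detrending idea above: locate a
# single step with a depth-1 decision tree, level the two segments, then remove
# the remaining linear trend (toy data; the step location is an assumption).
import numpy as np
from sklearn.tree import DecisionTreeRegressor

t = np.arange(300.)
rng = np.random.default_rng(3)
y = 0.01 * t + rng.normal(0, 0.2, t.size)
y[150:] += 5.0                                   # discontinuous offset

tree = DecisionTreeRegressor(max_depth=1)
tree.fit(t[:, np.newaxis], y)
step = tree.predict(t[:, np.newaxis])
# shift the right-hand segment so the series is continuous across the step
y[step == step[-1]] += np.median(y[step == step[0]][-5:-1]) - np.median(y[step == step[-1]][0:5])
slope, intercept = np.polyfit(t, y, 1)
detrended = y - (slope * t + intercept)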
from skdiscovery.data_structure.framework import PipelineItem
from skdiscovery.utilities.patterns import trend_tools
class MedianFilter(PipelineItem):
'''
A Median filter for table data
'''
def __init__(self, str_description, ap_paramList, interpolate=True,
subtract = False,regular_period=True, min_periods=1):
'''
Initialize MedianFilter
@param str_description: String describing filter
@param ap_paramList[ap_window]: median filter window width
@param interpolate: Interpolate data points before filtering
@param subtract: Subtract filtered result from original
@param regular_period: Assume the data is regularly sampled
@param min_periods: Minimum required number of data points in window
'''
self.interpolate = interpolate
self.subtract = subtract
self.ap_paramNames = ['windowSize']
self.regular_period = regular_period
self.min_periods = min_periods
super(MedianFilter, self).__init__(str_description, ap_paramList)
def process(self, obj_data):
'''
Apply median filter to data set
@param obj_data: Input table data wrapper. Changes are made in place.
'''
ap_window = self.ap_paramList[0]()
column_names = obj_data.getDefaultColumns()
for label, data in obj_data.getIterator():
for column in column_names:
if self.interpolate == True or self.regular_period == False:
result = trend_tools.medianFilter(data[column], ap_window, self.interpolate)
else:
result = data[column].rolling(ap_window,min_periods=self.min_periods, center=True).median()
if self.subtract == True:
obj_data.updateData(label, data.index, column, data[column] - result)
else:
obj_data.updateData(label, data.index, column, result)
scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/filters/median.py | median.py
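# --- Rolling-median smoothing comparable to the regular-period branch above
# (synthetic series; the window length in samples is an arbitrary choice).
import numpy as np
import pandas as pd

idx = pd.date_range('2016-01-01', periods=120, freq='D')
rng = np.random.default_rng(4)
noisy = pd.Series(np.sin(np.linspace(0, 6, 120)) + rng.normal(0, 0.3, 120), index=idx)

smoothed = noisy.rolling(11, min_periods=1, center=True).median()
residual = noisy - smoothed      # what the subtract=True option would keep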
from skdiscovery.data_structure.framework.base import PipelineItem
import numpy as np
class WeightedAverage(PipelineItem):
''' This filter performs a rolling weighted average using standard deviations as weight '''
def __init__(self, str_description, ap_paramList, column_names, std_dev_column_names=None, propagate_uncertainties=False):
'''
Initializes a WeightedAverage object
@param str_description: String describing filter
@param ap_paramList[window]: Window to use for computing rolling weighted average
@param column_names: Names of columns to apply the weighted average
@param std_dev_column_names: Names of columns of the standard deviations. If none a regular mean is computed.
@param propagate_uncertainties: Propagate uncertainties assuming uncorrelated errors
'''
super(WeightedAverage,self).__init__(str_description, ap_paramList)
self.column_names = column_names
self.std_dev_column_names = std_dev_column_names
self.propagate_uncertainties = propagate_uncertainties
def process(self, obj_data):
'''
Apply the moving (weighted) average filter to a table data wrapper.
Changes are made in place.
@param obj_data: Input table data wrapper
'''
window = self.ap_paramList[0]()
for label, data in obj_data.getIterator():
if self.std_dev_column_names != None:
for column, std_dev_column in zip(self.column_names,
self.std_dev_column_names):
weights = 1 / data[std_dev_column]**2
weighted_data = data[column] * weights
scale = weights.rolling(window=window,center=True, min_periods=1).sum()
weighted_average = weighted_data.rolling(window=window, center=True, min_periods=1).sum() / scale
obj_data.updateData(label, weighted_average.index, column,weighted_average)
if self.propagate_uncertainties:
# Uncertainty determined using the standard error propagation technique
# Assumes data is uncorrelated
uncertainty = 1 / np.sqrt(scale)
obj_data.updateData(label, uncertainty.index, std_dev_column, uncertainty)
else:
for column in self.column_names:
weighted_average = data[column].rolling(window=window, center=True, min_periods=1).mean()
obj_data.updateData(label,weighted_average.index,column,weighted_average)
scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/filters/weighted_average.py | weighted_average.py
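# --- Inverse-variance weighted rolling mean and its propagated uncertainty,
# mirroring the arithmetic above on a toy series (uncorrelated errors assumed;
# series values and window are illustrative only).
import numpy as np
import pandas as pd

rng = np.random.default_rng(5)
values = pd.Series(rng.normal(10, 1, 100))
sigma = pd.Series(rng.uniform(0.5, 2.0, 100))

window = 7
weights = 1 / sigma**2
scale = weights.rolling(window=window, center=True, min_periods=1).sum()
weighted_mean = (values * weights).rolling(window=window, center=True, min_periods=1).sum() / scale
propagated_sigma = 1 / np.sqrt(scale)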
import pandas as pd
from skdiscovery.data_structure.framework import PipelineItem
from skdiscovery.utilities.patterns import trend_tools
class TrendFilter(PipelineItem):
'''
Trend Filter that removes linear and sinusoidal (annual, semi-annual) trends on series data.
Works on table data
'''
def __init__(self, str_description, ap_paramList, columns = None):
'''
Initialize Trend Filter
@param str_description: String describing filter
@param ap_paramList[list_trendTypes]: List of trend types. List can contain "linear", "annual", or "semiannual"
@param columns: List of column names to filter
'''
super(TrendFilter, self).__init__(str_description, ap_paramList)
self.columns = columns
self.ap_paramNames = ['trend_list']
def process(self, obj_data):
'''
Apply trend filter to data set.
@param obj_data: Input data. Changes are made in place.
'''
if self.columns == None:
column_names = obj_data.getDefaultColumns()
else:
column_names = self.columns
filter_list = None
if len(self.ap_paramList) != 0:
filter_list = self.ap_paramList[0].val()
for label, dataframe in obj_data.getIterator():
for column in column_names:
data = dataframe.loc[:,column]
good_index = pd.notnull(data)
if good_index.sum() == 0:
continue
if filter_list == None or 'linear' in filter_list:
obj_data.updateData(label, data.index[good_index], column, pd.Series(trend_tools.getTrend(data)[0], index=data.index)[good_index])
if filter_list == None or 'semiannual' in filter_list:
obj_data.updateData(label, data.index[good_index], column, pd.Series(trend_tools.sinuFits(data), index=data.index)[good_index])
elif 'annual' in filter_list:
obj_data.updateData(label, data.index[good_index], column, pd.Series(
trend_tools.sinuFits(data, fitN=1), index=data.index)[good_index])
scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/table/filters/trend.py | trend.py
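# --- Standalone sketch of removing a linear plus annual sinusoidal trend,
# similar in spirit to the trend_tools.getTrend/sinuFits calls above (synthetic
# daily data; amplitudes and phases are arbitrary assumptions).
import numpy as np
import pandas as pd

idx = pd.date_range('2012-01-01', periods=730, freq='D')
t = np.arange(730.)
rng = np.random.default_rng(6)
y = 0.02 * t + 3 * np.sin(2 * np.pi * t / 365.25 + 0.4) + rng.normal(0, 0.5, 730)

slope, intercept = np.polyfit(t, y, 1)
detrended = y - (slope * t + intercept)

# annual sinusoid removed via linear least squares on a sin/cos basis
basis = np.column_stack([np.sin(2 * np.pi * t / 365.25), np.cos(2 * np.pi * t / 365.25)])
coeffs, *_ = np.linalg.lstsq(basis, detrended, rcond=None)
residual = pd.Series(detrended - basis @ coeffs, index=idx)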
import pandas as pd
from skdiscovery.data_structure.framework import PipelineItem
from skdiscovery.utilities.patterns import trend_tools
class TrendFilter(PipelineItem):
'''
Trend Filter that removes linear and sinusoidal (annual, semi-annual) trends on series data.
Works on table data
'''
def __init__(self, str_description, ap_paramList, columns = None):
'''
Initialize Trend Filter
@param str_description: String describing filter
@param ap_paramList[list_trendTypes]: List of trend types. List can contain "linear", "annual", or "semiannual"
@param columns: List of column names to filter
'''
super(TrendFilter, self).__init__(str_description, ap_paramList)
self.columns = columns
self.ap_paramNames = ['trend_list']
def process(self, obj_data):
'''
Apply trend filter to data set.
@param obj_data: Input data. Changes are made in place.
'''
if self.columns == None:
column_names = obj_data.getDefaultColumns()
else:
column_names = self.columns
filter_list = None
if len(self.ap_paramList) != 0:
filter_list = self.ap_paramList[0].val()
for label, dataframe in obj_data.getIterator():
for column in column_names:
data = dataframe.loc[:,column]
good_index = pd.notnull(data)
if good_index.sum() == 0:
continue
if filter_list == None or 'linear' in filter_list:
obj_data.updateData(label, data.index[good_index], column, pd.Series(trend_tools.getTrend(data)[0], index=data.index)[good_index])
if filter_list == None or 'semiannual' in filter_list:
obj_data.updateData(label, data.index[good_index], column, pd.Series(trend_tools.sinuFits(data), index=data.index)[good_index])
elif 'annual' in filter_list:
obj_data.updateData(label, data.index[good_index], column, pd.Series(
trend_tools.sinuFits(data, fitN=1), index=data.index)[good_index]) | 0.639286 | 0.458652 |
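A minimal sketch of applying TrendFilter outside a full pipeline run, assuming scikit-discovery and its dependencies are installed and importable from the path recorded above. The TableWrapper class is a hypothetical stand-in for the package's table data wrapper; it only implements the three methods that process() actually calls.

import numpy as np
import pandas as pd
from skdiscovery.data_structure.table.filters.trend import TrendFilter

class TableWrapper:
    """Hypothetical stand-in for the table data wrapper used by the pipeline."""
    def __init__(self, frames, default_columns):
        self.frames = frames
        self.default_columns = default_columns
    def getDefaultColumns(self):
        return self.default_columns
    def getIterator(self):
        return iter(self.frames.items())
    def updateData(self, label, index, column, values):
        # Write the filtered values back in place, as process() expects
        self.frames[label].loc[index, column] = values

dates = pd.date_range('2015-01-01', periods=730)
signal = 0.01 * np.arange(730) + np.sin(2 * np.pi * np.arange(730) / 365.25)
wrapper = TableWrapper({'STN1': pd.DataFrame({'dN': signal}, index=dates)}, ['dN'])

# An empty parameter list leaves filter_list as None, so both the linear and
# the seasonal branches of process() are executed.
trend_filter = TrendFilter('Remove trends', [])
trend_filter.process(wrapper)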
class PipelineItem(object):
'''
The general class used to create pipeline items.
'''
def __init__(self, str_description, ap_paramList=None):
'''
Initialize an object
@param str_description: String description of filter
@param ap_paramList: List of AutoParam parameters.
'''
self.str_description = str_description
self.ap_paramList = ap_paramList if ap_paramList is not None else []
self.ap_paramNames = []
def perturbParams(self):
'''choose other random value for all parameters'''
for param in self.ap_paramList:
param.perturb()
def resetParams(self):
'''set all parameters to initial value'''
for param in self.ap_paramList:
param.reset()
def process(self, obj_data):
'''
The actual filter processing. Empty in this generic filter.
@param obj_data: Data wrapper that will be processed
'''
pass
def __str__(self):
'''
String representation of object.
@return String listing all current parameters
'''
return str([str(p) for p in self.ap_paramList])
def getMetadata(self):
'''
Retrieve metadata about filter
@return String containing the item description and current parameters for filter.
'''
return self.str_description + str([str(p) for p in self.ap_paramList])
class TablePipelineItem(PipelineItem):
"""
Pipeline item for Table data
"""
def __init__(self, str_description, ap_paramList, column_list=None, error_column_list=None):
"""
Initialize Table Pipeline item
@param str_description: String describing filter
@param ap_paramList: List of AutoParams and AutoLists
@param column_list: List of columns to process
@param error_column_list: List of the associated error columns
"""
super(TablePipelineItem, self).__init__(str_description, ap_paramList)
self._column_list = column_list
self._error_column_list = error_column_list
def _getColumns(self, obj_data):
"""
Get the columns that need to be processed
Returns the columns set in this item, otherwise returns
the default columns defined in the data wrapper
@param obj_data: Table data wrapper
@return Columns to process
"""
if self._column_list is None:
return obj_data.getDefaultColumns()
else:
return self._column_list
def _getErrorColumns(self, obj_data):
"""
Get the error columns that need to be processed
Returns the error columns set in this item, otherwise returns
the default error columns defined in the data wrapper
@param obj_data: Table data wrapper
@return Error columns to process
"""
if self._error_column_list is None:
return obj_data.getDefaultErrorColumns()
else:
return self._error_column_list | scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/data_structure/framework/base.py | base.py | 0.749729 | 0.30243
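As a sketch, a custom pipeline item only needs to subclass PipelineItem and override process(); the wrapper methods used below (getIterator, getDefaultColumns, updateData) are the same ones the filters in this package call, and the import mirrors the one used by trend.py above.

from skdiscovery.data_structure.framework import PipelineItem

class MedianCenter(PipelineItem):
    '''Illustrative item that subtracts the median from each default column'''
    def process(self, obj_data):
        for label, dataframe in obj_data.getIterator():
            for column in obj_data.getDefaultColumns():
                data = dataframe.loc[:, column]
                obj_data.updateData(label, data.index, column, data - data.median())

item = MedianCenter('Median center', [])
print(item.getMetadata())  # "Median center[]"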
import numpy as np
from shapely.geometry import Polygon, Point
from collections import OrderedDict
def shoelaceArea(in_vertices):
"""
Determine the area of a polygon using the shoelace method
https://en.wikipedia.org/wiki/Shoelace_formula
@param in_vertices: The vertices of a polygon. 2d Array where the first column is the
x coordinates and the second column is the y coordinates
@return: Area of the polygon
"""
x = in_vertices[:,0]
y = in_vertices[:,1]
return 0.5 *(np.sum(x * np.roll(y,shift=-1)) - np.sum(np.roll(x,shift=-1) * y))
def parseBasemapShape(aquifers, aquifers_info):
"""
Create shapely polygons from shapefile read in with basemap
@param aquifers: Data read in shapefile from basemap
@param aquifers_info: Metadata read from shapefile from basemap
@return: Dictionary containing information about shapes and shapely polygon of shapefile data
"""
polygon_data = []
for index,(aquifer,info) in enumerate(zip(aquifers,aquifers_info)):
if shoelaceArea(np.array(aquifer)) < 0:
new_data = OrderedDict()
new_data['shell'] = aquifer
new_data['info'] = info
new_data['holes'] = []
polygon_data.append(new_data)
else:
polygon_data[-1]['holes'].append(aquifer)
for data in polygon_data:
data['polygon'] = Polygon(shell=data['shell'],holes=data['holes'])
return polygon_data
def nearestEdgeDistance(x,y,poly):
"""
Determine the distance to the closest edge of a polygon
@param x: x coordinate
@param y: y coordinate
@param poly: Shapely polygon
@return distance from x,y to nearest edge of the polygon
"""
point = Point(x,y)
ext_dist = poly.exterior.distance(point)
if len(poly.interiors) > 0:
int_dist = np.min([interior.distance(point) for interior in poly.interiors])
return np.min([ext_dist, int_dist])
else:
return ext_dist
def findPolygon(in_data, in_point):
"""
Find the polygon that a point resides in
@param in_data: Input data containing polygons as read in by parseBasemapShape
@param in_point: Shapely point
@return: Index of shape in in_data that contains in_point
"""
result_num = None
for index, data in enumerate(in_data):
if data['polygon'].contains(in_point):
if result_num is None:
result_num = index
else:
raise RuntimeError("Multiple polygons contains point")
if result_num is None:
return -1
return result_num
def getInfo(row, key, fill, polygon_data):
"""
Retrieve information from polygon data:
@param row: Container with key 'ShapeIndex'
@param key: Key of data to retrieve from polygon_data element
@param fill: Value to return if key does not exist in polygon_data element
@param polygon_data: Polygon data as read in by parseBasemapShape
@return Value stored under key, or fill if the key is absent
"""
try:
return polygon_data[int(row['ShapeIndex'])]['info'][key]
except KeyError:
return fill
def findClosestPolygonDistance(x,y,polygon_data):
"""
Find the distance to the closest polygon
@param x: x coordinate
@param y: y coordinate
@param polygon_data: Polygon data as read in by parseBasemapShape
@return Distance from x, y to the closest polygon polygon_data
"""
min_dist = np.inf
shape_index = -1
point = Point(x,y)
for index, data in enumerate(polygon_data):
if not data['polygon'].contains(point) and data['info']['AQ_CODE'] != 999:
new_distance = data['polygon'].distance(point)
if new_distance < min_dist:
min_dist = new_distance
shape_index = index
return min_dist, shape_index | scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/utilities/patterns/polygon_utils.py | polygon_utils.py | 0.758555 | 0.852445
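A quick numerical check of the helpers above, assuming the module is importable from the path recorded in this entry: a counter-clockwise unit square has shoelace area +1, the reversed ring gives -1 (i.e. a shell in parseBasemapShape), and the centre of a 2 x 2 square sits exactly one unit from its nearest edge.

import numpy as np
from shapely.geometry import Polygon
from skdiscovery.utilities.patterns.polygon_utils import shoelaceArea, nearestEdgeDistance

square = np.array([[0, 0], [1, 0], [1, 1], [0, 1]], dtype=float)
print(shoelaceArea(square))         # 1.0  (counter-clockwise ring)
print(shoelaceArea(square[::-1]))   # -1.0 (clockwise ring, treated as a shell)

poly = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
print(nearestEdgeDistance(1, 1, poly))  # 1.0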
import statsmodels.api as sm
import numpy as np
import imreg_dft as ird
import shapely
import scipy as sp
def buildMatchedPoints(in_matches, query_kp, train_kp):
'''
Get postions of matched points
@param in_matches: Input matches
@param query_kp: Query key points
@param train_kp: Training key points
@return Tuple containing the matched query and training positions
'''
query_index = [match.queryIdx for match in in_matches]
train_index = [match.trainIdx for match in in_matches]
sorted_query_kp = [query_kp[i] for i in query_index]
sorted_train_kp = [train_kp[i] for i in train_index]
query_positions = [[kp.pt[0], kp.pt[1]] for kp in sorted_query_kp]
train_positions = [[kp.pt[0], kp.pt[1]] for kp in sorted_train_kp]
return query_positions, train_positions
def scaleImage(input_data, vmin=None, vmax=None):
'''
Scale image values to be within 0 and 255
@param input_data: Input data
@param vmin: Minimum value for scaled data, where smaller values are clipped, defaults to median - stddev as determined by the MAD
@param vmax: Maximum value for scaled data, where larger values are clipped, defaults to median + stddev as determined by the MAD
@return input_data scaled to be within 0 and 255 as an 8 bit integer
'''
if vmin is None or vmax is None:
stddev = sm.robust.mad(input_data.ravel())
middle = np.median(input_data.ravel())
if vmin is None:
vmin = middle - 1*stddev
if vmax is None:
vmax = middle + 1*stddev
input_data = input_data.astype(float)
input_data[input_data<vmin] = vmin
input_data[input_data>vmax] = vmax
input_data = np.round((input_data - vmin) * 255 / (vmax-vmin)).astype(np.uint8)
return input_data
def divideIntoSquares(image, size, stride):
"""
Create many patches from an image
Will drop any patches that contain NaN's
@param image: Source image
@param size: Size of one side of the square patch
@param stride: Spacing between patches (must be an integer greater than 0)
@return Array containing the extent [x_start, x_end, y_start, y_end] of each patch and an array of the patches
"""
def compute_len(size, stride):
return (size-1) // stride + 1
num_x = compute_len(image.shape[-1]-size, stride)
num_y = compute_len(image.shape[-2]-size, stride)
if image.ndim == 2:
array_data = np.zeros((num_x * num_y, size, size), dtype = image.dtype)
elif image.ndim == 3:
array_data = np.zeros((num_x * num_y, image.shape[0], size, size), dtype = image.dtype)
extent_data = np.zeros((num_x * num_y, 4), dtype = int)
index = 0
for x in range(0, image.shape[-1]-size, stride):
for y in range(0, image.shape[-2]-size, stride):
if image.ndim == 2:
cut_box = image[y:y+size, x:x+size]
elif image.ndim == 3:
cut_box = image[:, y:y+size, x:x+size]
array_data[index, ...] = cut_box
extent_data[index, :] = np.array([x, x+size, y, y+size])
index += 1
if image.ndim==2:
valid_index = ~np.any(np.isnan(array_data), axis=(1,2))
else:
valid_index = ~np.any(np.isnan(array_data), axis=(1,2,3))
return extent_data[valid_index], array_data[valid_index]
def generateSquaresAroundPoly(poly, size=100, stride=20):
'''
Generate squares that may touch a shapely polygon
@param poly: Shapely polygon
@param size: Size of boxes to create
@param stride: Distance between squares
@return list of Shapely squares that may touch input polygon
'''
x_start, x_end = np.min(poly.bounds[0]-size).astype(int), np.max(poly.bounds[2]+size).astype(int)
y_start, y_end = np.min(poly.bounds[1]-size).astype(int), np.max(poly.bounds[3]+size).astype(int)
x_coords = np.arange(x_start, x_end+1, stride)
y_coords = np.arange(y_start, y_end+1, stride)
x_mesh, y_mesh = np.meshgrid(x_coords, y_coords)
return [shapely.geometry.box(x, y, x+size, y+size) for x, y in zip(x_mesh.ravel(), y_mesh.ravel())] | scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/utilities/patterns/image_tools.py | image_tools.py | 0.589598 | 0.684679
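A small sketch exercising the patch helpers on synthetic data, assuming the module's own dependencies (e.g. imreg_dft) are installed; the import path follows the file location recorded above.

import numpy as np
from skdiscovery.utilities.patterns.image_tools import divideIntoSquares, scaleImage

image = np.random.randn(128, 128)
image[10, 10] = np.nan                    # patches touching this pixel are dropped
extents, patches = divideIntoSquares(image, size=32, stride=16)
print(extents.shape, patches.shape)       # (n_patches, 4) and (n_patches, 32, 32)

scaled = scaleImage(np.random.randn(64, 64))
print(scaled.dtype, scaled.min(), scaled.max())  # uint8, values within 0..255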
# 3rd party imports
import numpy as np
import pandas as pd
def getPCAComponents(pca_results):
'''
Retrieve PCA components from PCA results
@param pca_results: PCA results from a pipeline run
@return Pandas DataFrame containing the pca components
'''
date_range = pd.date_range(pca_results['start_date'], pca_results['end_date'])
column_names = ['PC' + str(i+1) for i in range(pca_results['CA'].n_components)]
pca = pd.DataFrame(data = pca_results['Projection'], index = date_range, columns=column_names)
pca.index.name='Date'
return pca
def rotate(col_vectors, az, ay, ax):
'''
Rotate col vectors in three dimensions
Rx * Ry * Rz * col_vectors
@param col_vectors: Three dimensional Column vectors
@param az: Z angle
@param ay: Y angle
@param ax: X angle
@return rotated col vectors
'''
rz = np.array([[np.cos(az), -np.sin(az), 0], [np.sin(az), np.cos(az), 0], [0, 0, 1]])
ry = np.array([[np.cos(ay), 0, np.sin(ay)], [0, 1, 0], [-np.sin(ay), 0, np.cos(ay)]])
rx = np.array([[ 1, 0, 0], [0, np.cos(ax), -np.sin(ax)], [0, np.sin(ax), np.cos(ax)]])
rot = rx @ ry @ rz
return rot @ col_vectors
def translate(col_vectors, delta_x, delta_y, delta_z):
'''
Translate col vectors by x, y, and z
@param col_vectors: Column vectors of positions (3 x N array)
@param delta_x: Amount to translate in the x direction
@param delta_y: Amount to translate in the y direction
@param delta_z: Amount to translate in the z direction
@return Translated column vectors
'''
col_vectors = col_vectors.copy()
col_vectors[0,:] += delta_x
col_vectors[1,:] += delta_y
col_vectors[2,:] += delta_z
return col_vectors
def formatColorbarLabels(colorbar, pad=29):
"""
Adjust the labels on a colorbar so they are right aligned
@param colorbar: Input matplotlib colorbar
@param pad: Amount of padding to use
"""
for t in colorbar.ax.get_yticklabels():
t.set_horizontalalignment('right')
colorbar.ax.yaxis.set_tick_params(pad=pad) | scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/utilities/patterns/general_tools.py | general_tools.py | 0.882326 | 0.52975
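A minimal check of rotate() and translate(), with the import path taken from this record: rotating the x unit vector by 90 degrees about z should land it on the y axis, and translate() simply shifts every column vector.

import numpy as np
from skdiscovery.utilities.patterns.general_tools import rotate, translate

points = np.array([[1.0], [0.0], [0.0]])            # a single column vector
rotated = rotate(points, az=np.pi / 2, ay=0.0, ax=0.0)
print(np.allclose(rotated, [[0.0], [1.0], [0.0]]))  # True
print(translate(points, 10, 20, 30))                # [[11.] [20.] [30.]]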
import socket
import threading
import select
import paramiko
def print_verbose(s, verbose=False):
'''
Print statement if verbose is True
@param s: Statement to print
@param verbose: Print only if verbose is True
'''
if verbose:
print(s)
def handler(chan, host, port, verbose=False):
'''
Handler is responsible for sending and receiving data through ssh tunnel
@param chan: SSH Channel for transferring data
@param host: Address of remote host
@param port: Port to forward
@param verbose: Print status information
'''
sock = socket.socket()
try:
sock.connect((host, port))
except Exception as e:
print_verbose('Forwarding request to %s:%d failed: %r' % (host, port, e), verbose)
return
print_verbose('Connected! Tunnel open %r -> %r -> %r' % (chan.origin_addr,
chan.getpeername(), (host, port)), verbose)
while True:
r, w, x = select.select([sock, chan], [], [])
if sock in r:
data = sock.recv(1024)
if len(data) == 0:
break
chan.send(data)
if chan in r:
data = chan.recv(1024)
if len(data) == 0:
break
sock.send(data)
chan.close()
sock.close()
print_verbose('Tunnel closed from %r' % (chan.origin_addr,), verbose)
def reverse_forward_tunnel(server_port, remote_host, remote_port, transport, check=30, verbose=False):
'''
Creates a reverse ssh tunnel
@param server_port: Port on local host
@param remote_host: Address of remote host
@param remote_port: Port of remote host
@param transport: SSH Transport
@param check: Amount of time to wait in seconds when opening up a channel
@param verbose: Print status information
@return Thread running reverse ssh tunnel, event used to close ssh tunnel,
list of child threads started by main thread
'''
transport.request_port_forward('', server_port)
event = threading.Event()
child_threads = []
def accept_tunnels(event):
'''
This function spawns new connections as they are needed
@param event: When this event is set, this function will complete
'''
while not event.is_set():
chan = transport.accept(check)
if chan is None:
continue
thr = threading.Thread(target=handler, args=(chan, remote_host, remote_port, verbose))
thr.daemon = True
thr.start()
child_threads.append(thr)
accept_thread = threading.Thread(target=accept_tunnels,args=[event])
accept_thread.daemon = True
accept_thread.start()
return accept_thread, event, child_threads
class ReverseTunnel(object):
'''
Create a reverse ssh tunnel
'''
def __init__(self, server_address, username, key_filename, server_port,
remote_host, remote_port, check=30, verbose=False):
'''
Initialize ReverseTunnel object
@param server_address: Local server address
@param username: Valid username on remote host
@param key_filename: Filename of ssh key associated with remote host
@param server_port: Local port
@param remote_host: Address of remote host
@param remote_port: Remote port
@param check: Amount of time to wait in seconds when opening up a channel
@param verbose: Print status information
'''
self.server_address = server_address
self.username = username
self.key_filename = key_filename
self.server_port = server_port
self.remote_host = remote_host
self.remote_port = remote_port
self.check = check
self.verbose = verbose
self.ssh = None
self.event = None
def create_reverse_tunnel(self):
'''
Create the reverse tunnel
'''
self.ssh = paramiko.SSHClient()
self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh.connect(self.server_address, username=self.username, key_filename=self.key_filename)
self.running_thread, self.event, self.child_threads = reverse_forward_tunnel(self.server_port, self.remote_host,
self.remote_port, self.ssh.get_transport(),
self.check)
def __del__(self):
'''
Destructor
'''
if self.ssh is not None:
self.ssh.close()
if self.event is not None:
self.event.set() | scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/utilities/cloud/ssh_reverse.py | ssh_reverse.py | 0.431105 | 0.118717
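A hedged sketch of opening a reverse tunnel with the class above; every host name, user name, key path and port below is a placeholder, not a real endpoint.

from skdiscovery.utilities.cloud.ssh_reverse import ReverseTunnel

tunnel = ReverseTunnel(server_address='gateway.example.org',    # placeholder host
                       username='deploy',                       # placeholder user
                       key_filename='/home/deploy/.ssh/id_rsa',
                       server_port=8022,         # port opened on the SSH server
                       remote_host='localhost',  # traffic is forwarded back here
                       remote_port=22,
                       verbose=True)
tunnel.create_reverse_tunnel()
# The tunnel is torn down when the object is deleted (see __del__ above).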
import skdaccess.utilities.pbo_util as pbo_tools
import skdiscovery.data_structure.series.analysis.mogi as mogi
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
def multiCaPlot(pipeline, mogiFlag=False, offset=.15, direction='H',pca_comp=0,scaleFactor=2.5,map_res='i'):
'''
The multiCaPlot function generates a geographic eigenvector plot of several pipeline runs
This function plots multiple pipeline runs over perturbed pipeline
parameters. The various perturbations are plotted more
transparently (alpha=.25), while the mean eigenvector and Mogi
inversion are plotted in solid blue and red
@param pipeline: The pipeline object with multiple runs
@param mogiFlag: Flag to indicate plotting the Mogi source as well as the PCA
@param offset: Offset for padding the corners of the generated map
@param direction: Indicates the eigenvectors to plot. Only Horizontal component is currently supported ('H')
@param pca_comp: Choose the PCA component to use (integer)
@param scaleFactor: Size of the arrow scaling factor
@param map_res: Map data resolution for Basemap ('c', 'i', 'h', 'f', or None)
'''
# as this is a multi_ca_plot function, assumes GPCA
plt.figure();
meta_data = pipeline.data_generator.meta_data
station_list = pipeline.data_generator.station_list
lat_range, lon_range = pbo_tools.getLatLonRange(meta_data, station_list)
coord_list = pbo_tools.getStationCoords(meta_data, station_list)
# Create a map projection of area
bmap = Basemap(llcrnrlat=lat_range[0] - offset, urcrnrlat=lat_range[1] + offset, llcrnrlon=lon_range[0] - offset, urcrnrlon=lon_range[1] + offset,
projection='gnom', lon_0=np.mean(lon_range), lat_0=np.mean(lat_range), resolution=map_res)
# bmap.fillcontinents(color='white')
# bmap.drawmapboundary(fill_color='white')
bmap.drawmapboundary(fill_color='#41BBEC');
bmap.fillcontinents(color='white')
# Draw just coastlines, no lakes
for i,cp in enumerate(bmap.coastpolygons):
if bmap.coastpolygontypes[i]<2:
bmap.plot(cp[0],cp[1],'k-')
parallels = np.arange(np.round(lat_range[0]-offset,decimals=1),np.round(lat_range[1]+offset,decimals=1),.1)
meridians = np.arange(np.round(lon_range[0]-offset,decimals=1),np.round(lon_range[1]+offset,decimals=1),.1)
bmap.drawmeridians(meridians, labels=[0,0,0,1])
bmap.drawparallels(parallels, labels=[1,0,0,0])
# Plot station coords
for coord in coord_list:
bmap.plot(coord[1], coord[0], 'ko', markersize=6, latlon=True,zorder=12)
x,y = bmap(coord[1], coord[0])
plt.text(x+250,y-450,station_list[coord_list.index(coord)],zorder=12)
# loop over each pipeline run
elatmean = np.zeros(len(station_list))
elonmean = np.zeros_like(elatmean)
# check if want to plot Mogi as well
if mogiFlag:
avg_mogi = np.array([0.,0.])
mlatmean = np.zeros_like(elatmean)
mlonmean = np.zeros_like(elatmean)
for nrun in range(len(pipeline.RA_results)):
pca = pipeline.RA_results[nrun]['GPCA']['CA']
station_lat_list, station_lon_list, ev_lat_list, ev_lon_list, dir_sign = pbo_tools.dirEigenvectors(coord_list, pca.components_[pca_comp])
elatmean += ev_lat_list
elonmean += ev_lon_list
# plot each run in light blue
bmap.quiver(station_lon_list, station_lat_list, ev_lon_list, ev_lat_list, latlon=True,
scale = scaleFactor, alpha = .25, color = 'blue',zorder=11)
if mogiFlag:
mogi_res = pipeline.RA_results[nrun]['Mogi']
avg_mogi += np.array([mogi_res['lon'], mogi_res['lat']])
mogi_x_disp, mogi_y_disp = mogi.MogiVectors(mogi_res,station_lat_list,station_lon_list)
mlatmean += mogi_y_disp
mlonmean += mogi_x_disp
bmap.plot(mogi_res['lon'], mogi_res['lat'], "g^", markersize = 10, latlon=True, alpha = .25,zorder=12)
bmap.quiver(station_lon_list, station_lat_list, mogi_x_disp*dir_sign, mogi_y_disp*dir_sign,
latlon=True, scale=scaleFactor,color='red', alpha = .25,zorder=11)
#plot the mean ev in blue
elatmean = elatmean/len(pipeline.RA_results)
elonmean = elonmean/len(pipeline.RA_results)
bmap.quiver(station_lon_list, station_lat_list, elonmean, elatmean,
latlon=True, scale = scaleFactor, color = 'blue', alpha = 1,zorder=11)
if mogiFlag:
# plot mean mogi results
avg_mogi = avg_mogi/len(pipeline.RA_results)
mlatmean = mlatmean/len(pipeline.RA_results)
mlonmean = mlonmean/len(pipeline.RA_results)
bmap.plot(avg_mogi[0], avg_mogi[1], "g^", markersize = 10, latlon=True, alpha = 1,zorder=12)
bmap.quiver(station_lon_list, station_lat_list, mlonmean*dir_sign, mlatmean*dir_sign,
latlon=True, scale=scaleFactor,color='red', alpha = 1,zorder=11)
ax_x = plt.gca().get_xlim()
ax_y = plt.gca().get_ylim()
x,y = bmap(ax_x[0]+.1*(ax_x[1]-ax_x[0]), ax_y[0]+.1*(ax_y[1]-ax_y[0]),inverse=True)
bmap.quiver(x, y, 0, .2, latlon=True, scale = scaleFactor, headwidth=3,headlength=3,zorder=11)
plt.text(ax_x[0]+.1*(ax_x[1]-ax_x[0])-650, ax_y[0]+.1*(ax_y[1]-ax_y[0])-1000,'20%',zorder=11) | scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/visualization/multi_ca_plot.py | multi_ca_plot.py | 0.582372 | 0.563078
import numpy as np
import pandas as pd
import matplotlib
from matplotlib.patches import Polygon
from scipy.spatial import SphericalVoronoi
import pyproj
# Utility functions for generating the spherical Voronoi tessellation.
def sphericalToXYZ(lat,lon,radius=1):
'''
Convert spherical coordinates to x,y,z
@param lat: Latitude, scalar or array
@param lon: Longitude, scalar or array
@param radius: Sphere's radius
@return Numpy array of x,y,z coordinates
'''
phi = np.deg2rad(90.0 - lat)
theta = np.deg2rad(lon % 360)
x = radius * np.cos(theta)*np.sin(phi)
y = radius * np.sin(theta)*np.sin(phi)
z = radius * np.cos(phi)
if not np.isscalar(x):
return np.vstack([x,y,z]).T
else:
return np.array([x,y,z])
def xyzToSpherical(x,y,z):
'''
Convert x,y,z to spherical coordinates
@param x: Cartesian coordinate x
@param y: Cartesian coordinate y
@param z: Cartesian coordinate z
@return numpy array of latitude,longitude, and radius
'''
radius = np.sqrt(x**2 + y**2 + z**2)
theta = np.rad2deg(np.arctan2(y,x))
phi = np.rad2deg(np.arccos(z/radius))
# lon = (theta + 180) % 360 - 180
# lon = (theta + 360) % 360
lon = theta
lat = 90 - phi
return np.array([lat,lon,radius]).T
def find_match(region_index, region_list):
'''
Find neighboring regions
@param region_index: Numeric index of region to find matches for (number between 0 and len(vertices))
@param region_list: list of lists of vertices that define regions
@return Numeric indices of regions that border the region specified by region_index
'''
regions = region_list[region_index]
matches = []
num_matched_list=[]
for i in range(len(region_list)):
test_regions = region_list[i]
num_matches = 0
found = False
for region in regions:
if region in test_regions:
num_matches += 1
found = True
if found is True:
matches.append(i)
num_matched_list.append(num_matches)
return matches
def getVoronoiCollection(data, lat_name, lon_name, bmap = None, v_name = None, full_sphere = False,
max_v=.3, min_v=-0.3, cmap = matplotlib.cm.get_cmap('jet'), test_point = None,
proj1=None, proj2=None, **kwargs):
'''
Perform a Spherical Voronoi Tessellation on the input data.
In the case where the data is restricted to one part of the globe, a polygon will not be returned
for all objects, as matplotlib polygons won't be able to stretch over half the globe.
@param data: Input pandas data frame
@param lat_name: Name of latitude column
@param lon_name: Name of longitude column
@param bmap: Basemap instance used to convert from lat, lon coordinates to projection coordinates
@param v_name: Name of value column. Use this to color each cell according to a value.
@param full_sphere: Set to true if the data spans the entire globe.
If false, a fictional point is created during tessellation and
removed later to work around issues when polygons are supposed to
span over half the globe.
@param max_v: Specify a maximum value to use when assigning values to the tessellation
@param min_v: Specify a minimum value to use when assigning values to the tessellation
@param cmap: Matplotlib color map to use
@param test_point: Tuple containing the latitude and longitude of the fictional point used to remove polygons that
wrap around the earth. If none, a point is automatically chosen
@param proj1: PyProj projection of input coordinates
@param proj2: PyProj projection of sphere
@param kwargs: Extra keyword arguments are passed to SphericalVoronoi class in scipy
@return Matplotlib patch collection of tessellation, scipy.spatial.SphericalVoronoi object, integer index of objects in patch collection.
'''
data = data.copy()
if full_sphere == False:
if test_point is None:
test_lat = -1*np.mean(data[lat_name])
test_lon = np.mean(data[lon_name]) + 180
else:
test_lat = test_point[0]
test_lon = test_point[1]
full_data = data
full_data = pd.concat([full_data, pd.DataFrame({lat_name: test_lat, lon_name: test_lon},
index=[full_data.index[0]])])
full_data.set_index(np.arange(len(full_data)), inplace=True)
else:
full_data = data
# print(full_data.tail())
if proj1 is not None and proj2 is not None:
results = pyproj.transform(proj1, proj2, full_data[lon_name].to_numpy(), full_data[lat_name].to_numpy())
full_data[lon_name] = results[0]
full_data[lat_name] = results[1]
xyz = pd.DataFrame(sphericalToXYZ(full_data[lat_name], full_data[lon_name]),columns=['x','y','z'],index=full_data.index)
if v_name is not None:
full_data = pd.concat([full_data.loc[:,[lat_name,lon_name, v_name]],xyz],axis=1)
else:
full_data = pd.concat([full_data.loc[:,[lat_name,lon_name]],xyz],axis=1)
unique_index = np.unique(full_data.loc[:,lat_name] + 1j*full_data.loc[:,lon_name],return_index=True)[1]
full_data = full_data.iloc[np.sort(unique_index)]
voronoi = SphericalVoronoi(full_data.loc[:,['x','y','z']].to_numpy(), **kwargs)
voronoi.sort_vertices_of_regions()
latlon_verts = xyzToSpherical(voronoi.vertices[:,0],voronoi.vertices[:,1], voronoi.vertices[:,2])
if proj1 is not None and proj2 is not None:
results = pyproj.transform(proj2, proj1, latlon_verts[:,1], latlon_verts[:,0])
latlon_verts[:, 1] = results[0]
latlon_verts[:, 0] = results[1]
matches = list(map(lambda x: find_match(x, voronoi.regions), range(len(voronoi.regions))))
patch_list = []
patch_index = []
for i, (region,match,(station,row)) in enumerate(zip(voronoi.regions,matches,
full_data.iterrows())):
if full_sphere or (len(matches)-1) not in match:
# Keep an index of regions in patchcollection
patch_index.append(i)
if bmap is not None:
xy = np.array(bmap(latlon_verts[region,1],latlon_verts[region,0])).T
else:
xy = np.array([latlon_verts[region,1],latlon_verts[region,0]]).T
if v_name is not None:
value = row[v_name]
scaled_value = (value - min_v) / (max_v - min_v)
if scaled_value > 1:
scaled_value = 1.0
elif scaled_value < 0:
scaled_value = 0.0
poly = Polygon(xy, fill=True,facecolor = cmap(scaled_value),edgecolor=cmap(scaled_value))
else:
poly = Polygon(xy, fill=False)
patch_list.append(poly)
return matplotlib.collections.PatchCollection(patch_list,match_original=True), voronoi, patch_index | scikit-discovery | /scikit-discovery-0.9.18.tar.gz/scikit-discovery-0.9.18/skdiscovery/visualization/spherical_voronoi.py | spherical_voronoi.py | 0.735831 | 0.734465
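A minimal sketch: tessellate a handful of synthetic stations and draw the cells on plain matplotlib axes (no Basemap), colouring each cell by its 'value' column. The import path follows this record, and a matplotlib version compatible with the module's default cm.get_cmap argument is assumed.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from skdiscovery.visualization.spherical_voronoi import getVoronoiCollection

rng = np.random.default_rng(0)
stations = pd.DataFrame({'lat': rng.uniform(30, 50, 12),
                         'lon': rng.uniform(-120, -70, 12),
                         'value': rng.uniform(-0.3, 0.3, 12)})

collection, voronoi, patch_index = getVoronoiCollection(stations, 'lat', 'lon', v_name='value')

fig, ax = plt.subplots()
ax.add_collection(collection)
ax.plot(stations['lon'], stations['lat'], 'k.')   # station locations on top
ax.autoscale_view()
plt.show()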
.. raw:: html
<img alt="scikit-diveMove" src="docs/source/.static/skdiveMove_logo.png"
width=10% align=left>
<h1>scikit-diveMove</h1>
.. image:: https://img.shields.io/pypi/v/scikit-diveMove
:target: https://pypi.python.org/pypi/scikit-diveMove
:alt: PyPI
.. image:: https://github.com/spluque/scikit-diveMove/workflows/TestPyPI/badge.svg
:target: https://github.com/spluque/scikit-diveMove/actions?query=workflow%3ATestPyPI
:alt: TestPyPI
.. image:: https://github.com/spluque/scikit-diveMove/workflows/Python%20build/badge.svg
:target: https://github.com/spluque/scikit-diveMove/actions?query=workflow%3A%22Python+build%22
:alt: Python Build
.. image:: https://codecov.io/gh/spluque/scikit-diveMove/branch/master/graph/badge.svg
:target: https://codecov.io/gh/spluque/scikit-diveMove
.. image:: https://img.shields.io/pypi/dm/scikit-diveMove
:target: https://pypi.python.org/pypi/scikit-diveMove
:alt: PyPI - Downloads
`scikit-diveMove` is a Python interface to R package `diveMove`_ for
scientific data analysis, with a focus on diving behaviour analysis. It
has utilities to represent, visualize, filter, analyse, and summarize
time-depth recorder (TDR) data. Miscellaneous functions for handling
position and 3D kinematics data are also provided. `scikit-diveMove`
communicates with a single `R` instance for access to low-level tools of
package `diveMove`.
.. _diveMove: https://github.com/spluque/diveMove
The table below shows which features of `diveMove` are accessible from
`scikit-diveMove`:
+----------------------------------+--------------------------+--------------------------------+
| `diveMove` |`scikit-diveMove` |Notes |
+---------------+------------------+ | |
|Functionality |Functions/Methods | | |
+===============+==================+==========================+================================+
|Movement |``austFilter`` | |Under consideration. |
| |``rmsDistFilter`` | | |
| |``grpSpeedFilter``| | |
| |``distSpeed`` | | |
| |``readLocs`` | | |
+---------------+------------------+--------------------------+--------------------------------+
|Bout analysis |``boutfreqs`` |``BoutsNLS`` ``BoutsMLE`` |Fully implemented in Python. |
| |``boutinit`` | | |
| |``bouts2.nlsFUN`` | | |
| |``bouts2.nls`` | | |
| |``bouts3.nlsFUN`` | | |
| |``bouts3.nls`` | | |
| |``bouts2.mleFUN`` | | |
| |``bouts2.ll`` | | |
| |``bouts2.LL`` | | |
| |``bouts.mle`` | | |
| |``labelBouts`` | | |
| |``plotBouts`` | | |
| |``plotBouts2.cdf``| | |
| |``bec2`` | | |
| |``bec3`` | | |
+---------------+------------------+--------------------------+--------------------------------+
|Dive analysis |``readTDR`` |``TDR.__init__`` |Fully implemented. Single |
| |``createTDR`` |``TDRSource.__init__`` |``TDR`` class for data with or |
| | | |without speed measurements. |
+---------------+------------------+--------------------------+--------------------------------+
| |``calibrateDepth``|``TDR.calibrate`` |Fully implemented |
| | |``TDR.zoc`` | |
| | |``TDR.detect_wet`` | |
| | |``TDR.detect_dives`` | |
| | |``TDR.detect_dive_phases``| |
+---------------+------------------+--------------------------+--------------------------------+
| |``calibrateSpeed``|``TDR.calibrate_speed`` |New implementation of the |
| |``rqPlot`` | |algorithm entirely in Python. |
| | | |The procedure generates the plot|
| | | |concurrently. |
+---------------+------------------+--------------------------+--------------------------------+
| |``diveStats`` |``TDR.dive_stats`` |Fully implemented |
| |``stampDive`` |``TDR.time_budget`` | |
| |``timeBudget`` |``TDR.stamp_dives`` | |
+---------------+------------------+--------------------------+--------------------------------+
| |``plotTDR`` |``TDR.plot`` |Fully implemented. |
| |``plotDiveModel`` |``TDR.plot_zoc_filters`` |Interactivity is the default, as|
| |``plotZOC`` |``TDR.plot_phases`` |standard `matplotlib`. |
| | |``TDR.plot_dive_model`` | |
+---------------+------------------+--------------------------+--------------------------------+
| |``getTDR`` |``TDR.tdr`` |Fully implemented. |
| |``getDepth`` |``TDR.get_depth`` |``getCCData`` deemed redundant, |
| |``getSpeed`` |``TDR.get_speed`` |as the columns can be accessed |
| |``getTime`` |``TDR.tdr.index`` |directly from the ``TDR.tdr`` |
| |``getCCData`` |``TDR.src_file`` |attribute. |
| |``getDtime`` |``TDR.dtime`` | |
| |``getFileName`` | | |
+---------------+------------------+--------------------------+--------------------------------+
| |``getDAct`` |``TDR.get_wet_activity`` |Fully implemented |
| |``getDPhaseLab`` |``TDR.get_dives_details`` | |
| |``getDiveDeriv`` |``TDR.get_dive_deriv`` | |
| |``getDiveModel`` | | |
| |``getGAct`` | | |
+---------------+------------------+--------------------------+--------------------------------+
| |``extractDive`` | |Fully implemented |
+---------------+------------------+--------------------------+--------------------------------+
`scikit-diveMove` also provides useful tools for processing signals from
tri-axial Inertial Measurement Units (`IMU`_), such as thermal calibration,
corrections for shifts in coordinate frames, as well as computation of
orientation using a variety of current methods. Analyses are fully
traceable, as the package encourages the use of `xarray`_ data structures
that can be read from and written to the NetCDF file format. Using these
data structures, meta-data attributes can be easily appended at all layers
as analyses progress.
.. _xarray: https://xarray.pydata.org
.. _IMU: https://en.wikipedia.org/wiki/Inertial_measurement_unit
Installation
============
Type the following at a terminal command line:
.. code-block:: sh
pip install scikit-diveMove
Or install from source tree by typing the following at the command line:
.. code-block:: sh
python setup.py install
The documentation can also be installed as described in `Documentation`_.
Once installed, `skdiveMove` can be easily imported as: ::
import skdiveMove as skdive
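
A minimal analysis session might then look as follows (a sketch only; the
input file name is hypothetical, and the complete, runnable examples are in
the package documentation):

.. code-block:: python

   import skdiveMove as skdive

   # Hypothetical NetCDF file with a "depth" variable indexed by "date_time"
   tdr = skdive.TDR.read_netcdf("my_deployment.nc", depth_name="depth",
                                time_name="date_time", has_speed=False)
   tdr.zoc("offset", offset=3)           # zero-offset correction of depth
   tdr.detect_wet()                      # wet/dry phases
   tdr.detect_dives(dive_thr=3)          # dive events deeper than 3 m
   tdr.detect_dive_phases("unimodal")    # descent/bottom/ascent labelling
   print(tdr.dive_stats())
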
Dependencies
------------
`skdiveMove` depends primarily on ``R`` package `diveMove`, which must be
installed and available to the user running Python. If needed, install
`diveMove` at the ``R`` prompt:
.. code-block:: R
install.packages("diveMove")
Required Python packages are listed in the `requirements
<requirements.txt>`_ file.
Documentation
=============
Available at: https://spluque.github.io/scikit-diveMove
Alternatively, installing the package as follows:
.. code-block:: sh
pip install -e .["docs"]
allows the documentation to be built locally (choosing the desired target
{"html", "pdf", etc.}):
.. code-block:: sh
make -C docs/ html
The `html` tree is at `docs/build/html`.
| scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/README.rst | README.rst | 0.876172 | 0.744006 |
.. _demo_ellipsoid-label:
==============================================
Ellipsoid modelling for calibration purposes
==============================================
Magnetometers are highly sensitive to local deviations of the magnetic
field, which affect the desired measurement of the Earth's geomagnetic
field. Triaxial accelerometers, in turn, can have slight offsets and
misalignments in their axes that need to be corrected to interpret their
output properly. A commonly used method for deriving these corrections is
to fit an ellipsoid model to data collected while the sensor's axes are
exposed to the forces of the fields they measure.
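
To build intuition for what such a fit recovers, the short sketch below
(synthetic data only; a simplified sphere fit, not the `fit_ellipsoid`
algorithm used later in this demo) estimates a hard-iron-like offset and a
common radius by linear least squares:

.. code-block:: python

   import numpy as np

   rng = np.random.default_rng(42)
   # Points on a sphere of radius 50, shifted by an offset, plus noise
   true_offset = np.array([3.0, -7.0, 12.0])
   true_radius = 50.0
   u = rng.uniform(0, 2 * np.pi, 2000)
   v = np.arccos(rng.uniform(-1, 1, 2000))
   pts = true_offset + true_radius * np.column_stack(
       (np.sin(v) * np.cos(u), np.sin(v) * np.sin(u), np.cos(v)))
   pts += rng.normal(scale=0.5, size=pts.shape)

   # ||p - c||^2 = r^2 rearranges to 2 p.c + (r^2 - ||c||^2) = ||p||^2,
   # which is linear in the unknowns; solve by least squares
   A = np.column_stack((2 * pts, np.ones(len(pts))))
   b = (pts ** 2).sum(axis=1)
   (cx, cy, cz, k), *_ = np.linalg.lstsq(A, b, rcond=None)
   center = np.array([cx, cy, cz])
   radius = np.sqrt(k + center @ center)
   print(center, radius)   # close to true_offset and true_radius
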
.. jupyter-execute::
# Set up
import pkg_resources as pkg_rsrc
import os.path as osp
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import skdiveMove.imutools as imutools
from mpl_toolkits.mplot3d import Axes3D
.. jupyter-execute::
:hide-code:
# boiler plate stuff to help out
_FIG1X1 = (11, 5)
def gen_sphere(radius=1):
"""Generate coordinates on a sphere"""
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x = radius * np.outer(np.cos(u), np.sin(v))
y = radius * np.outer(np.sin(u), np.sin(v))
z = radius * np.outer(np.ones(np.size(u)), np.cos(v))
return (x, y, z)
np.set_printoptions(precision=3, sign="+")
%matplotlib inline
To demonstrate this procedure with utilities from the `imutools` submodule,
measurements from a triaxial accelerometer and magnetometer were recorded
at a 100 Hz sampling frequency with an `IMU` that was rotated around its
main axes to cover a large portion of the sphere.
.. jupyter-execute::
:linenos:
icdf = (pkg_rsrc
.resource_filename("skdiveMove",
osp.join("tests", "data", "gertrude",
"magnt_accel_calib.nc")))
magnt_accel = xr.load_dataset(icdf)
magnt = magnt_accel["magnetic_density"].to_numpy()
accel = magnt_accel["acceleration"].to_numpy()
The function `fit_ellipsoid` returns the offset, gain, and rotation matrix
(if requested) needed to correct the sensor's data. Six types of
constraint can be imposed on the result, specifying which radii should be
equal and whether the data should be rotated.
.. jupyter-execute::
:linenos:
# Here, a symmetrical constraint whereby any plane passing through the
# origin is used, with all radii equal to each other
magnt_off, magnt_gain, _ = imutools.fit_ellipsoid(magnt, f="sxyz")
accel_off, accel_gain, _ = imutools.fit_ellipsoid(accel, f="sxyz")
Inspect the offsets and gains in the uncorrected data:
.. jupyter-execute::
:hide-code:
print("Magnetometer offsets [uT]: x={:.2f}, y={:.2f}, z={:.2f};"
.format(*magnt_off),
"gains [uT]: x={:.2f}, y={:.2f}, z={:.2f}".format(*magnt_gain))
print("Accelerometer offsets [g]: x={:.3f}, y={:.3f}, z={:.3f};"
.format(*accel_off),
"gains [g]: x={:.3f}, y={:.3f}, z={:.3f}".format(*accel_gain))
Calibrate the sensors using these estimates:
.. jupyter-execute::
:linenos:
magnt_refr = 56.9
magnt_corr = imutools.apply_ellipsoid(magnt, offset=magnt_off,
gain=magnt_gain,
rotM=np.diag(np.ones(3)),
ref_r=magnt_refr)
accel_corr = imutools.apply_ellipsoid(accel, offset=accel_off,
gain=accel_gain,
rotM=np.diag(np.ones(3)),
ref_r=1.0)
The effect of the calibration can be appreciated by comparing the
differences between the maxima/minima and the reference value for the
magnetic field at the geographic location and time of the measurements,
or 1 $g$ in the case of the accelerometers.
.. jupyter-execute::
:linenos:
magnt_refr_diff = [np.abs(magnt.max(axis=0)) - magnt_refr,
np.abs(magnt.min(axis=0)) - magnt_refr]
magnt_corr_refr_diff = [np.abs(magnt_corr.max(axis=0)) - magnt_refr,
np.abs(magnt_corr.min(axis=0)) - magnt_refr]
accel_refr_diff = [np.abs(accel.max(axis=0)) - 1.0,
np.abs(accel.min(axis=0)) - 1.0]
accel_corr_refr_diff = [np.abs(accel_corr.max(axis=0)) - 1.0,
np.abs(accel_corr.min(axis=0)) - 1.0]
.. jupyter-execute::
:hide-code:
print("Uncorrected magnetometer difference to reference [uT]:")
print("maxima: x={:.2f}, y={:.2f}, z={:.2f};"
.format(*magnt_refr_diff[0]),
"minima: x={:.2f}, y={:.2f}, z={:.2f}"
.format(*magnt_refr_diff[1]))
print("Corrected magnetometer difference to reference [uT]:")
print("maxima: x={:.2f}, y={:.2f}, z={:.2f};"
.format(*magnt_corr_refr_diff[0]),
"minima: x={:.2f}, y={:.2f}, z={:.2f}"
.format(*magnt_corr_refr_diff[1]))
print("Uncorrected accelerometer difference to reference [g]:")
print("maxima: x={:.2f}, y={:.2f}, z={:.2f};"
.format(*accel_refr_diff[0]),
"minima: x={:.2f}, y={:.2f}, z={:.2f}"
.format(*accel_refr_diff[1]))
print("Corrected accelerometer difference to reference [g]:")
print("maxima: x={:.2f}, y={:.2f}, z={:.2f};"
.format(*accel_corr_refr_diff[0]),
"minima: x={:.2f}, y={:.2f}, z={:.2f}"
.format(*accel_corr_refr_diff[1]))
Or compare visually on a 3D plot:
.. jupyter-execute::
:hide-code:
_FIG1X2 = [13, 7]
fig = plt.figure(figsize=_FIG1X2)
ax0 = fig.add_subplot(121, projection="3d")
ax1 = fig.add_subplot(122, projection="3d")
ax0.set_xlabel(r"x [$\mu T$]")
ax0.set_ylabel(r"y [$\mu T$]")
ax0.set_zlabel(r"z [$\mu T$]")
ax1.set_xlabel(r"x [$g$]")
ax1.set_ylabel(r"y [$g$]")
ax1.set_zlabel(r"z [$g$]")
ax0.plot_surface(*gen_sphere(magnt_refr), rstride=4, cstride=4, color="c",
linewidth=0, alpha=0.3)
ax1.plot_surface(*gen_sphere(), rstride=4, cstride=4, color="c",
linewidth=0, alpha=0.3)
ax0.plot(magnt[:, 0], magnt[:, 1], magnt[:, 2],
marker=".", linestyle="none", markersize=0.5,
label="uncorrected")
ax0.plot(magnt_corr[:, 0], magnt_corr[:, 1], magnt_corr[:, 2],
marker=".", linestyle="none", markersize=0.5,
label="corrected")
ax1.plot(accel[:, 0], accel[:, 1], accel[:, 2],
marker=".", linestyle="none", markersize=0.5,
label="uncorrected")
ax1.plot(accel_corr[:, 0], accel_corr[:, 1], accel_corr[:, 2],
marker=".", linestyle="none", markersize=0.5,
label="corrected")
l1, lbl1 = fig.axes[-1].get_legend_handles_labels()
fig.legend(l1, lbl1, loc="lower center", borderaxespad=0, frameon=False,
markerscale=12)
ax0.view_init(22, azim=-142)
ax1.view_init(22, azim=-142)
plt.tight_layout()
Feel free to download a copy of this demo
(:jupyter-download:script:`demo_ellipsoid`).
| scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/docs/source/demo_ellipsoid.rst | demo_ellipsoid.rst | 0.842118 | 0.715797 |
=================================
scikit-diveMove Documentation
=================================
`scikit-diveMove` is a Python interface to R package `diveMove`_ for
scientific data analysis, with a focus on diving behaviour analysis. It
has utilities to represent, visualize, filter, analyse, and summarize
time-depth recorder (TDR) data. Miscellaneous functions for handling
location data are also provided.
.. _diveMove: https://github.com/spluque/diveMove
`scikit-diveMove` is hosted at https://github.com/spluque/scikit-diveMove
`scikit-diveMove` also provides useful tools for processing signals from
tri-axial Inertial Measurement Units (`IMU`_), such as thermal calibration,
corrections for shifts in coordinate frames, as well as computation of
orientation using a variety of current methods. Analyses are fully
traceable, as the package encourages the use of `xarray`_ data structures
that can be read from and written to the NetCDF file format. Using these
data structures, meta-data attributes can be easily appended at all layers
as analyses progress.
.. _xarray: https://xarray.pydata.org
.. _IMU: https://en.wikipedia.org/wiki/Inertial_measurement_unit
Installation
============
Type the following at a terminal command line:
.. code-block:: sh
pip install scikit-diveMove
Or install from source tree by typing the following at the command line:
.. code-block:: sh
python setup.py install
Once installed, `skdiveMove` can be easily imported as: ::
import skdiveMove as skdive
Dependencies
------------
`skdiveMove` depends primarily on ``R`` package `diveMove`, which must be
installed and available to the user running Python. If needed, install
`diveMove` at the ``R`` prompt:
.. code-block:: R
install.packages("diveMove")
Required Python packages are listed in the `requirements`_ file.
.. _requirements: https://github.com/spluque/scikit-diveMove/blob/master/requirements.txt
Testing
=======
The `skdiveMove` package can be tested with `unittest`:
.. code-block:: sh
python -m unittest -v skdiveMove/tests
or `pytest`:
.. code-block:: sh
pytest -v skdiveMove/tests
Development
===========
Developers can clone the project from Github:
.. code-block:: sh
git clone https://github.com/spluque/scikit-diveMove.git .
and then install with:
.. code-block:: sh
pip install -e .["dev"]
Demos
=====
.. toctree::
:maxdepth: 2
demo_tdr
demo_bouts
demo_simulbouts
imutools_demos
Modules
=======
.. toctree::
:maxdepth: 2
tdr
bouts
metadata
imutools
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/docs/source/index.rst | index.rst | 0.898291 | 0.765856 |
.. _demo_tdr-label:
===========================
Diving behaviour analysis
===========================
Here is a bird's-eye view of the functionality of `scikit-diveMove`,
loosely following `diveMove`'s `vignette`_.
.. _vignette: https://cran.r-project.org/web/packages/diveMove/vignettes/diveMove.pdf
Set up the environment. Consider loading the `logging` module and setting
up a logger to monitor progress through this section.
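
A minimal setup using only the standard library could be:

.. code-block:: python

   import logging

   logging.basicConfig(
       level=logging.INFO,
       format="%(asctime)s %(name)s %(levelname)s: %(message)s")
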
.. jupyter-execute::
# Set up
import pkg_resources as pkg_rsrc
import matplotlib.pyplot as plt
import skdiveMove as skdive
# Declare figure sizes
_FIG1X1 = (11, 5)
_FIG2X1 = (10, 8)
_FIG3X1 = (11, 11)
.. jupyter-execute::
:hide-code:
:hide-output:
import numpy as np # only for setting print options here
import pandas as pd # only for setting print options here
import xarray as xr # only for setting print options here
pd.set_option("display.precision", 3)
np.set_printoptions(precision=3, sign="+")
xr.set_options(display_style="html")
%matplotlib inline
Reading data files
==================
Load `diveMove`'s example data, using ``TDR.__init__`` method, and print:
.. jupyter-execute::
:linenos:
ifile = (pkg_rsrc
.resource_filename("skdiveMove",
("tests/data/"
"ag_mk7_2002_022.nc")))
tdrX = skdive.TDR.read_netcdf(ifile, depth_name="depth",
time_name="date_time", has_speed=True)
# Or simply use function ``skdive.tests.diveMove2skd`` to do the
# same with this particular data set.
print(tdrX)
Notice that `TDR` reads files in the `NetCDF4`_ format, a very versatile
format that encourages properly documented data sets. `skdiveMove` relies
on `xarray.Dataset` objects to represent such data sets. It is easy to
generate `xarray.Dataset` objects from pandas DataFrames with the
:meth:`.to_xarray` method. `skdiveMove` documents
processing steps by appending to the `history` attribute, in an effort
towards building metadata standards.
.. _NetCDF4: https://www.unidata.ucar.edu/software/netcdf
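
As a brief, self-contained sketch of that workflow (with made-up variable
names and values), a pandas DataFrame indexed by time can be converted,
annotated, and written to NetCDF like this:

.. code-block:: python

   import numpy as np
   import pandas as pd

   times = pd.date_range("2002-01-05 21:00", periods=5, freq="5s",
                         name="date_time")
   frame = pd.DataFrame({"depth": np.linspace(0, 10, 5)}, index=times)
   dset = frame.to_xarray()             # xarray.Dataset with "depth"
   dset["depth"].attrs["units"] = "m"
   dset.attrs["history"] = "created from a pandas DataFrame"
   dset.to_netcdf("toy_tdr.nc")
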
Access measured data:
.. jupyter-execute::
:linenos:
tdrX.get_depth("measured")
Plot measured data:
.. jupyter-execute::
:linenos:
tdrX.plot(xlim=["2002-01-05 21:00:00", "2002-01-06 04:10:00"],
depth_lim=[-1, 95], figsize=_FIG1X1);
Plot concurrent data:
.. jupyter-execute::
:linenos:
ccvars = ["light", "speed"]
tdrX.plot(xlim=["2002-01-05 21:00:00", "2002-01-06 04:10:00"],
depth_lim=[-1, 95], concur_vars=ccvars, figsize=_FIG3X1);
Calibrate measurements
======================
Calibration of TDR measurements involves the following steps, which rely on
data from pressure sensors (barometers):
Zero offset correction (ZOC) of depth measurements
--------------------------------------------------
Using the "offset" method here for speed performance reasons:
.. jupyter-execute::
:linenos:
# Helper dict to set parameter values
pars = {"offset_zoc": 3,
"dry_thr": 70,
"wet_thr": 3610,
"dive_thr": 3,
"dive_model": "unimodal",
"knot_factor": 3,
"descent_crit_q": 0,
"ascent_crit_q": 0}
tdrX.zoc("offset", offset=pars["offset_zoc"])
# Plot ZOC job
tdrX.plot_zoc(xlim=["2002-01-05 21:00:00", "2002-01-06 04:10:00"],
figsize=(13, 6));
Detection of wet vs dry phases
------------------------------
Periods of missing depth measurements longer than `dry_thr` are considered
dry phases, whereas periods that are briefer than `wet_thr` are not
considered to represent a transition to a wet phase.
.. jupyter-execute::
:linenos:
tdrX.detect_wet(dry_thr=pars["dry_thr"], wet_thr=pars["wet_thr"])
Other options, not explored here, include providing a boolean mask Series
to indicate which periods to consider wet phases (argument `wet_cond`), and
whether to interpolate depth measurements that are missing within wet
phases (argument `interp_wet`).
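
For instance, a custom wet mask could be supplied and missing depths within
wet phases interpolated as in the sketch below (not executed in this demo;
the mask shown simply reproduces the default behaviour):

.. code-block:: python

   depth_zoc = tdrX.get_depth("zoc").to_series()
   tdrX.detect_wet(dry_thr=pars["dry_thr"], wet_thr=pars["wet_thr"],
                   wet_cond=depth_zoc.notna(), interp_wet=True)
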
Detection of dive events
------------------------
When depth measurements are greater than `dive_thr`, a dive event is deemed
to have started, ending when measurements cross that threshold again.
.. jupyter-execute::
:linenos:
tdrX.detect_dives(dive_thr=pars["dive_thr"])
Detection of dive phases
------------------------
Two methods for dive phase detection are available ("unimodal" and
"smooth_spline"), and this demo uses the default "unimodal" method:
.. jupyter-execute::
:linenos:
tdrX.detect_dive_phases(dive_model=pars["dive_model"],
knot_factor=pars["knot_factor"],
descent_crit_q=pars["descent_crit_q"],
ascent_crit_q=pars["ascent_crit_q"])
print(tdrX)
Alternatively, all these steps can be performed together via the
`calibrate` function:
.. jupyter-execute::
:linenos:
help(skdive.calibrate)
which is demonstrated in :ref:`demo_bouts-label`.
Plot dive phases
----------------
Once TDR data are properly calibrated and phases detected, results can be
visualized:
.. jupyter-execute::
:linenos:
tdrX.plot_phases(diveNo=list(range(250, 300)), surface=True, figsize=_FIG1X1);
.. jupyter-execute::
:linenos:
# Plot dive model for a dive
tdrX.plot_dive_model(diveNo=20, figsize=(10, 10));
Calibrate speed measurements
----------------------------
In addition to the calibration procedure described above, other variables
in the data set may also need to be calibrated. `skdiveMove` provides
support for calibrating speed sensor data, by taking advantage of its
relationship with the rate of change in depth in the vertical dimension.
.. jupyter-execute::
fig, ax = plt.subplots(figsize=(7, 6))
# Consider only changes in depth larger than 2 m
tdrX.calibrate_speed(z=2, ax=ax)
print(tdrX.speed_calib_fit.summary())
Notice processing steps have been appended to the `history` attribute of
the `DataArray`:
.. jupyter-execute::
print(tdrX.get_depth("zoc"))
.. jupyter-execute::
print(tdrX.get_speed("calibrated"))
Access attributes of `TDR` instance
===================================
Following calibration, use the different accessor methods:
.. jupyter-execute::
# Time series of the wet/dry phases
print(tdrX.wet_dry)
.. jupyter-execute::
print(tdrX.get_phases_params("wet_dry")["dry_thr"])
.. jupyter-execute::
print(tdrX.get_phases_params("wet_dry")["wet_thr"])
.. jupyter-execute::
print(tdrX.get_dives_details("row_ids"))
.. jupyter-execute::
print(tdrX.get_dives_details("spline_derivs"))
.. jupyter-execute::
print(tdrX.get_dives_details("crit_vals"))
Time budgets
============
.. jupyter-execute::
print(tdrX.time_budget(ignore_z=True, ignore_du=False))
.. jupyter-execute::
print(tdrX.time_budget(ignore_z=True, ignore_du=True))
Dive statistics
===============
.. jupyter-execute::
print(tdrX.dive_stats())
Dive stamps
===========
.. jupyter-execute::
print(tdrX.stamp_dives())
Feel free to download a copy of this demo
(:jupyter-download:script:`demo_tdr`).
| scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/docs/source/demo_tdr.rst | demo_tdr.rst | 0.919572 | 0.516778 |
import logging
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
import statsmodels.formula.api as smf
logger = logging.getLogger(__name__)
# Add the null handler when importing as a library; the application using
# this library should set up logging.basicConfig() as needed
logger.addHandler(logging.NullHandler())
def calibrate_speed(x, tau, contour_level, z=0, bad=[0, 0],
plot=True, ax=None):
"""Calibration based on kernel density estimation
Parameters
----------
x : pandas.DataFrame
DataFrame with depth rate and speed
    tau : float
        Quantile used in the quantile regression of speed on binned
        rate of depth change.
    contour_level : float
        Level of the kernel density contour drawn on the calibration
        plot.
    z : float, optional
        Only changes in depth larger than this value should be used for
        calibration (see ``TDR.calibrate_speed``; not used directly in
        this function).
    bad : array_like, optional
        Length-2 vector with the minimum rate of depth change and speed
        to include (see ``TDR.calibrate_speed``; not used directly in
        this function).
plot : bool, optional
Whether to plot calibration results.
ax : matplotlib.Axes, optional
An Axes instance to use as target. Default is to create one.
Returns
-------
out : 2-tuple
The quantile regression fit object, and `matplotlib.pyplot` `Axes`
instance (if plot=True, otherwise None).
Notes
-----
See `skdiveMove.TDR.calibrate_speed` for details.
"""
# `gaussian_kde` expects variables in rows
n_eval = 51
# Numpy for some operations
xnpy = x.to_numpy()
kde = stats.gaussian_kde(xnpy.T)
# Build the grid for evaluation, mimicking bkde2D
mins = x.min()
maxs = x.max()
x_flat = np.linspace(mins[0], maxs[0], n_eval)
y_flat = np.linspace(mins[1], maxs[1], n_eval)
xx, yy = np.meshgrid(x_flat, y_flat)
grid_coords = np.append(xx.reshape(-1, 1), yy.reshape(-1, 1), axis=1)
# Evaluate kde on the grid
z = kde(grid_coords.T)
z = np.flipud(z.reshape(n_eval, n_eval))
# Fit quantile regression
# -----------------------
# Bin depth rate
drbinned = pd.cut(x.iloc[:, 0], n_eval)
drbin_mids = drbinned.apply(lambda x: x.mid) # mid points
# Use bin mid points as x
binned = np.column_stack((drbin_mids, xnpy[:, 1]))
qdata = pd.DataFrame(binned, columns=list("xy"))
qmod = smf.quantreg("y ~ x", qdata)
qfit = qmod.fit(q=tau)
coefs = qfit.params
logger.info("a={}, b={}".format(*coefs))
if plot:
fig = plt.gcf()
if ax is None:
ax = plt.gca()
ax.set_xlabel("Rate of depth change")
ax.set_ylabel("Speed")
zimg = ax.imshow(z, aspect="auto",
extent=[mins[0], maxs[0], mins[1], maxs[1]],
cmap="gist_earth_r")
fig.colorbar(zimg, fraction=0.1, aspect=30, pad=0.02,
label="Kernel density probability")
cntr = ax.contour(z, extent=[mins[0], maxs[0], mins[1], maxs[1]],
origin="image", levels=[contour_level])
ax.clabel(cntr, fmt="%1.2f")
# Plot the binned data, adding some noise for clarity
xjit_binned = np.random.normal(binned[:, 0],
xnpy[:, 0].ptp() / (2 * n_eval))
ax.scatter(xjit_binned, binned[:, 1], s=6, alpha=0.3)
# Plot line
xnew = np.linspace(mins[0], maxs[0])
yhat = coefs[0] + coefs[1] * xnew
ax.plot(xnew, yhat, "--k",
label=(r"$y = {:.3f} {:+.3f} x$"
.format(coefs[0], coefs[1])))
ax.legend(loc="lower right")
# Adjust limits to compensate for the noise in x
ax.set_xlim([mins[0], maxs[0]])
return (qfit, ax)
if __name__ == '__main__':
from skdiveMove.tests import diveMove2skd
tdrX = diveMove2skd()
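    # Hypothetical illustration (not part of the original module): run the
    # calibration on synthetic depth-rate/speed data, skipping the plot.
    rng = np.random.default_rng(123)
    drate = rng.uniform(0.1, 3.0, 500)
    speed = 0.2 + 1.1 * drate + rng.exponential(0.3, 500)
    toy = pd.DataFrame({"depth_rate": drate, "speed": speed})
    qfit_toy, _ = calibrate_speed(toy, tau=0.1, contour_level=0.1,
                                  plot=False)
    print(qfit_toy.params)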
    print(tdrX)
| scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/calibspeed.py | calibspeed.py | 0.876898 | 0.591222 |
import logging
import numpy as np
import pandas as pd
from skdiveMove.zoc import ZOC
from skdiveMove.core import diveMove, robjs, cv, pandas2ri
from skdiveMove.helpers import (get_var_sampling_interval, _cut_dive,
rle_key, _append_xr_attr)
logger = logging.getLogger(__name__)
# Add the null handler when importing as a library; the application using
# this library should set up logging.basicConfig() as needed
logger.addHandler(logging.NullHandler())
class TDRPhases(ZOC):
"""Core TDR phase identification routines
See help(TDRSource) for inherited attributes.
Attributes
----------
wet_dry
dives : dict
Dictionary of dive activity data {'row_ids': pandas.DataFrame,
'model': str, 'splines': dict, 'spline_derivs': pandas.DataFrame,
'crit_vals': pandas.DataFrame}.
params : dict
Dictionary with parameters used for detection of wet/dry and dive
phases. {'wet_dry': {'dry_thr': float, 'wet_thr': float}, 'dives':
{'dive_thr': float, 'dive_model': str, 'smooth_par': float,
'knot_factor': int, 'descent_crit_q': float, 'ascent_crit_q':
float}}
"""
def __init__(self, *args, **kwargs):
"""Initialize TDRPhases instance
Parameters
----------
*args : positional arguments
Passed to :meth:`ZOC.__init__`
**kwargs : keyword arguments
Passed to :meth:`ZOC.__init__`
"""
ZOC.__init__(self, *args, **kwargs)
self._wet_dry = None
self.dives = dict(row_ids=None, model=None, splines=None,
spline_derivs=None, crit_vals=None)
self.params = dict(wet_dry={}, dives={})
def __str__(self):
base = ZOC.__str__(self)
wetdry_params = self.get_phases_params("wet_dry")
dives_params = self.get_phases_params("dives")
return (base +
("\n{0:<20} {1}\n{2:<20} {3}"
.format("Wet/Dry parameters:", wetdry_params,
"Dives parameters:", dives_params)))
def detect_wet(self, dry_thr=70, wet_cond=None, wet_thr=3610,
interp_wet=False):
"""Detect wet/dry activity phases
Parameters
----------
dry_thr : float, optional
wet_cond : bool mask, optional
A Pandas.Series bool mask indexed as `depth`. Default is
generated from testing for non-missing `depth`.
wet_thr : float, optional
interp_wet : bool, optional
Notes
-----
See details for arguments in diveMove's ``calibrateDepth``. Unlike
`diveMove`, the beginning/ending times for each phase are not
stored with the class instance, as this information can be
retrieved via the :meth:`~TDR.time_budget` method.
Examples
--------
ZOC using the "offset" method for convenience
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd("TDRPhases")
>>> tdrX.zoc("offset", offset=3)
Detect wet/dry phases
>>> tdrX.detect_wet()
Access the "phases" and "dry_thr" attributes
>>> tdrX.wet_dry # doctest: +ELLIPSIS
phase_id phase_label
date_time
2002-01-05 ... 1 L
...
"""
# Retrieve copy of depth from our own property
depth = self.depth_zoc
depth_py = depth.to_series()
time_py = depth_py.index
dtime = get_var_sampling_interval(depth).total_seconds()
if wet_cond is None:
wet_cond = ~depth_py.isna()
phases_l = (diveMove
._detPhase(robjs.vectors.POSIXct(time_py),
robjs.vectors.FloatVector(depth_py),
dry_thr=dry_thr,
wet_thr=wet_thr,
                               wet_cond=(robjs.vectors
                                         .BoolVector(wet_cond)),
interval=dtime))
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
phases = pd.DataFrame({'phase_id': phases_l.rx2("phase.id"),
'phase_label': phases_l.rx2("activity")},
index=time_py)
phases.loc[:, "phase_id"] = phases.loc[:, "phase_id"].astype(int)
self._wet_dry = phases
wet_dry_params = dict(dry_thr=dry_thr, wet_thr=wet_thr)
self.params["wet_dry"].update(wet_dry_params)
if interp_wet:
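            # Fill missing depths within wet phases by cubic interpolation,
            # clipping any negative interpolated values to zero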
zdepth = depth.to_series()
iswet = phases["phase_label"] == "W"
iswetna = iswet & zdepth.isna()
if any(iswetna):
depth_intp = zdepth[iswet].interpolate(method="cubic")
zdepth[iswetna] = np.maximum(np.zeros_like(depth_intp),
depth_intp)
zdepth = zdepth.to_xarray()
zdepth.attrs = depth.attrs
_append_xr_attr(zdepth, "history", "interp_wet")
self._depth_zoc = zdepth
self._zoc_params.update(dict(interp_wet=interp_wet))
logger.info("Finished detecting wet/dry periods")
def detect_dives(self, dive_thr):
"""Identify dive events
        Set the ``dives`` attribute's "row_ids" dictionary element, and
        update the "phase_label" column of the ``wet_dry`` attribute.
Parameters
----------
dive_thr : float
Notes
-----
See details for arguments in diveMove's ``calibrateDepth``.
Examples
--------
ZOC using the "offset" method for convenience
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd("TDRPhases")
>>> tdrX.zoc("offset", offset=3)
Detect wet/dry phases and dives with 3 m threshold
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
"""
# Retrieve copy of depth from our own property
depth = self.depth_zoc
depth_py = depth.to_series()
act_phases = self.wet_dry["phase_label"]
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
phases_df = diveMove._detDive(pd.Series(depth_py),
pd.Series(act_phases),
dive_thr=dive_thr)
# Replace dots with underscore
phases_df.columns = (phases_df.columns.str
.replace(".", "_", regex=False))
phases_df.set_index(depth_py.index, inplace=True)
dive_activity = phases_df.pop("dive_activity")
# Dive and post-dive ID should be integer
phases_df = phases_df.astype(int)
self.dives["row_ids"] = phases_df
self._wet_dry["phase_label"] = dive_activity
self.params["dives"].update({'dive_thr': dive_thr})
logger.info("Finished detecting dives")
def detect_dive_phases(self, dive_model, smooth_par=0.1,
knot_factor=3, descent_crit_q=0,
ascent_crit_q=0):
"""Detect dive phases
Complete filling the ``dives`` attribute.
Parameters
----------
dive_model : {"unimodal", "smooth.spline"}
smooth_par : float, optional
knot_factor : int, optional
descent_crit_q : float, optional
ascent_crit_q : float, optional
Notes
-----
See details for arguments in diveMove's ``calibrateDepth``.
Examples
--------
ZOC using the "offset" method for convenience
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd("TDRPhases")
>>> tdrX.zoc("offset", offset=3)
Detect wet/dry phases and dives with 3 m threshold
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
Detect dive phases using the "unimodal" method and selected
parameters
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
"""
# Retrieve copy of depth from our own property
depth = self.depth_zoc
depth_py = depth.to_series()
phases_df = self.get_dives_details("row_ids")
dive_ids = self.get_dives_details("row_ids", columns="dive_id")
ok = (dive_ids > 0) & ~depth_py.isna()
xx = pd.Categorical(np.repeat(["X"], phases_df.shape[0]),
categories=["D", "DB", "B", "BA",
"DA", "A", "X"])
dive_phases = pd.Series(xx, index=phases_df.index)
if any(ok):
ddepths = depth_py[ok] # diving depths
dtimes = ddepths.index
dids = dive_ids[ok]
idx = np.squeeze(np.argwhere(ok.to_numpy()))
time_num = (dtimes - dtimes[0]).total_seconds().to_numpy()
divedf = pd.DataFrame({'dive_id': dids.to_numpy(),
'idx': idx,
'depth': ddepths.to_numpy(),
'time_num': time_num},
index=ddepths.index)
grouped = divedf.groupby("dive_id")
cval_list = []
spl_der_list = []
spl_list = []
for name, grp in grouped:
res = _cut_dive(grp, dive_model=dive_model,
smooth_par=smooth_par,
knot_factor=knot_factor,
descent_crit_q=descent_crit_q,
ascent_crit_q=ascent_crit_q)
dive_phases.loc[grp.index] = (res.pop("label_matrix")[:, 1])
# Splines
spl = res.pop("dive_spline")
# Convert directly into a dict, with each element turned
# into a list of R objects. Access each via
# `_get_dive_spline_slot`
spl_dict = dict(zip(spl.names, list(spl)))
spl_list.append(spl_dict)
# Spline derivatives
spl_der = res.pop("spline_deriv")
spl_der_idx = pd.TimedeltaIndex(spl_der[:, 0], unit="s")
spl_der = pd.DataFrame({'y': spl_der[:, 1]},
index=spl_der_idx)
spl_der_list.append(spl_der)
# Critical values (all that's left in res)
cvals = pd.DataFrame(res, index=[name])
cvals.index.rename("dive_id", inplace=True)
# Adjust critical indices for Python convention and ensure
# integers
cvals.iloc[:, :2] = cvals.iloc[:, :2].astype(int) - 1
cval_list.append(cvals)
self.dives["model"] = dive_model
# Splines
self.dives["splines"] = dict(zip(grouped.groups.keys(),
spl_list))
self.dives["spline_derivs"] = pd.concat(spl_der_list,
keys=(grouped
.groups.keys()))
self.dives["crit_vals"] = pd.concat(cval_list)
else:
logger.warning("No dives found")
# Update the `dives` attribute
self.dives["row_ids"]["dive_phase"] = dive_phases
(self.params["dives"]
.update(dict(dive_model=dive_model, smooth_par=smooth_par,
knot_factor=knot_factor,
descent_crit_q=descent_crit_q,
ascent_crit_q=ascent_crit_q)))
logger.info("Finished detecting dive phases")
def get_dives_details(self, key, columns=None):
"""Accessor for the `dives` attribute
Parameters
----------
key : {"row_ids", "model", "splines", "spline_derivs", crit_vals}
Name of the key to retrieve.
columns : array_like, optional
Names of the columns of the dataframe in `key`, when applicable.
"""
try:
okey = self.dives[key]
except KeyError:
msg = ("\'{}\' is not found.\nAvailable keys: {}"
.format(key, self.dives.keys()))
logger.error(msg)
raise KeyError(msg)
else:
if okey is None:
raise KeyError("\'{}\' not available.".format(key))
if columns:
try:
odata = okey[columns]
except KeyError:
msg = ("At least one of the requested columns does not "
"exist.\nAvailable columns: {}").format(okey.columns)
logger.error(msg)
raise KeyError(msg)
else:
odata = okey
return odata
def _get_wet_activity(self):
return self._wet_dry
wet_dry = property(_get_wet_activity)
"""Wet/dry activity labels
Returns
-------
pandas.DataFrame
DataFrame with columns: `phase_id` and `phase_label` for each
measurement.
"""
def get_phases_params(self, key):
"""Return parameters used for identifying wet/dry or diving phases.
Parameters
----------
key: {'wet_dry', 'dives'}
Returns
-------
out : dict
"""
try:
params = self.params[key]
except KeyError:
msg = "key must be one of: {}".format(self.params.keys())
logger.error(msg)
raise KeyError(msg)
return params
def _get_dive_spline_slot(self, diveNo, name):
"""Accessor for the R objects in `dives`["splines"]
Private method to retrieve elements easily. Elements can be
accessed individually as is, but some elements are handled
specially.
Parameters
----------
diveNo : int or float
Which dive number to retrieve spline details for.
name : str
Element to retrieve. {"data", "xy", "knots", "coefficients",
"order", "lambda.opt", "sigmasq", "degree", "g", "a", "b",
"variter"}
"""
# Safe to assume these are all scalars, based on the current
# default settings in diveMove's `.cutDive`
scalars = ["order", "lambda.opt", "sigmasq", "degree",
"g", "a", "b", "variter"]
idata = self.get_dives_details("splines")[diveNo]
if name == "data":
x = pd.TimedeltaIndex(np.array(idata[name][0]), unit="s")
odata = pd.Series(np.array(idata[name][1]), index=x)
elif name == "xy":
x = pd.TimedeltaIndex(np.array(idata["x"]), unit="s")
odata = pd.Series(np.array(idata["y"]), index=x)
elif name in scalars:
            odata = float(idata[name][0])
else:
odata = np.array(idata[name])
return odata
def get_dive_deriv(self, diveNo, phase=None):
"""Retrieve depth spline derivative for a given dive
Parameters
----------
diveNo : int
Dive number to retrieve derivative for.
phase : {"descent", "bottom", "ascent"}
If provided, the dive phase to retrieve data for.
Returns
-------
out : pandas.DataFrame
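        Examples
        --------
        A short sketch; ``tdrX`` stands for a TDR object on which dive
        phases have been detected as in :meth:`detect_dive_phases`, and
        dive number 2 is assumed to exist:
        >>> der_desc = tdrX.get_dive_deriv(2, phase="descent")  # doctest: +SKIP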
"""
der = self.get_dives_details("spline_derivs").loc[diveNo]
crit_vals = self.get_dives_details("crit_vals").loc[diveNo]
spl_data = self.get_dives_details("splines")[diveNo]["data"]
spl_times = np.array(spl_data[0]) # x row is time steps in (s)
if phase == "descent":
descent_crit = int(crit_vals["descent_crit"])
deltat_crit = pd.Timedelta(spl_times[descent_crit], unit="s")
oder = der.loc[:deltat_crit]
elif phase == "bottom":
descent_crit = int(crit_vals["descent_crit"])
deltat1 = pd.Timedelta(spl_times[descent_crit], unit="s")
ascent_crit = int(crit_vals["ascent_crit"])
deltat2 = pd.Timedelta(spl_times[ascent_crit], unit="s")
oder = der[(der.index >= deltat1) & (der.index <= deltat2)]
elif phase == "ascent":
ascent_crit = int(crit_vals["ascent_crit"])
deltat_crit = pd.Timedelta(spl_times[ascent_crit], unit="s")
oder = der.loc[deltat_crit:]
elif phase is None:
oder = der
else:
msg = "`phase` must be 'descent', 'bottom' or 'ascent'"
logger.error(msg)
raise KeyError(msg)
return oder
def _get_dive_deriv_stats(self, diveNo):
"""Calculate stats for the depth derivative of a given dive
"""
desc = self.get_dive_deriv(diveNo, "descent")
bott = self.get_dive_deriv(diveNo, "bottom")
asc = self.get_dive_deriv(diveNo, "ascent")
# Rename DataFrame to match diveNo
desc_sts = (pd.DataFrame(desc.describe().iloc[1:]).transpose()
.add_prefix("descD_").rename({"y": diveNo}))
bott_sts = (pd.DataFrame(bott.describe().iloc[1:]).transpose()
.add_prefix("bottD_").rename({"y": diveNo}))
asc_sts = (pd.DataFrame(asc.describe().iloc[1:]).transpose()
.add_prefix("ascD_").rename({"y": diveNo}))
sts = pd.merge(desc_sts, bott_sts, left_index=True,
right_index=True)
sts = pd.merge(sts, asc_sts, left_index=True, right_index=True)
return sts
def time_budget(self, ignore_z=True, ignore_du=True):
"""Summary of wet/dry activities at the broadest time scale
Parameters
----------
ignore_z : bool, optional
Whether to ignore trivial aquatic periods.
ignore_du : bool, optional
Whether to ignore diving and underwater periods.
Returns
-------
out : pandas.DataFrame
DataFrame indexed by phase id, with categorical activity label
for each phase, and beginning and ending times.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd("TDRPhases")
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.time_budget(ignore_z=True,
... ignore_du=True) # doctest: +ELLIPSIS
beg phase_label end
phase_id
1 2002-01-05 ... L 2002-01-05 ...
...
"""
phase_lab = self.wet_dry["phase_label"]
idx_name = phase_lab.index.name
labels = phase_lab.reset_index()
if ignore_z:
labels = labels.mask(labels == "Z", "L")
if ignore_du:
labels = labels.mask((labels == "U") | (labels == "D"), "W")
grp_key = rle_key(labels["phase_label"]).rename("phase_id")
labels_grp = labels.groupby(grp_key)
begs = labels_grp.first().rename(columns={idx_name: "beg"})
ends = labels_grp.last()[idx_name].rename("end")
return pd.concat((begs, ends), axis=1)
def stamp_dives(self, ignore_z=True):
"""Identify the wet activity phase corresponding to each dive
Parameters
----------
ignore_z : bool, optional
Whether to ignore trivial aquatic periods.
Returns
-------
out : pandas.DataFrame
DataFrame indexed by dive ID, and three columns identifying
            which phase they are in, and the beginning and ending time
stamps.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd("TDRPhases")
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.stamp_dives(ignore_z=True) # doctest: +ELLIPSIS
phase_id beg end
dive_id
1 2 2002-01-05 ... 2002-01-06 ...
"""
phase_lab = self.wet_dry["phase_label"]
idx_name = phase_lab.index.name
# "U" and "D" considered as "W" here
phase_lab = phase_lab.mask(phase_lab.isin(["U", "D"]), "W")
if ignore_z:
phase_lab = phase_lab.mask(phase_lab == "Z", "L")
dive_ids = self.get_dives_details("row_ids", columns="dive_id")
grp_key = rle_key(phase_lab).rename("phase_id")
isdive = dive_ids > 0
merged = (pd.concat((grp_key, dive_ids, phase_lab), axis=1)
.loc[isdive, :].reset_index())
        # Reset index to use in first() and last()
merged_grp = merged.groupby("phase_id")
dives_ll = []
for name, group in merged_grp:
dives_uniq = pd.Series(group["dive_id"].unique(),
name="dive_id")
beg = [group[idx_name].iloc[0]] * dives_uniq.size
end = [group[idx_name].iloc[-1]] * dives_uniq.size
dive_df = pd.DataFrame({'phase_id': [name] * dives_uniq.size,
'beg': beg,
'end': end}, index=dives_uniq)
dives_ll.append(dive_df)
dives_all = pd.concat(dives_ll)
return dives_all | scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/tdrphases.py | tdrphases.py | import logging
import numpy as np
import pandas as pd
from skdiveMove.zoc import ZOC
from skdiveMove.core import diveMove, robjs, cv, pandas2ri
from skdiveMove.helpers import (get_var_sampling_interval, _cut_dive,
rle_key, _append_xr_attr)
logger = logging.getLogger(__name__)
# Add the null handler if importing as a library; whatever is using this
# library should set up logging.basicConfig() as needed
logger.addHandler(logging.NullHandler())
class TDRPhases(ZOC):
"""Core TDR phase identification routines
See help(TDRSource) for inherited attributes.
Attributes
----------
wet_dry
dives : dict
Dictionary of dive activity data {'row_ids': pandas.DataFrame,
'model': str, 'splines': dict, 'spline_derivs': pandas.DataFrame,
'crit_vals': pandas.DataFrame}.
params : dict
Dictionary with parameters used for detection of wet/dry and dive
phases. {'wet_dry': {'dry_thr': float, 'wet_thr': float}, 'dives':
{'dive_thr': float, 'dive_model': str, 'smooth_par': float,
'knot_factor': int, 'descent_crit_q': float, 'ascent_crit_q':
float}}
"""
def __init__(self, *args, **kwargs):
"""Initialize TDRPhases instance
Parameters
----------
*args : positional arguments
Passed to :meth:`ZOC.__init__`
**kwargs : keyword arguments
Passed to :meth:`ZOC.__init__`
"""
ZOC.__init__(self, *args, **kwargs)
self._wet_dry = None
self.dives = dict(row_ids=None, model=None, splines=None,
spline_derivs=None, crit_vals=None)
self.params = dict(wet_dry={}, dives={})
def __str__(self):
base = ZOC.__str__(self)
wetdry_params = self.get_phases_params("wet_dry")
dives_params = self.get_phases_params("dives")
return (base +
("\n{0:<20} {1}\n{2:<20} {3}"
.format("Wet/Dry parameters:", wetdry_params,
"Dives parameters:", dives_params)))
def detect_wet(self, dry_thr=70, wet_cond=None, wet_thr=3610,
interp_wet=False):
"""Detect wet/dry activity phases
Parameters
----------
dry_thr : float, optional
wet_cond : bool mask, optional
A Pandas.Series bool mask indexed as `depth`. Default is
generated from testing for non-missing `depth`.
wet_thr : float, optional
interp_wet : bool, optional
Notes
-----
See details for arguments in diveMove's ``calibrateDepth``. Unlike
`diveMove`, the beginning/ending times for each phase are not
stored with the class instance, as this information can be
retrieved via the :meth:`~TDR.time_budget` method.
Examples
--------
ZOC using the "offset" method for convenience
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd("TDRPhases")
>>> tdrX.zoc("offset", offset=3)
Detect wet/dry phases
>>> tdrX.detect_wet()
Access the "phases" and "dry_thr" attributes
>>> tdrX.wet_dry # doctest: +ELLIPSIS
phase_id phase_label
date_time
2002-01-05 ... 1 L
...
"""
# Retrieve copy of depth from our own property
depth = self.depth_zoc
depth_py = depth.to_series()
time_py = depth_py.index
dtime = get_var_sampling_interval(depth).total_seconds()
if wet_cond is None:
wet_cond = ~depth_py.isna()
phases_l = (diveMove
._detPhase(robjs.vectors.POSIXct(time_py),
robjs.vectors.FloatVector(depth_py),
dry_thr=dry_thr,
wet_thr=wet_thr,
                               wet_cond=(robjs.vectors
                                         .BoolVector(wet_cond)),
interval=dtime))
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
phases = pd.DataFrame({'phase_id': phases_l.rx2("phase.id"),
'phase_label': phases_l.rx2("activity")},
index=time_py)
phases.loc[:, "phase_id"] = phases.loc[:, "phase_id"].astype(int)
self._wet_dry = phases
wet_dry_params = dict(dry_thr=dry_thr, wet_thr=wet_thr)
self.params["wet_dry"].update(wet_dry_params)
if interp_wet:
zdepth = depth.to_series()
iswet = phases["phase_label"] == "W"
iswetna = iswet & zdepth.isna()
if any(iswetna):
depth_intp = zdepth[iswet].interpolate(method="cubic")
zdepth[iswetna] = np.maximum(np.zeros_like(depth_intp),
depth_intp)
zdepth = zdepth.to_xarray()
zdepth.attrs = depth.attrs
_append_xr_attr(zdepth, "history", "interp_wet")
self._depth_zoc = zdepth
self._zoc_params.update(dict(interp_wet=interp_wet))
logger.info("Finished detecting wet/dry periods")
def detect_dives(self, dive_thr):
"""Identify dive events
Set the ``dives`` attribute's "row_ids" dictionary element, and
update the ``wet_act`` attribute's "phases" dictionary element.
Parameters
----------
dive_thr : float
Notes
-----
See details for arguments in diveMove's ``calibrateDepth``.
Examples
--------
ZOC using the "offset" method for convenience
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd("TDRPhases")
>>> tdrX.zoc("offset", offset=3)
Detect wet/dry phases and dives with 3 m threshold
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
"""
# Retrieve copy of depth from our own property
depth = self.depth_zoc
depth_py = depth.to_series()
act_phases = self.wet_dry["phase_label"]
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
phases_df = diveMove._detDive(pd.Series(depth_py),
pd.Series(act_phases),
dive_thr=dive_thr)
# Replace dots with underscore
phases_df.columns = (phases_df.columns.str
.replace(".", "_", regex=False))
phases_df.set_index(depth_py.index, inplace=True)
dive_activity = phases_df.pop("dive_activity")
# Dive and post-dive ID should be integer
phases_df = phases_df.astype(int)
self.dives["row_ids"] = phases_df
self._wet_dry["phase_label"] = dive_activity
self.params["dives"].update({'dive_thr': dive_thr})
logger.info("Finished detecting dives")
def detect_dive_phases(self, dive_model, smooth_par=0.1,
knot_factor=3, descent_crit_q=0,
ascent_crit_q=0):
"""Detect dive phases
Complete filling the ``dives`` attribute.
Parameters
----------
dive_model : {"unimodal", "smooth.spline"}
smooth_par : float, optional
knot_factor : int, optional
descent_crit_q : float, optional
ascent_crit_q : float, optional
Notes
-----
See details for arguments in diveMove's ``calibrateDepth``.
Examples
--------
ZOC using the "offset" method for convenience
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd("TDRPhases")
>>> tdrX.zoc("offset", offset=3)
Detect wet/dry phases and dives with 3 m threshold
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
Detect dive phases using the "unimodal" method and selected
parameters
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
"""
# Retrieve copy of depth from our own property
depth = self.depth_zoc
depth_py = depth.to_series()
phases_df = self.get_dives_details("row_ids")
dive_ids = self.get_dives_details("row_ids", columns="dive_id")
ok = (dive_ids > 0) & ~depth_py.isna()
xx = pd.Categorical(np.repeat(["X"], phases_df.shape[0]),
categories=["D", "DB", "B", "BA",
"DA", "A", "X"])
dive_phases = pd.Series(xx, index=phases_df.index)
if any(ok):
ddepths = depth_py[ok] # diving depths
dtimes = ddepths.index
dids = dive_ids[ok]
idx = np.squeeze(np.argwhere(ok.to_numpy()))
time_num = (dtimes - dtimes[0]).total_seconds().to_numpy()
divedf = pd.DataFrame({'dive_id': dids.to_numpy(),
'idx': idx,
'depth': ddepths.to_numpy(),
'time_num': time_num},
index=ddepths.index)
grouped = divedf.groupby("dive_id")
cval_list = []
spl_der_list = []
spl_list = []
for name, grp in grouped:
res = _cut_dive(grp, dive_model=dive_model,
smooth_par=smooth_par,
knot_factor=knot_factor,
descent_crit_q=descent_crit_q,
ascent_crit_q=ascent_crit_q)
dive_phases.loc[grp.index] = (res.pop("label_matrix")[:, 1])
# Splines
spl = res.pop("dive_spline")
# Convert directly into a dict, with each element turned
# into a list of R objects. Access each via
# `_get_dive_spline_slot`
spl_dict = dict(zip(spl.names, list(spl)))
spl_list.append(spl_dict)
# Spline derivatives
spl_der = res.pop("spline_deriv")
spl_der_idx = pd.TimedeltaIndex(spl_der[:, 0], unit="s")
spl_der = pd.DataFrame({'y': spl_der[:, 1]},
index=spl_der_idx)
spl_der_list.append(spl_der)
# Critical values (all that's left in res)
cvals = pd.DataFrame(res, index=[name])
cvals.index.rename("dive_id", inplace=True)
# Adjust critical indices for Python convention and ensure
# integers
cvals.iloc[:, :2] = cvals.iloc[:, :2].astype(int) - 1
cval_list.append(cvals)
self.dives["model"] = dive_model
# Splines
self.dives["splines"] = dict(zip(grouped.groups.keys(),
spl_list))
self.dives["spline_derivs"] = pd.concat(spl_der_list,
keys=(grouped
.groups.keys()))
self.dives["crit_vals"] = pd.concat(cval_list)
else:
logger.warning("No dives found")
# Update the `dives` attribute
self.dives["row_ids"]["dive_phase"] = dive_phases
(self.params["dives"]
.update(dict(dive_model=dive_model, smooth_par=smooth_par,
knot_factor=knot_factor,
descent_crit_q=descent_crit_q,
ascent_crit_q=ascent_crit_q)))
logger.info("Finished detecting dive phases")
def get_dives_details(self, key, columns=None):
"""Accessor for the `dives` attribute
Parameters
----------
key : {"row_ids", "model", "splines", "spline_derivs", crit_vals}
Name of the key to retrieve.
columns : array_like, optional
Names of the columns of the dataframe in `key`, when applicable.
"""
try:
okey = self.dives[key]
except KeyError:
msg = ("\'{}\' is not found.\nAvailable keys: {}"
.format(key, self.dives.keys()))
logger.error(msg)
raise KeyError(msg)
else:
if okey is None:
raise KeyError("\'{}\' not available.".format(key))
if columns:
try:
odata = okey[columns]
except KeyError:
msg = ("At least one of the requested columns does not "
"exist.\nAvailable columns: {}").format(okey.columns)
logger.error(msg)
raise KeyError(msg)
else:
odata = okey
return odata
def _get_wet_activity(self):
return self._wet_dry
wet_dry = property(_get_wet_activity)
"""Wet/dry activity labels
Returns
-------
pandas.DataFrame
DataFrame with columns: `phase_id` and `phase_label` for each
measurement.
"""
def get_phases_params(self, key):
"""Return parameters used for identifying wet/dry or diving phases.
Parameters
----------
key: {'wet_dry', 'dives'}
Returns
-------
out : dict
"""
try:
params = self.params[key]
except KeyError:
msg = "key must be one of: {}".format(self.params.keys())
logger.error(msg)
raise KeyError(msg)
return params
def _get_dive_spline_slot(self, diveNo, name):
"""Accessor for the R objects in `dives`["splines"]
Private method to retrieve elements easily. Elements can be
accessed individually as is, but some elements are handled
specially.
Parameters
----------
diveNo : int or float
Which dive number to retrieve spline details for.
name : str
Element to retrieve. {"data", "xy", "knots", "coefficients",
"order", "lambda.opt", "sigmasq", "degree", "g", "a", "b",
"variter"}
"""
# Safe to assume these are all scalars, based on the current
# default settings in diveMove's `.cutDive`
scalars = ["order", "lambda.opt", "sigmasq", "degree",
"g", "a", "b", "variter"]
idata = self.get_dives_details("splines")[diveNo]
if name == "data":
x = pd.TimedeltaIndex(np.array(idata[name][0]), unit="s")
odata = pd.Series(np.array(idata[name][1]), index=x)
elif name == "xy":
x = pd.TimedeltaIndex(np.array(idata["x"]), unit="s")
odata = pd.Series(np.array(idata["y"]), index=x)
elif name in scalars:
            odata = float(idata[name][0])
else:
odata = np.array(idata[name])
return odata
def get_dive_deriv(self, diveNo, phase=None):
"""Retrieve depth spline derivative for a given dive
Parameters
----------
diveNo : int
Dive number to retrieve derivative for.
phase : {"descent", "bottom", "ascent"}
If provided, the dive phase to retrieve data for.
Returns
-------
out : pandas.DataFrame
"""
der = self.get_dives_details("spline_derivs").loc[diveNo]
crit_vals = self.get_dives_details("crit_vals").loc[diveNo]
spl_data = self.get_dives_details("splines")[diveNo]["data"]
spl_times = np.array(spl_data[0]) # x row is time steps in (s)
if phase == "descent":
descent_crit = int(crit_vals["descent_crit"])
deltat_crit = pd.Timedelta(spl_times[descent_crit], unit="s")
oder = der.loc[:deltat_crit]
elif phase == "bottom":
descent_crit = int(crit_vals["descent_crit"])
deltat1 = pd.Timedelta(spl_times[descent_crit], unit="s")
ascent_crit = int(crit_vals["ascent_crit"])
deltat2 = pd.Timedelta(spl_times[ascent_crit], unit="s")
oder = der[(der.index >= deltat1) & (der.index <= deltat2)]
elif phase == "ascent":
ascent_crit = int(crit_vals["ascent_crit"])
deltat_crit = pd.Timedelta(spl_times[ascent_crit], unit="s")
oder = der.loc[deltat_crit:]
elif phase is None:
oder = der
else:
msg = "`phase` must be 'descent', 'bottom' or 'ascent'"
logger.error(msg)
raise KeyError(msg)
return oder
def _get_dive_deriv_stats(self, diveNo):
"""Calculate stats for the depth derivative of a given dive
"""
desc = self.get_dive_deriv(diveNo, "descent")
bott = self.get_dive_deriv(diveNo, "bottom")
asc = self.get_dive_deriv(diveNo, "ascent")
# Rename DataFrame to match diveNo
desc_sts = (pd.DataFrame(desc.describe().iloc[1:]).transpose()
.add_prefix("descD_").rename({"y": diveNo}))
bott_sts = (pd.DataFrame(bott.describe().iloc[1:]).transpose()
.add_prefix("bottD_").rename({"y": diveNo}))
asc_sts = (pd.DataFrame(asc.describe().iloc[1:]).transpose()
.add_prefix("ascD_").rename({"y": diveNo}))
sts = pd.merge(desc_sts, bott_sts, left_index=True,
right_index=True)
sts = pd.merge(sts, asc_sts, left_index=True, right_index=True)
return sts
def time_budget(self, ignore_z=True, ignore_du=True):
"""Summary of wet/dry activities at the broadest time scale
Parameters
----------
ignore_z : bool, optional
Whether to ignore trivial aquatic periods.
ignore_du : bool, optional
Whether to ignore diving and underwater periods.
Returns
-------
out : pandas.DataFrame
DataFrame indexed by phase id, with categorical activity label
for each phase, and beginning and ending times.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd("TDRPhases")
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.time_budget(ignore_z=True,
... ignore_du=True) # doctest: +ELLIPSIS
beg phase_label end
phase_id
1 2002-01-05 ... L 2002-01-05 ...
...
"""
phase_lab = self.wet_dry["phase_label"]
idx_name = phase_lab.index.name
labels = phase_lab.reset_index()
if ignore_z:
labels = labels.mask(labels == "Z", "L")
if ignore_du:
labels = labels.mask((labels == "U") | (labels == "D"), "W")
grp_key = rle_key(labels["phase_label"]).rename("phase_id")
labels_grp = labels.groupby(grp_key)
begs = labels_grp.first().rename(columns={idx_name: "beg"})
ends = labels_grp.last()[idx_name].rename("end")
return pd.concat((begs, ends), axis=1)
def stamp_dives(self, ignore_z=True):
"""Identify the wet activity phase corresponding to each dive
Parameters
----------
ignore_z : bool, optional
Whether to ignore trivial aquatic periods.
Returns
-------
out : pandas.DataFrame
DataFrame indexed by dive ID, and three columns identifying
            which phase they are in, and the beginning and ending time
stamps.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd("TDRPhases")
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.stamp_dives(ignore_z=True) # doctest: +ELLIPSIS
phase_id beg end
dive_id
1 2 2002-01-05 ... 2002-01-06 ...
"""
phase_lab = self.wet_dry["phase_label"]
idx_name = phase_lab.index.name
# "U" and "D" considered as "W" here
phase_lab = phase_lab.mask(phase_lab.isin(["U", "D"]), "W")
if ignore_z:
phase_lab = phase_lab.mask(phase_lab == "Z", "L")
dive_ids = self.get_dives_details("row_ids", columns="dive_id")
grp_key = rle_key(phase_lab).rename("phase_id")
isdive = dive_ids > 0
merged = (pd.concat((grp_key, dive_ids, phase_lab), axis=1)
.loc[isdive, :].reset_index())
        # Reset index to use in first() and last()
merged_grp = merged.groupby("phase_id")
dives_ll = []
for name, group in merged_grp:
dives_uniq = pd.Series(group["dive_id"].unique(),
name="dive_id")
beg = [group[idx_name].iloc[0]] * dives_uniq.size
end = [group[idx_name].iloc[-1]] * dives_uniq.size
dive_df = pd.DataFrame({'phase_id': [name] * dives_uniq.size,
'beg': beg,
'end': end}, index=dives_uniq)
dives_ll.append(dive_df)
dives_all = pd.concat(dives_ll)
return dives_all | 0.862988 | 0.258595 |
import logging
import pandas as pd
from skdiveMove.tdrsource import TDRSource
from skdiveMove.core import robjs, cv, pandas2ri, diveMove
from skdiveMove.helpers import _append_xr_attr
logger = logging.getLogger(__name__)
# Add the null handler if importing as a library; whatever is using this
# library should set up logging.basicConfig() as needed
logger.addHandler(logging.NullHandler())
class ZOC(TDRSource):
"""Perform zero offset correction
    See help(TDRSource) for inherited attributes.
Attributes
----------
zoc_params
depth_zoc
zoc_method : str
Name of the ZOC method used.
zoc_filters : pandas.DataFrame
DataFrame with output filters for method="filter"
"""
def __init__(self, *args, **kwargs):
"""Initialize ZOC instance
Parameters
----------
*args : positional arguments
Passed to :meth:`TDRSource.__init__`
**kwargs : keyword arguments
Passed to :meth:`TDRSource.__init__`
"""
TDRSource.__init__(self, *args, **kwargs)
self.zoc_method = None
self._zoc_params = None
self._depth_zoc = None
self.zoc_filters = None
def __str__(self):
base = TDRSource.__str__(self)
meth, params = self.zoc_params
return (base +
("\n{0:<20} {1}\n{2:<20} {3}"
.format("ZOC method:", meth, "ZOC parameters:", params)))
def _offset_depth(self, offset=0):
"""Perform ZOC with "offset" method
Parameters
----------
offset : float, optional
Value to subtract from measured depth.
Notes
-----
More details in diveMove's ``calibrateDepth`` function.
"""
# Retrieve copy of depth from our own property
depth = self.depth
self.zoc_method = "offset"
self._zoc_params = dict(offset=offset)
depth_zoc = depth - offset
depth_zoc[depth_zoc < 0] = 0
_append_xr_attr(depth_zoc, "history", "ZOC")
self._depth_zoc = depth_zoc
def _filter_depth(self, k, probs, depth_bounds=None, na_rm=True):
"""Perform ZOC with "filter" method
Parameters
----------
k : array_like
probs : array_like
        depth_bounds : array_like, optional
            Minimum and maximum depth to consider; defaults to the range
            of the measured depth.
        na_rm : bool, optional
            Whether to ignore missing values. Default is True.
Notes
-----
More details in diveMove's ``calibrateDepth`` function.
"""
self.zoc_method = "filter"
# Retrieve copy of depth from our own property
depth = self.depth
depth_ser = depth.to_series()
self._zoc_params = dict(k=k, probs=probs, depth_bounds=depth_bounds,
na_rm=na_rm)
depthmtx = self._depth_filter_r(depth_ser, **self._zoc_params)
depth_zoc = depthmtx.pop("depth_adj")
depth_zoc[depth_zoc < 0] = 0
depth_zoc = depth_zoc.rename("depth").to_xarray()
depth_zoc.attrs = depth.attrs
_append_xr_attr(depth_zoc, "history", "ZOC")
self._depth_zoc = depth_zoc
self.zoc_filters = depthmtx
def zoc(self, method="filter", **kwargs):
"""Apply zero offset correction to depth measurements
Parameters
----------
method : {"filter", "offset"}
Name of method to use for zero offset correction.
**kwargs : optional keyword arguments
            Passed to the chosen method (:meth:`_offset_depth`,
            :meth:`_filter_depth`)
Notes
-----
More details in diveMove's ``calibrateDepth`` function.
Examples
--------
ZOC using the "offset" method
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> tdrX.zoc("offset", offset=3)
Using the "filter" method
>>> # Window lengths and probabilities
>>> DB = [-2, 5]
>>> K = [3, 5760]
>>> P = [0.5, 0.02]
>>> tdrX.zoc(k=K, probs=P, depth_bounds=DB)
Plot the filters that were applied
>>> tdrX.plot_zoc(ylim=[-1, 10]) # doctest: +ELLIPSIS
(<Figure ... with 3 Axes>, array([<AxesSubplot:...>,
<AxesSubplot:...>, <AxesSubplot:...>], dtype=object))
"""
if method == "offset":
offset = kwargs.pop("offset", 0)
self._offset_depth(offset)
elif method == "filter":
k = kwargs.pop("k") # must exist
P = kwargs.pop("probs") # must exist
# Default depth bounds equal measured depth range
DB = kwargs.pop("depth_bounds",
[self.depth.min(),
self.depth.max()])
# default as in `_depth_filter`
na_rm = kwargs.pop("na_rm", True)
self._filter_depth(k=k, probs=P, depth_bounds=DB, na_rm=na_rm)
else:
logger.warning("Method {} is not implemented"
.format(method))
logger.info("Finished ZOC")
def _depth_filter_r(self, depth, k, probs, depth_bounds, na_rm=True):
"""Filter method for zero offset correction via `diveMove`
Parameters
----------
depth : pandas.Series
k : array_like
probs : array_like
depth_bounds : array_like
na_rm : bool, optional
Returns
-------
out : pandas.DataFrame
Time-indexed DataFrame with a column for each filter applied, and a
column `depth_adj` for corrected depth.
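        Notes
        -----
        As a sketch of the column naming scheme used below: with
        ``k=[3, 5760]`` and ``probs=[0.5, 0.02]`` (the values shown in
        :meth:`zoc`), the output filter columns would be named
        ``k3_p0.5`` and ``k5760_p0.02``, followed by ``depth_adj``.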
"""
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
depthmtx = diveMove._depthFilter(depth,
pd.Series(k), pd.Series(probs),
pd.Series(depth_bounds),
na_rm)
colnames = ["k{0}_p{1}".format(k, p) for k, p in zip(k, probs)]
colnames.append("depth_adj")
return pd.DataFrame(depthmtx, index=depth.index, columns=colnames)
def _get_depth(self):
return self._depth_zoc
depth_zoc = property(_get_depth)
"""Depth array accessor
Returns
-------
xarray.DataArray
"""
def _get_params(self):
return (self.zoc_method, self._zoc_params)
zoc_params = property(_get_params)
"""Parameters used with method for zero-offset correction
Returns
-------
method : str
Method used for ZOC.
params : dict
Dictionary with parameters and values used for ZOC.
""" | scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/zoc.py | zoc.py | import logging
import pandas as pd
from skdiveMove.tdrsource import TDRSource
from skdiveMove.core import robjs, cv, pandas2ri, diveMove
from skdiveMove.helpers import _append_xr_attr
logger = logging.getLogger(__name__)
# Add the null handler if importing as a library; whatever is using this
# library should set up logging.basicConfig() as needed
logger.addHandler(logging.NullHandler())
class ZOC(TDRSource):
"""Perform zero offset correction
    See help(TDRSource) for inherited attributes.
Attributes
----------
zoc_params
depth_zoc
zoc_method : str
Name of the ZOC method used.
zoc_filters : pandas.DataFrame
DataFrame with output filters for method="filter"
"""
def __init__(self, *args, **kwargs):
"""Initialize ZOC instance
Parameters
----------
*args : positional arguments
Passed to :meth:`TDRSource.__init__`
**kwargs : keyword arguments
Passed to :meth:`TDRSource.__init__`
"""
TDRSource.__init__(self, *args, **kwargs)
self.zoc_method = None
self._zoc_params = None
self._depth_zoc = None
self.zoc_filters = None
def __str__(self):
base = TDRSource.__str__(self)
meth, params = self.zoc_params
return (base +
("\n{0:<20} {1}\n{2:<20} {3}"
.format("ZOC method:", meth, "ZOC parameters:", params)))
def _offset_depth(self, offset=0):
"""Perform ZOC with "offset" method
Parameters
----------
offset : float, optional
Value to subtract from measured depth.
Notes
-----
More details in diveMove's ``calibrateDepth`` function.
"""
# Retrieve copy of depth from our own property
depth = self.depth
self.zoc_method = "offset"
self._zoc_params = dict(offset=offset)
depth_zoc = depth - offset
depth_zoc[depth_zoc < 0] = 0
_append_xr_attr(depth_zoc, "history", "ZOC")
self._depth_zoc = depth_zoc
def _filter_depth(self, k, probs, depth_bounds=None, na_rm=True):
"""Perform ZOC with "filter" method
Parameters
----------
k : array_like
probs : array_like
        depth_bounds : array_like, optional
            Minimum and maximum depth to consider; defaults to the range
            of the measured depth.
        na_rm : bool, optional
            Whether to ignore missing values. Default is True.
Notes
-----
More details in diveMove's ``calibrateDepth`` function.
"""
self.zoc_method = "filter"
# Retrieve copy of depth from our own property
depth = self.depth
depth_ser = depth.to_series()
self._zoc_params = dict(k=k, probs=probs, depth_bounds=depth_bounds,
na_rm=na_rm)
depthmtx = self._depth_filter_r(depth_ser, **self._zoc_params)
depth_zoc = depthmtx.pop("depth_adj")
depth_zoc[depth_zoc < 0] = 0
depth_zoc = depth_zoc.rename("depth").to_xarray()
depth_zoc.attrs = depth.attrs
_append_xr_attr(depth_zoc, "history", "ZOC")
self._depth_zoc = depth_zoc
self.zoc_filters = depthmtx
def zoc(self, method="filter", **kwargs):
"""Apply zero offset correction to depth measurements
Parameters
----------
method : {"filter", "offset"}
Name of method to use for zero offset correction.
**kwargs : optional keyword arguments
            Passed to the chosen method (:meth:`_offset_depth`,
            :meth:`_filter_depth`)
Notes
-----
More details in diveMove's ``calibrateDepth`` function.
Examples
--------
ZOC using the "offset" method
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> tdrX.zoc("offset", offset=3)
Using the "filter" method
>>> # Window lengths and probabilities
>>> DB = [-2, 5]
>>> K = [3, 5760]
>>> P = [0.5, 0.02]
>>> tdrX.zoc(k=K, probs=P, depth_bounds=DB)
Plot the filters that were applied
>>> tdrX.plot_zoc(ylim=[-1, 10]) # doctest: +ELLIPSIS
(<Figure ... with 3 Axes>, array([<AxesSubplot:...>,
<AxesSubplot:...>, <AxesSubplot:...>], dtype=object))
"""
if method == "offset":
offset = kwargs.pop("offset", 0)
self._offset_depth(offset)
elif method == "filter":
k = kwargs.pop("k") # must exist
P = kwargs.pop("probs") # must exist
# Default depth bounds equal measured depth range
DB = kwargs.pop("depth_bounds",
[self.depth.min(),
self.depth.max()])
# default as in `_depth_filter`
na_rm = kwargs.pop("na_rm", True)
self._filter_depth(k=k, probs=P, depth_bounds=DB, na_rm=na_rm)
else:
logger.warning("Method {} is not implemented"
.format(method))
logger.info("Finished ZOC")
def _depth_filter_r(self, depth, k, probs, depth_bounds, na_rm=True):
"""Filter method for zero offset correction via `diveMove`
Parameters
----------
depth : pandas.Series
k : array_like
probs : array_like
depth_bounds : array_like
na_rm : bool, optional
Returns
-------
out : pandas.DataFrame
Time-indexed DataFrame with a column for each filter applied, and a
column `depth_adj` for corrected depth.
"""
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
depthmtx = diveMove._depthFilter(depth,
pd.Series(k), pd.Series(probs),
pd.Series(depth_bounds),
na_rm)
colnames = ["k{0}_p{1}".format(k, p) for k, p in zip(k, probs)]
colnames.append("depth_adj")
return pd.DataFrame(depthmtx, index=depth.index, columns=colnames)
def _get_depth(self):
return self._depth_zoc
depth_zoc = property(_get_depth)
"""Depth array accessor
Returns
-------
xarray.DataArray
"""
def _get_params(self):
return (self.zoc_method, self._zoc_params)
zoc_params = property(_get_params)
"""Parameters used with method for zero-offset correction
Returns
-------
method : str
Method used for ZOC.
params : dict
Dictionary with parameters and values used for ZOC.
""" | 0.844088 | 0.329109 |
import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
def _night(times, sunrise_time, sunset_time):
"""Construct Series with sunset and sunrise times for given dates
Parameters
----------
times : pandas.Series
        (N,) array of timestamps (e.g. the index of the depth series).
sunrise_time : str
sunset_time : str
Returns
-------
tuple
Two pandas.Series (sunsets, sunrises)
"""
tmin = times.min().strftime("%Y-%m-%d ")
tmax = times.max().strftime("%Y-%m-%d ")
sunsets = pd.date_range(start=tmin + sunset_time,
end=tmax + sunset_time,
freq="1D")
tmin1 = (times.min() + pd.Timedelta(1, unit="d")).strftime("%Y-%m-%d ")
tmax1 = (times.max() + pd.Timedelta(1, unit="d")).strftime("%Y-%m-%d ")
sunrises = pd.date_range(start=tmin1 + sunrise_time,
end=tmax1 + sunrise_time,
freq="1D")
return (sunsets, sunrises)
def _plot_dry_time(times_dataframe, ax):
"""Fill a vertical span between beginning/ending times in DataFrame
Parameters
----------
times_dataframe : pandas.DataFrame
ax: Axes object
"""
for idx, row in times_dataframe.iterrows():
ax.axvspan(row[0], row[1], ymin=0.99, facecolor="tan",
edgecolor=None, alpha=0.6)
def plot_tdr(depth, concur_vars=None, xlim=None, depth_lim=None,
xlab="time [dd-mmm hh:mm]", ylab_depth="depth [m]",
concur_var_titles=None, xlab_format="%d-%b %H:%M",
sunrise_time="06:00:00", sunset_time="18:00:00",
night_col="gray", dry_time=None, phase_cat=None,
key=True, **kwargs):
"""Plot time, depth, and other concurrent data
Parameters
----------
depth : pandas.Series
(N,) array with depth measurements.
concur_vars : pandas.Series or pandas.Dataframe
(N,) Series or dataframe with additional data to plot in subplot.
xlim : 2-tuple/list, optional
        Minimum and maximum limits for ``x`` axis.
ylim : 2-tuple/list, optional
Minimum and maximum limits for ``y`` axis for data other than depth.
depth_lim : 2-tuple/list, optional
Minimum and maximum limits for depth to plot.
xlab : str, optional
Label for ``x`` axis.
ylab_depth : str, optional
Label for ``y`` axis for depth.
concur_var_titles : str or list, optional
String or list of strings with y-axis labels for `concur_vars`.
xlab_format : str, optional
Format string for formatting the x axis.
sunrise_time : str, optional
Time of sunrise, in 24 hr format. This is used for shading night
time.
sunset_time : str, optional
Time of sunset, in 24 hr format. This is used for shading night
time.
night_col : str, optional
Color for shading night time.
dry_time : pandas.DataFrame, optional
Two-column DataFrame with beginning and ending times corresponding
to periods considered to be dry.
phase_cat : pandas.Series, optional
Categorical series dividing rows into sections.
**kwargs : optional keyword arguments
Returns
-------
tuple
Pyplot Figure and Axes instances.
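    Examples
    --------
    A hypothetical sketch; ``depth_series`` stands for a datetime-indexed
    pandas.Series of depth, e.g. from a calibrated TDR object, and the
    depth limits are placeholders:
    >>> from skdiveMove.plotting import plot_tdr  # doctest: +SKIP
    >>> fig, ax = plot_tdr(depth_series,
    ...                    depth_lim=[-5, 100])  # doctest: +SKIP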
"""
sunsets, sunrises = _night(depth.index,
sunset_time=sunset_time,
sunrise_time=sunrise_time)
def _plot_phase_cat(ser, ax, legend=True):
"""Scatter plot and legend of series coloured by categories"""
cats = phase_cat.cat.categories
cat_codes = phase_cat.cat.codes
isna_ser = ser.isna()
ser_nona = ser.dropna()
scatter = ax.scatter(ser_nona.index, ser_nona, s=12, marker="o",
c=cat_codes[~isna_ser])
if legend:
handles, _ = scatter.legend_elements()
ax.legend(handles, cats, loc="lower right",
ncol=len(cat_codes))
if concur_vars is None:
fig, axs = plt.subplots(1, 1)
axs.set_ylabel(ylab_depth)
depth.plot(ax=axs, color="k", **kwargs)
axs.set_xlabel("")
axs.axhline(0, linestyle="--", linewidth=0.75, color="k")
for beg, end in zip(sunsets, sunrises):
axs.axvspan(beg, end, facecolor=night_col,
edgecolor=None, alpha=0.3)
if (phase_cat is not None):
_plot_phase_cat(depth, axs)
if (dry_time is not None):
_plot_dry_time(dry_time, axs)
if (xlim is not None):
axs.set_xlim(xlim)
if (depth_lim is not None):
axs.set_ylim(depth_lim)
axs.invert_yaxis()
else:
full_df = pd.concat((depth, concur_vars), axis=1)
nplots = full_df.shape[1]
depth_ser = full_df.iloc[:, 0]
concur_df = full_df.iloc[:, 1:]
fig, axs = plt.subplots(nplots, 1, sharex=True)
axs[0].set_ylabel(ylab_depth)
depth_ser.plot(ax=axs[0], color="k", **kwargs)
axs[0].set_xlabel("")
axs[0].axhline(0, linestyle="--", linewidth=0.75, color="k")
concur_df.plot(ax=axs[1:], subplots=True, legend=False, **kwargs)
for i, col in enumerate(concur_df.columns):
if (concur_var_titles is not None):
axs[i + 1].set_ylabel(concur_var_titles[i])
else:
axs[i + 1].set_ylabel(col)
axs[i + 1].axhline(0, linestyle="--",
linewidth=0.75, color="k")
if (xlim is not None):
axs[i + 1].set_xlim(xlim)
for i, ax in enumerate(axs):
for beg, end in zip(sunsets, sunrises):
ax.axvspan(beg, end, facecolor=night_col,
edgecolor=None, alpha=0.3)
if (dry_time is not None):
_plot_dry_time(dry_time, ax)
if (phase_cat is not None):
_plot_phase_cat(depth_ser, axs[0])
for i, col in enumerate(concur_df.columns):
_plot_phase_cat(concur_df.loc[:, col], axs[i + 1], False)
if (depth_lim is not None):
axs[0].set_ylim(depth_lim)
axs[0].invert_yaxis()
fig.tight_layout()
return (fig, axs)
def _plot_zoc_filters(depth, zoc_filters, xlim=None, ylim=None,
ylab="Depth [m]", **kwargs):
"""Plot zero offset correction filters
Parameters
----------
depth : pandas.Series
Measured depth time series, indexed by datetime.
zoc_filters : pandas.DataFrame
DataFrame with ZOC filters in columns. Must have the same number
of records as `depth`.
xlim : 2-tuple/list
ylim : 2-tuple/list
ylab : str
Label for `y` axis.
**kwargs : optional keyword arguments
Passed to `matplotlib.pyplot.subplots`. It can be any keyword,
except for `sharex` or `sharey`.
Returns
-------
tuple
Pyplot Figure and Axes instances.
"""
nfilters = zoc_filters.shape[1]
npanels = 3
lastflts = [1] # col idx of second filters
if nfilters > 2: # append col idx of last filter
lastflts.append(nfilters - 1)
fig, axs = plt.subplots(npanels, 1, sharex=True, sharey=True, **kwargs)
if xlim:
axs[0].set_xlim(xlim)
else:
depth_nona = depth.dropna()
axs[0].set_xlim((depth_nona.index.min(),
depth_nona.index.max()))
if ylim:
axs[0].set_ylim(ylim)
else:
axs[0].set_ylim((depth.min(), depth.max()))
for ax in axs:
ax.set_ylabel(ylab)
ax.invert_yaxis()
ax.axhline(0, linestyle="--", linewidth=0.75, color="k")
depth.plot(ax=axs[0], color="lightgray", label="input")
axs[0].legend(loc="lower left")
# Need to plot legend for input depth here
filter_names = zoc_filters.columns
(zoc_filters.iloc[:, 0]
.plot(ax=axs[1], label=filter_names[0])) # first filter
for i in lastflts:
zoc_filters.iloc[:, i].plot(ax=axs[1], label=filter_names[i])
axs[1].legend(loc="lower left")
# ZOC depth
depth_zoc = depth - zoc_filters.iloc[:, -1]
depth_zoc_label = ("input - {}"
.format(zoc_filters.columns[-1]))
(depth_zoc
.plot(ax=axs[2], color="k", rot=0, label=depth_zoc_label))
axs[2].legend(loc="lower left")
axs[2].set_xlabel("")
fig.tight_layout()
return (fig, axs)
def plot_dive_model(x, depth_s, depth_deriv, d_crit, a_crit,
d_crit_rate, a_crit_rate, leg_title=None, **kwargs):
"""Plot dive model
Parameters
----------
x : pandas.Series
Time-indexed depth measurements.
depth_s : pandas.Series
Time-indexed smoothed depth.
depth_deriv : pandas.Series
Time-indexed derivative of depth smoothing spline.
d_crit : int
Integer denoting the index where the descent ends in the observed
time series.
a_crit : int
Integer denoting the index where the ascent begins in the observed
time series.
d_crit_rate : float
Vertical rate of descent corresponding to the quantile used.
    a_crit_rate : float
Vertical rate of ascent corresponding to the quantile used.
leg_title : str, optional
Title for the plot legend (e.g. dive number being plotted).
**kwargs : optional keyword arguments
Passed to `matplotlib.pyplot.subplots`. It can be any keyword,
except `sharex`.
Returns
-------
tuple
Pyplot Figure and Axes instances.
Notes
-----
The function is homologous to diveMove's `plotDiveModel`.
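    Examples
    --------
    A hypothetical sketch; the first three arguments stand for the
    observed depth, smoothed depth, and spline derivative of a single
    dive, and the critical indices and rates below are placeholders:
    >>> from skdiveMove.plotting import plot_dive_model  # doctest: +SKIP
    >>> fig, axs = plot_dive_model(depth_obs, depth_smooth, depth_deriv,
    ...                            d_crit=10, a_crit=50,
    ...                            d_crit_rate=0.2, a_crit_rate=-0.2,
    ...                            leg_title="Dive 2")  # doctest: +SKIP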
"""
d_crit_time = x.index[d_crit]
a_crit_time = x.index[a_crit]
fig, axs = plt.subplots(2, 1, sharex=True, **kwargs)
ax1, ax2 = axs
ax1.invert_yaxis()
ax1.set_ylabel("Depth")
ax2.set_ylabel("First derivative")
ax1.plot(x, marker="o", linewidth=0.7, color="k", label="input")
ax1.plot(depth_s, "--", label="smooth")
ax1.plot(x.iloc[:d_crit + 1], color="C1", label="descent")
ax1.plot(x.iloc[a_crit:], color="C2", label="ascent")
ax1.legend(loc="upper center", title=leg_title, ncol=2)
ax2.plot(depth_deriv, linewidth=0.5, color="k") # derivative
dstyle = dict(marker=".", linestyle="None")
ax2.plot(depth_deriv[depth_deriv > d_crit_rate].loc[:d_crit_time],
color="C1", **dstyle) # descent
ax2.plot(depth_deriv[depth_deriv < a_crit_rate].loc[a_crit_time:],
color="C2", **dstyle) # ascent
qstyle = dict(linestyle="--", linewidth=0.5, color="k")
ax2.axhline(d_crit_rate, **qstyle)
ax2.axhline(a_crit_rate, **qstyle)
ax2.axvline(d_crit_time, **qstyle)
ax2.axvline(a_crit_time, **qstyle)
# Text annotation
qiter = zip(x.index[[0, 0]],
[d_crit_rate, a_crit_rate],
[r"descent $\hat{q}$", r"ascent $\hat{q}$"],
["bottom", "top"])
for xpos, qval, txt, valign in qiter:
ax2.text(xpos, qval, txt, va=valign)
titer = zip([d_crit_time, a_crit_time], [0, 0],
["descent", "ascent"],
["right", "left"])
for ttime, ypos, txt, halign in titer:
ax2.text(ttime, ypos, txt, ha=halign)
return (fig, (ax1, ax2))
if __name__ == '__main__':
from .tdr import get_diveMove_sample_data
tdrX = get_diveMove_sample_data()
print(tdrX) | scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/plotting.py | plotting.py | import pandas as pd
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
def _night(times, sunrise_time, sunset_time):
"""Construct Series with sunset and sunrise times for given dates
Parameters
----------
times : pandas.Series
        (N,) array of timestamps (e.g. the index of the depth series).
sunrise_time : str
sunset_time : str
Returns
-------
tuple
Two pandas.Series (sunsets, sunrises)
"""
tmin = times.min().strftime("%Y-%m-%d ")
tmax = times.max().strftime("%Y-%m-%d ")
sunsets = pd.date_range(start=tmin + sunset_time,
end=tmax + sunset_time,
freq="1D")
tmin1 = (times.min() + pd.Timedelta(1, unit="d")).strftime("%Y-%m-%d ")
tmax1 = (times.max() + pd.Timedelta(1, unit="d")).strftime("%Y-%m-%d ")
sunrises = pd.date_range(start=tmin1 + sunrise_time,
end=tmax1 + sunrise_time,
freq="1D")
return (sunsets, sunrises)
def _plot_dry_time(times_dataframe, ax):
"""Fill a vertical span between beginning/ending times in DataFrame
Parameters
----------
times_dataframe : pandas.DataFrame
ax: Axes object
"""
for idx, row in times_dataframe.iterrows():
ax.axvspan(row[0], row[1], ymin=0.99, facecolor="tan",
edgecolor=None, alpha=0.6)
def plot_tdr(depth, concur_vars=None, xlim=None, depth_lim=None,
xlab="time [dd-mmm hh:mm]", ylab_depth="depth [m]",
concur_var_titles=None, xlab_format="%d-%b %H:%M",
sunrise_time="06:00:00", sunset_time="18:00:00",
night_col="gray", dry_time=None, phase_cat=None,
key=True, **kwargs):
"""Plot time, depth, and other concurrent data
Parameters
----------
depth : pandas.Series
(N,) array with depth measurements.
concur_vars : pandas.Series or pandas.Dataframe
(N,) Series or dataframe with additional data to plot in subplot.
xlim : 2-tuple/list, optional
        Minimum and maximum limits for ``x`` axis.
ylim : 2-tuple/list, optional
Minimum and maximum limits for ``y`` axis for data other than depth.
depth_lim : 2-tuple/list, optional
Minimum and maximum limits for depth to plot.
xlab : str, optional
Label for ``x`` axis.
ylab_depth : str, optional
Label for ``y`` axis for depth.
concur_var_titles : str or list, optional
String or list of strings with y-axis labels for `concur_vars`.
xlab_format : str, optional
Format string for formatting the x axis.
sunrise_time : str, optional
Time of sunrise, in 24 hr format. This is used for shading night
time.
sunset_time : str, optional
Time of sunset, in 24 hr format. This is used for shading night
time.
night_col : str, optional
Color for shading night time.
dry_time : pandas.DataFrame, optional
Two-column DataFrame with beginning and ending times corresponding
to periods considered to be dry.
phase_cat : pandas.Series, optional
Categorical series dividing rows into sections.
**kwargs : optional keyword arguments
Returns
-------
tuple
Pyplot Figure and Axes instances.
"""
sunsets, sunrises = _night(depth.index,
sunset_time=sunset_time,
sunrise_time=sunrise_time)
def _plot_phase_cat(ser, ax, legend=True):
"""Scatter plot and legend of series coloured by categories"""
cats = phase_cat.cat.categories
cat_codes = phase_cat.cat.codes
isna_ser = ser.isna()
ser_nona = ser.dropna()
scatter = ax.scatter(ser_nona.index, ser_nona, s=12, marker="o",
c=cat_codes[~isna_ser])
if legend:
handles, _ = scatter.legend_elements()
ax.legend(handles, cats, loc="lower right",
ncol=len(cat_codes))
if concur_vars is None:
fig, axs = plt.subplots(1, 1)
axs.set_ylabel(ylab_depth)
depth.plot(ax=axs, color="k", **kwargs)
axs.set_xlabel("")
axs.axhline(0, linestyle="--", linewidth=0.75, color="k")
for beg, end in zip(sunsets, sunrises):
axs.axvspan(beg, end, facecolor=night_col,
edgecolor=None, alpha=0.3)
if (phase_cat is not None):
_plot_phase_cat(depth, axs)
if (dry_time is not None):
_plot_dry_time(dry_time, axs)
if (xlim is not None):
axs.set_xlim(xlim)
if (depth_lim is not None):
axs.set_ylim(depth_lim)
axs.invert_yaxis()
else:
full_df = pd.concat((depth, concur_vars), axis=1)
nplots = full_df.shape[1]
depth_ser = full_df.iloc[:, 0]
concur_df = full_df.iloc[:, 1:]
fig, axs = plt.subplots(nplots, 1, sharex=True)
axs[0].set_ylabel(ylab_depth)
depth_ser.plot(ax=axs[0], color="k", **kwargs)
axs[0].set_xlabel("")
axs[0].axhline(0, linestyle="--", linewidth=0.75, color="k")
concur_df.plot(ax=axs[1:], subplots=True, legend=False, **kwargs)
for i, col in enumerate(concur_df.columns):
if (concur_var_titles is not None):
axs[i + 1].set_ylabel(concur_var_titles[i])
else:
axs[i + 1].set_ylabel(col)
axs[i + 1].axhline(0, linestyle="--",
linewidth=0.75, color="k")
if (xlim is not None):
axs[i + 1].set_xlim(xlim)
for i, ax in enumerate(axs):
for beg, end in zip(sunsets, sunrises):
ax.axvspan(beg, end, facecolor=night_col,
edgecolor=None, alpha=0.3)
if (dry_time is not None):
_plot_dry_time(dry_time, ax)
if (phase_cat is not None):
_plot_phase_cat(depth_ser, axs[0])
for i, col in enumerate(concur_df.columns):
_plot_phase_cat(concur_df.loc[:, col], axs[i + 1], False)
if (depth_lim is not None):
axs[0].set_ylim(depth_lim)
axs[0].invert_yaxis()
fig.tight_layout()
return (fig, axs)
def _plot_zoc_filters(depth, zoc_filters, xlim=None, ylim=None,
ylab="Depth [m]", **kwargs):
"""Plot zero offset correction filters
Parameters
----------
depth : pandas.Series
Measured depth time series, indexed by datetime.
zoc_filters : pandas.DataFrame
DataFrame with ZOC filters in columns. Must have the same number
of records as `depth`.
xlim : 2-tuple/list
ylim : 2-tuple/list
ylab : str
Label for `y` axis.
**kwargs : optional keyword arguments
Passed to `matplotlib.pyplot.subplots`. It can be any keyword,
except for `sharex` or `sharey`.
Returns
-------
tuple
Pyplot Figure and Axes instances.
"""
nfilters = zoc_filters.shape[1]
npanels = 3
lastflts = [1] # col idx of second filters
if nfilters > 2: # append col idx of last filter
lastflts.append(nfilters - 1)
fig, axs = plt.subplots(npanels, 1, sharex=True, sharey=True, **kwargs)
if xlim:
axs[0].set_xlim(xlim)
else:
depth_nona = depth.dropna()
axs[0].set_xlim((depth_nona.index.min(),
depth_nona.index.max()))
if ylim:
axs[0].set_ylim(ylim)
else:
axs[0].set_ylim((depth.min(), depth.max()))
for ax in axs:
ax.set_ylabel(ylab)
ax.invert_yaxis()
ax.axhline(0, linestyle="--", linewidth=0.75, color="k")
depth.plot(ax=axs[0], color="lightgray", label="input")
axs[0].legend(loc="lower left")
# Need to plot legend for input depth here
filter_names = zoc_filters.columns
(zoc_filters.iloc[:, 0]
.plot(ax=axs[1], label=filter_names[0])) # first filter
for i in lastflts:
zoc_filters.iloc[:, i].plot(ax=axs[1], label=filter_names[i])
axs[1].legend(loc="lower left")
# ZOC depth
depth_zoc = depth - zoc_filters.iloc[:, -1]
depth_zoc_label = ("input - {}"
.format(zoc_filters.columns[-1]))
(depth_zoc
.plot(ax=axs[2], color="k", rot=0, label=depth_zoc_label))
axs[2].legend(loc="lower left")
axs[2].set_xlabel("")
fig.tight_layout()
return (fig, axs)
def plot_dive_model(x, depth_s, depth_deriv, d_crit, a_crit,
d_crit_rate, a_crit_rate, leg_title=None, **kwargs):
"""Plot dive model
Parameters
----------
x : pandas.Series
Time-indexed depth measurements.
depth_s : pandas.Series
Time-indexed smoothed depth.
depth_deriv : pandas.Series
Time-indexed derivative of depth smoothing spline.
d_crit : int
Integer denoting the index where the descent ends in the observed
time series.
a_crit : int
Integer denoting the index where the ascent begins in the observed
time series.
d_crit_rate : float
Vertical rate of descent corresponding to the quantile used.
    a_crit_rate : float
Vertical rate of ascent corresponding to the quantile used.
leg_title : str, optional
Title for the plot legend (e.g. dive number being plotted).
**kwargs : optional keyword arguments
Passed to `matplotlib.pyplot.subplots`. It can be any keyword,
except `sharex`.
Returns
-------
tuple
Pyplot Figure and Axes instances.
Notes
-----
The function is homologous to diveMove's `plotDiveModel`.
"""
d_crit_time = x.index[d_crit]
a_crit_time = x.index[a_crit]
fig, axs = plt.subplots(2, 1, sharex=True, **kwargs)
ax1, ax2 = axs
ax1.invert_yaxis()
ax1.set_ylabel("Depth")
ax2.set_ylabel("First derivative")
ax1.plot(x, marker="o", linewidth=0.7, color="k", label="input")
ax1.plot(depth_s, "--", label="smooth")
ax1.plot(x.iloc[:d_crit + 1], color="C1", label="descent")
ax1.plot(x.iloc[a_crit:], color="C2", label="ascent")
ax1.legend(loc="upper center", title=leg_title, ncol=2)
ax2.plot(depth_deriv, linewidth=0.5, color="k") # derivative
dstyle = dict(marker=".", linestyle="None")
ax2.plot(depth_deriv[depth_deriv > d_crit_rate].loc[:d_crit_time],
color="C1", **dstyle) # descent
ax2.plot(depth_deriv[depth_deriv < a_crit_rate].loc[a_crit_time:],
color="C2", **dstyle) # ascent
qstyle = dict(linestyle="--", linewidth=0.5, color="k")
ax2.axhline(d_crit_rate, **qstyle)
ax2.axhline(a_crit_rate, **qstyle)
ax2.axvline(d_crit_time, **qstyle)
ax2.axvline(a_crit_time, **qstyle)
# Text annotation
qiter = zip(x.index[[0, 0]],
[d_crit_rate, a_crit_rate],
[r"descent $\hat{q}$", r"ascent $\hat{q}$"],
["bottom", "top"])
for xpos, qval, txt, valign in qiter:
ax2.text(xpos, qval, txt, va=valign)
titer = zip([d_crit_time, a_crit_time], [0, 0],
["descent", "ascent"],
["right", "left"])
for ttime, ypos, txt, halign in titer:
ax2.text(ttime, ypos, txt, ha=halign)
return (fig, (ax1, ax2))
if __name__ == '__main__':
from .tdr import get_diveMove_sample_data
tdrX = get_diveMove_sample_data()
print(tdrX) | 0.867092 | 0.571468 |
import pandas as pd
from skdiveMove.helpers import (get_var_sampling_interval,
_append_xr_attr, _load_dataset)
_SPEED_NAMES = ["velocity", "speed"]
class TDRSource:
"""Define TDR data source
Use xarray.Dataset to ensure pseudo-standard metadata
Attributes
----------
tdr_file : str
String indicating the file where the data comes from.
tdr : xarray.Dataset
Dataset with input data.
depth_name : str
Name of data variable with depth measurements.
time_name : str
Name of the time dimension in the dataset.
has_speed : bool
Whether input data include speed measurements.
speed_name : str
Name of data variable with the speed measurements.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> print(tdrX) # doctest: +ELLIPSIS
Time-Depth Recorder -- Class TDR object ...
"""
def __init__(self, dataset, depth_name="depth", time_name="timestamp",
subsample=None, has_speed=False, tdr_filename=None):
"""Set up attributes for TDRSource objects
Parameters
----------
dataset : xarray.Dataset
Dataset containing depth, and optionally other DataArrays.
depth_name : str, optional
Name of data variable with depth measurements.
time_name : str, optional
Name of the time dimension in the dataset.
subsample : str, optional
Subsample dataset at given frequency specification. See pandas
offset aliases.
has_speed : bool, optional
            Whether the data include speed measurements. Column name must be
one of ["velocity", "speed"].
tdr_filename : str
Name of the file from which `dataset` originated.
"""
self.time_name = time_name
if subsample is not None:
self.tdr = (dataset.resample({time_name: subsample})
.interpolate("linear"))
for vname, da in self.tdr.data_vars.items():
da.attrs["sampling_rate"] = (1.0 /
pd.to_timedelta(subsample)
.seconds)
da.attrs["sampling_rate_units"] = "Hz"
_append_xr_attr(da, "history",
"Resampled to {}\n".format(subsample))
else:
self.tdr = dataset
self.depth_name = depth_name
speed_var = [x for x in list(self.tdr.data_vars.keys())
if x in _SPEED_NAMES]
if speed_var and has_speed:
self.has_speed = True
self.speed_name = speed_var[0]
else:
self.has_speed = False
self.speed_name = None
self.tdr_file = tdr_filename
@classmethod
def read_netcdf(cls, tdr_file, depth_name="depth", time_name="timestamp",
subsample=None, has_speed=False, **kwargs):
"""Instantiate object by loading Dataset from NetCDF file
Parameters
----------
tdr_file : str
As first argument for :func:`xarray.load_dataset`.
depth_name : str, optional
Name of data variable with depth measurements. Default: "depth".
time_name : str, optional
Name of the time dimension in the dataset.
subsample : str, optional
Subsample dataset at given frequency specification. See pandas
offset aliases.
has_speed : bool, optional
            Whether the data include speed measurements. Column name must be
one of ["velocity", "speed"]. Default: False.
**kwargs : optional keyword arguments
Arguments passed to :func:`xarray.load_dataset`.
Returns
-------
obj : TDRSource, ZOC, TDRPhases, or TDR
Class matches the caller.
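        Examples
        --------
        A hypothetical sketch; "dives.nc" is a placeholder for a NetCDF
        file holding a "depth" variable indexed by "timestamp":
        >>> from skdiveMove.tdrsource import TDRSource  # doctest: +SKIP
        >>> tdr = TDRSource.read_netcdf("dives.nc", depth_name="depth",
        ...                             subsample="5s",
        ...                             has_speed=False)  # doctest: +SKIP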
"""
dataset = _load_dataset(tdr_file, **kwargs)
return cls(dataset, depth_name=depth_name, time_name=time_name,
subsample=subsample, has_speed=has_speed,
tdr_filename=tdr_file)
def __str__(self):
x = self.tdr
depth_xr = x[self.depth_name]
depth_ser = depth_xr.to_series()
objcls = ("Time-Depth Recorder -- Class {} object\n"
.format(self.__class__.__name__))
src = "{0:<20} {1}\n".format("Source File", self.tdr_file)
itv = ("{0:<20} {1}\n"
.format("Sampling interval",
get_var_sampling_interval(depth_xr)))
nsamples = "{0:<20} {1}\n".format("Number of Samples",
depth_xr.shape[0])
beg = "{0:<20} {1}\n".format("Sampling Begins",
depth_ser.index[0])
end = "{0:<20} {1}\n".format("Sampling Ends",
depth_ser.index[-1])
dur = "{0:<20} {1}\n".format("Total duration",
depth_ser.index[-1] -
depth_ser.index[0])
drange = "{0:<20} [{1},{2}]\n".format("Measured depth range",
depth_ser.min(),
depth_ser.max())
others = "{0:<20} {1}\n".format("Other variables",
                                        [v for v in list(x.keys())
                                         if v != self.depth_name])
attr_list = "Attributes:\n"
for key, val in sorted(x.attrs.items()):
attr_list += "{0:>35}: {1}\n".format(key, val)
attr_list = attr_list.rstrip("\n")
return (objcls + src + itv + nsamples + beg + end + dur + drange +
others + attr_list)
def _get_depth(self):
return self.tdr[self.depth_name]
depth = property(_get_depth)
"""Return depth array
Returns
-------
xarray.DataArray
"""
def _get_speed(self):
return self.tdr[self.speed_name]
speed = property(_get_speed)
"""Return speed array
Returns
-------
xarray.DataArray
""" | scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/tdrsource.py | tdrsource.py | import pandas as pd
from skdiveMove.helpers import (get_var_sampling_interval,
_append_xr_attr, _load_dataset)
_SPEED_NAMES = ["velocity", "speed"]
class TDRSource:
"""Define TDR data source
Use xarray.Dataset to ensure pseudo-standard metadata
Attributes
----------
tdr_file : str
String indicating the file where the data comes from.
tdr : xarray.Dataset
Dataset with input data.
depth_name : str
Name of data variable with depth measurements.
time_name : str
Name of the time dimension in the dataset.
has_speed : bool
Whether input data include speed measurements.
speed_name : str
Name of data variable with the speed measurements.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> print(tdrX) # doctest: +ELLIPSIS
Time-Depth Recorder -- Class TDR object ...
"""
def __init__(self, dataset, depth_name="depth", time_name="timestamp",
subsample=None, has_speed=False, tdr_filename=None):
"""Set up attributes for TDRSource objects
Parameters
----------
dataset : xarray.Dataset
Dataset containing depth, and optionally other DataArrays.
depth_name : str, optional
Name of data variable with depth measurements.
time_name : str, optional
Name of the time dimension in the dataset.
subsample : str, optional
Subsample dataset at given frequency specification. See pandas
offset aliases.
has_speed : bool, optional
            Whether the data include speed measurements. Column name must be
one of ["velocity", "speed"].
tdr_filename : str
Name of the file from which `dataset` originated.
"""
self.time_name = time_name
if subsample is not None:
self.tdr = (dataset.resample({time_name: subsample})
.interpolate("linear"))
for vname, da in self.tdr.data_vars.items():
da.attrs["sampling_rate"] = (1.0 /
pd.to_timedelta(subsample)
.seconds)
da.attrs["sampling_rate_units"] = "Hz"
_append_xr_attr(da, "history",
"Resampled to {}\n".format(subsample))
else:
self.tdr = dataset
self.depth_name = depth_name
speed_var = [x for x in list(self.tdr.data_vars.keys())
if x in _SPEED_NAMES]
if speed_var and has_speed:
self.has_speed = True
self.speed_name = speed_var[0]
else:
self.has_speed = False
self.speed_name = None
self.tdr_file = tdr_filename
@classmethod
def read_netcdf(cls, tdr_file, depth_name="depth", time_name="timestamp",
subsample=None, has_speed=False, **kwargs):
"""Instantiate object by loading Dataset from NetCDF file
Parameters
----------
tdr_file : str
As first argument for :func:`xarray.load_dataset`.
depth_name : str, optional
Name of data variable with depth measurements. Default: "depth".
time_name : str, optional
Name of the time dimension in the dataset.
subsample : str, optional
Subsample dataset at given frequency specification. See pandas
offset aliases.
has_speed : bool, optional
            Whether the data include speed measurements. Column name must be
one of ["velocity", "speed"]. Default: False.
**kwargs : optional keyword arguments
Arguments passed to :func:`xarray.load_dataset`.
Returns
-------
obj : TDRSource, ZOC, TDRPhases, or TDR
Class matches the caller.
"""
dataset = _load_dataset(tdr_file, **kwargs)
return cls(dataset, depth_name=depth_name, time_name=time_name,
subsample=subsample, has_speed=has_speed,
tdr_filename=tdr_file)
def __str__(self):
x = self.tdr
depth_xr = x[self.depth_name]
depth_ser = depth_xr.to_series()
objcls = ("Time-Depth Recorder -- Class {} object\n"
.format(self.__class__.__name__))
src = "{0:<20} {1}\n".format("Source File", self.tdr_file)
itv = ("{0:<20} {1}\n"
.format("Sampling interval",
get_var_sampling_interval(depth_xr)))
nsamples = "{0:<20} {1}\n".format("Number of Samples",
depth_xr.shape[0])
beg = "{0:<20} {1}\n".format("Sampling Begins",
depth_ser.index[0])
end = "{0:<20} {1}\n".format("Sampling Ends",
depth_ser.index[-1])
dur = "{0:<20} {1}\n".format("Total duration",
depth_ser.index[-1] -
depth_ser.index[0])
drange = "{0:<20} [{1},{2}]\n".format("Measured depth range",
depth_ser.min(),
depth_ser.max())
others = "{0:<20} {1}\n".format("Other variables",
                                        [v for v in list(x.keys())
                                         if v != self.depth_name])
attr_list = "Attributes:\n"
for key, val in sorted(x.attrs.items()):
attr_list += "{0:>35}: {1}\n".format(key, val)
attr_list = attr_list.rstrip("\n")
return (objcls + src + itv + nsamples + beg + end + dur + drange +
others + attr_list)
def _get_depth(self):
return self.tdr[self.depth_name]
depth = property(_get_depth)
"""Return depth array
Returns
-------
xarray.DataArray
"""
def _get_speed(self):
return self.tdr[self.speed_name]
speed = property(_get_speed)
"""Return speed array
Returns
-------
xarray.DataArray
""" | 0.884083 | 0.545467 |
import json
__all__ = ["dump_config_template", "assign_xr_attrs"]
_SENSOR_DATA_CONFIG = {
'sampling': "regular",
'sampling_rate': "1",
'sampling_rate_units': "Hz",
'history': "",
'name': "",
'full_name': "",
'description': "",
'units': "",
'units_name': "",
'units_label': "",
'column_name': "",
'frame': "",
'axes': "",
'files': ""
}
_DATASET_CONFIG = {
'dep_id': "",
'dep_device_tzone': "",
'dep_device_regional_settings': "YYYY-mm-dd HH:MM:SS",
'dep_device_time_beg': "",
'deploy': {
'locality': "",
'lon': "",
'lat': "",
'device_time_on': "",
'method': ""
},
'project': {
'name': "",
'date_beg': "",
'date_end': ""
},
'provider': {
'name': "",
'affiliation': "",
'email': "",
'license': "",
'cite': "",
'doi': ""
},
'data': {
'source': "",
'format': "",
'creation_date': "",
'nfiles': ""
},
'device': {
'serial': "",
'make': "",
'type': "",
'model': "",
'url': ""
},
'sensors': {
'firmware': "",
'software': "",
'list': ""
},
'animal': {
'id': "",
'species_common': "",
'species_science': "",
'dbase_url': ""
}
}
def dump_config_template(fname, config_type):
"""Dump configuration file
Dump a json configuration template file to build metadata for a Dataset
or DataArray.
Parameters
----------
fname : str
A valid string path for output file.
config_type : {"dataset", "sensor"}
The type of config to dump.
Examples
--------
>>> import skdiveMove.metadata as metadata
>>> metadata.dump_config_template("mydataset.json",
... "dataset") # doctest: +SKIP
>>> metadata.dump_config_template("mysensor.json",
... "sensor") # doctest: +SKIP
    Edit the files to your specifications.
"""
with open(fname, "w") as ofile:
if config_type == "dataset":
json.dump(_DATASET_CONFIG, ofile, indent=2)
elif config_type == "sensor":
json.dump(_SENSOR_DATA_CONFIG, ofile, indent=2)
def assign_xr_attrs(obj, config_file):
"""Assign attributes to xarray.Dataset or xarray.DataArray
    The `config_file` should have only one level of nesting.
Parameters
----------
obj : {xarray.Dataset, xarray.DataArray}
Object to assign attributes to.
config_file : str
A valid string path for input json file with metadata attributes.
Returns
-------
out : {xarray.Dataset, xarray.DataArray}
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> import xarray as xr
>>> import skdiveMove.metadata as metadata
Synthetic dataset with depth and speed
>>> nsamples = 60 * 60 * 24
>>> times = pd.date_range("2000-01-01", freq="1s", periods=nsamples,
... name="time")
>>> cycles = np.sin(2 * np.pi * np.arange(nsamples) / (60 * 20))
>>> ds = xr.Dataset({"depth": (("time"), 1 + cycles),
... "speed": (("time"), 3 + cycles)},
... {"time": times})
Dump dataset and sensor templates
>>> metadata.dump_config_template("mydataset.json",
... "dataset") # doctest: +SKIP
>>> metadata.dump_config_template("P_sensor.json",
... "sensor") # doctest: +SKIP
>>> metadata.dump_config_template("S_sensor.json",
... "sensor") # doctest: +SKIP
Edit the templates as appropriate, load and assign to objects
>>> assign_xr_attrs(ds, "mydataset.json") # doctest: +SKIP
>>> assign_xr_attrs(ds.depth, "P_sensor.json") # doctest: +SKIP
>>> assign_xr_attrs(ds.speed, "S_sensor.json") # doctest: +SKIP
"""
with open(config_file) as ifile:
config = json.load(ifile)
# Parse the dict
for key, val in config.items():
top_kname = "{}".format(key)
if not val:
continue
        if isinstance(val, dict):
for key_n, val_n in val.items():
if not val_n:
continue
lower_kname = "{0}_{1}".format(top_kname, key_n)
obj.attrs[lower_kname] = val_n
else:
obj.attrs[top_kname] = val | scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/metadata.py | metadata.py | import json
__all__ = ["dump_config_template", "assign_xr_attrs"]
_SENSOR_DATA_CONFIG = {
'sampling': "regular",
'sampling_rate': "1",
'sampling_rate_units': "Hz",
'history': "",
'name': "",
'full_name': "",
'description': "",
'units': "",
'units_name': "",
'units_label': "",
'column_name': "",
'frame': "",
'axes': "",
'files': ""
}
_DATASET_CONFIG = {
'dep_id': "",
'dep_device_tzone': "",
'dep_device_regional_settings': "YYYY-mm-dd HH:MM:SS",
'dep_device_time_beg': "",
'deploy': {
'locality': "",
'lon': "",
'lat': "",
'device_time_on': "",
'method': ""
},
'project': {
'name': "",
'date_beg': "",
'date_end': ""
},
'provider': {
'name': "",
'affiliation': "",
'email': "",
'license': "",
'cite': "",
'doi': ""
},
'data': {
'source': "",
'format': "",
'creation_date': "",
'nfiles': ""
},
'device': {
'serial': "",
'make': "",
'type': "",
'model': "",
'url': ""
},
'sensors': {
'firmware': "",
'software': "",
'list': ""
},
'animal': {
'id': "",
'species_common': "",
'species_science': "",
'dbase_url': ""
}
}
def dump_config_template(fname, config_type):
"""Dump configuration file
Dump a json configuration template file to build metadata for a Dataset
or DataArray.
Parameters
----------
fname : str
A valid string path for output file.
config_type : {"dataset", "sensor"}
The type of config to dump.
Examples
--------
>>> import skdiveMove.metadata as metadata
>>> metadata.dump_config_template("mydataset.json",
... "dataset") # doctest: +SKIP
>>> metadata.dump_config_template("mysensor.json",
... "sensor") # doctest: +SKIP
    Edit the files to your specifications.
"""
with open(fname, "w") as ofile:
if config_type == "dataset":
json.dump(_DATASET_CONFIG, ofile, indent=2)
elif config_type == "sensor":
json.dump(_SENSOR_DATA_CONFIG, ofile, indent=2)
def assign_xr_attrs(obj, config_file):
"""Assign attributes to xarray.Dataset or xarray.DataArray
    The `config_file` should have only one level of nesting.
Parameters
----------
obj : {xarray.Dataset, xarray.DataArray}
Object to assign attributes to.
config_file : str
A valid string path for input json file with metadata attributes.
Returns
-------
out : {xarray.Dataset, xarray.DataArray}
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> import xarray as xr
>>> import skdiveMove.metadata as metadata
Synthetic dataset with depth and speed
>>> nsamples = 60 * 60 * 24
>>> times = pd.date_range("2000-01-01", freq="1s", periods=nsamples,
... name="time")
>>> cycles = np.sin(2 * np.pi * np.arange(nsamples) / (60 * 20))
>>> ds = xr.Dataset({"depth": (("time"), 1 + cycles),
... "speed": (("time"), 3 + cycles)},
... {"time": times})
Dump dataset and sensor templates
>>> metadata.dump_config_template("mydataset.json",
... "dataset") # doctest: +SKIP
>>> metadata.dump_config_template("P_sensor.json",
... "sensor") # doctest: +SKIP
>>> metadata.dump_config_template("S_sensor.json",
... "sensor") # doctest: +SKIP
Edit the templates as appropriate, load and assign to objects
>>> assign_xr_attrs(ds, "mydataset.json") # doctest: +SKIP
>>> assign_xr_attrs(ds.depth, "P_sensor.json") # doctest: +SKIP
>>> assign_xr_attrs(ds.speed, "S_sensor.json") # doctest: +SKIP
"""
with open(config_file) as ifile:
config = json.load(ifile)
# Parse the dict
for key, val in config.items():
top_kname = "{}".format(key)
if not val:
continue
        if isinstance(val, dict):
for key_n, val_n in val.items():
if not val_n:
continue
lower_kname = "{0}_{1}".format(top_kname, key_n)
obj.attrs[lower_kname] = val_n
else:
obj.attrs[top_kname] = val | 0.586049 | 0.224331 |
import numpy as np
import pandas as pd
import xarray as xr
from skdiveMove.core import robjs, cv, pandas2ri, diveMove
__all__ = ["_load_dataset", "_get_dive_indices", "_append_xr_attr",
"get_var_sampling_interval", "_cut_dive",
"_one_dive_stats", "_speed_stats", "rle_key"]
def _load_dataset(filename_or_obj, **kwargs):
"""Private function to load Dataset object from file name or object
Parameters
----------
filename_or_obj : str, Path or xarray.backends.*DataStore
String indicating the file where the data comes from.
**kwargs :
Arguments passed to `xarray.load_dataset`.
Returns
-------
dataset : Dataset
The output Dataset.
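    Examples
    --------
    A hypothetical call; "depth_records.nc" is a placeholder file name:
    >>> ds = _load_dataset("depth_records.nc")  # doctest: +SKIP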
"""
return xr.load_dataset(filename_or_obj, **kwargs)
def _get_dive_indices(indices, diveNo):
"""Mapping to diveMove's `.diveIndices`"""
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
# Subtract 1 for zero-based python
idx_ok = diveMove._diveIndices(indices, diveNo) - 1
return idx_ok
def _append_xr_attr(x, attr, val):
"""Append to attribute to xarray.DataArray or xarray.Dataset
If attribute does not exist, create it. Attribute is assumed to be a
string.
Parameters
----------
x : xarray.DataArray or xarray.Dataset
attr : str
Attribute name to update or add
val : str
Attribute value
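    Examples
    --------
    A minimal sketch (not run as a doctest), assuming a bare DataArray:
    >>> import xarray as xr
    >>> da = xr.DataArray([1.0, 2.0], name="depth")
    >>> _append_xr_attr(da, "history", "ZOC ")  # doctest: +SKIP
    >>> da.attrs["history"]  # doctest: +SKIP
    'ZOC '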
"""
if attr in x.attrs:
x.attrs[attr] += "{}".format(val)
else:
x.attrs[attr] = "{}".format(val)
def get_var_sampling_interval(x):
"""Retrieve sampling interval from DataArray attributes
Parameters
----------
x : xarray.DataArray
Returns
-------
pandas.Timedelta
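    Examples
    --------
    A sketch (not run), assuming a DataArray carrying the
    ``sampling_rate`` and ``sampling_rate_units`` attributes set
    elsewhere in this package:
    >>> import xarray as xr
    >>> da = xr.DataArray([0.0, 1.0, 2.0], name="depth",
    ...                   attrs={"sampling_rate": 1,
    ...                          "sampling_rate_units": "Hz"})
    >>> get_var_sampling_interval(da)  # doctest: +SKIP
    Timedelta('0 days 00:00:01')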
"""
attrs = x.attrs
sampling_rate = attrs["sampling_rate"]
sampling_rate_units = attrs["sampling_rate_units"]
if sampling_rate_units.lower() == "hz":
sampling_rate = 1 / sampling_rate
sampling_rate_units = "s"
intvl = pd.Timedelta("{}{}"
.format(sampling_rate, sampling_rate_units))
return intvl
def _cut_dive(x, dive_model, smooth_par, knot_factor,
descent_crit_q, ascent_crit_q):
"""Private function to retrieve results from `diveModel` object in R
Parameters
----------
x : pandas.DataFrame
Subset with a single dive's data, with first column expected to be
dive ID.
dive_model : str
smooth_par : float
knot_factor : int
descent_crit_q : float
ascent_crit_q : float
Notes
-----
See details for arguments in diveMove's ``calibrateDepth``. This
function maps to ``diveMove:::.cutDive``, and only sets some of the
parameters from the `R` function.
Returns
-------
out : dict
Dictionary with the following keys and corresponding component:
{'label_matrix', 'dive_spline', 'spline_deriv', 'descent_crit',
'ascent_crit', 'descent_crit_rate', 'ascent_crit_rate'}
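    Examples
    --------
    Sketch only; ``dive_df`` stands for a single dive's DataFrame whose
    first column is the dive ID, as assembled internally by `TDR`:
    >>> res = _cut_dive(dive_df, dive_model="unimodal", smooth_par=0.1,
    ...                 knot_factor=20, descent_crit_q=0.01,
    ...                 ascent_crit_q=0)  # doctest: +SKIP
    >>> sorted(res.keys())  # doctest: +SKIP
    ['ascent_crit', 'ascent_crit_rate', 'descent_crit', ...]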
"""
xx = x.iloc[:, 1:]
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
dmodel = diveMove._cutDive(cv.py2rpy(xx), dive_model=dive_model,
smooth_par=smooth_par,
knot_factor=knot_factor,
descent_crit_q=descent_crit_q,
ascent_crit_q=ascent_crit_q)
dmodel_slots = ["label.matrix", "dive.spline", "spline.deriv",
"descent.crit", "ascent.crit",
"descent.crit.rate", "ascent.crit.rate"]
lmtx = (np.array(robjs.r.slot(dmodel, dmodel_slots[0]))
.reshape((xx.shape[0], 2), order="F"))
spl = robjs.r.slot(dmodel, dmodel_slots[1])
spl_der = robjs.r.slot(dmodel, dmodel_slots[2])
spl_der = np.column_stack((spl_der[0], spl_der[1]))
desc_crit = robjs.r.slot(dmodel, dmodel_slots[3])[0]
asc_crit = robjs.r.slot(dmodel, dmodel_slots[4])[0]
desc_crit_r = robjs.r.slot(dmodel, dmodel_slots[5])[0]
asc_crit_r = robjs.r.slot(dmodel, dmodel_slots[6])[0]
# Replace dots with underscore for the output
dmodel_slots = [x.replace(".", "_") for x in dmodel_slots]
res = dict(zip(dmodel_slots,
[lmtx, spl, spl_der, desc_crit, asc_crit,
desc_crit_r, asc_crit_r]))
return res
def _one_dive_stats(x, interval, has_speed=False):
"""Calculate dive statistics for a single dive's DataFrame
Parameters
----------
x : pandas.DataFrame
First column expected to be dive ID, the rest as in `diveMove`.
interval : float
has_speed : bool
Returns
-------
out : pandas.DataFrame
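    Examples
    --------
    Sketch only; ``dive_df`` stands for a single dive's DataFrame (first
    column dive ID) and ``interval`` for the sampling interval in seconds:
    >>> stats = _one_dive_stats(dive_df, interval=5.0,
    ...                         has_speed=False)  # doctest: +SKIP
    >>> stats.columns[:3].tolist()  # doctest: +SKIP
    ['begdesc', 'enddesc', 'begasc']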
"""
xx = x.iloc[:, 1:]
onames_speed = ["begdesc", "enddesc", "begasc", "desctim", "botttim",
"asctim", "divetim", "descdist", "bottdist", "ascdist",
"bottdep_mean", "bottdep_median", "bottdep_sd",
"maxdep", "desc_tdist", "desc_mean_speed",
"desc_angle", "bott_tdist", "bott_mean_speed",
"asc_tdist", "asc_mean_speed", "asc_angle"]
onames_nospeed = onames_speed[:14]
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
res = diveMove.oneDiveStats(xx, interval, has_speed)
if has_speed:
onames = onames_speed
else:
onames = onames_nospeed
res_df = pd.DataFrame(res, columns=onames)
for tcol in range(3):
# This is per POSIXct convention in R
res_df.iloc[:, tcol] = pd.to_datetime(res_df.iloc[:, tcol],
unit="s")
return res_df
def _speed_stats(x, vdist=None):
"""Calculate total travel distance, mean speed, and angle from speed
Dive stats for a single segment of a dive.
Parameters
----------
x : pandas.Series
Series with speed measurements.
vdist : float, optional
Vertical distance corresponding to `x`.
Returns
-------
    out : numpy.ndarray
        Array with total travel distance, mean speed, and angle.
"""
kwargs = dict(x=x)
if vdist is not None:
kwargs.update(vdist=vdist)
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
res = diveMove._speedStats(**kwargs)
return res
def rle_key(x):
"""Emulate a run length encoder
Assigns a numerical sequence identifying run lengths in input Series.
Parameters
----------
x : pandas.Series
Series with data to encode.
Returns
-------
out : pandas.Series
Examples
--------
>>> N = 18
>>> color = np.repeat(list("ABCABC"), 3)
>>> ss = pd.Series(color,
... index=pd.date_range("2020-01-01", periods=N,
... freq="10s", tz="UTC"),
... dtype="category")
>>> rle_key(ss)
2020-01-01 00:00:00+00:00 1
2020-01-01 00:00:10+00:00 1
2020-01-01 00:00:20+00:00 1
2020-01-01 00:00:30+00:00 2
2020-01-01 00:00:40+00:00 2
2020-01-01 00:00:50+00:00 2
2020-01-01 00:01:00+00:00 3
2020-01-01 00:01:10+00:00 3
2020-01-01 00:01:20+00:00 3
2020-01-01 00:01:30+00:00 4
2020-01-01 00:01:40+00:00 4
2020-01-01 00:01:50+00:00 4
2020-01-01 00:02:00+00:00 5
2020-01-01 00:02:10+00:00 5
2020-01-01 00:02:20+00:00 5
2020-01-01 00:02:30+00:00 6
2020-01-01 00:02:40+00:00 6
2020-01-01 00:02:50+00:00 6
Freq: 10S, dtype: int64
"""
xout = x.ne(x.shift()).cumsum()
return xout
if __name__ == '__main__':
N = 18
color = np.repeat(list("ABCABC"), 3)
ss = pd.Series(color,
index=pd.date_range("2020-01-01", periods=N,
freq="10s", tz="UTC"),
dtype="category")
xx = pd.Series(np.random.standard_normal(10))
rle_key(xx > 0) | scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/helpers.py | helpers.py | import numpy as np
import pandas as pd
import xarray as xr
from skdiveMove.core import robjs, cv, pandas2ri, diveMove
__all__ = ["_load_dataset", "_get_dive_indices", "_append_xr_attr",
"get_var_sampling_interval", "_cut_dive",
"_one_dive_stats", "_speed_stats", "rle_key"]
def _load_dataset(filename_or_obj, **kwargs):
"""Private function to load Dataset object from file name or object
Parameters
----------
filename_or_obj : str, Path or xarray.backends.*DataStore
String indicating the file where the data comes from.
**kwargs :
Arguments passed to `xarray.load_dataset`.
Returns
-------
dataset : Dataset
The output Dataset.
"""
return xr.load_dataset(filename_or_obj, **kwargs)
def _get_dive_indices(indices, diveNo):
"""Mapping to diveMove's `.diveIndices`"""
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
# Subtract 1 for zero-based python
idx_ok = diveMove._diveIndices(indices, diveNo) - 1
return idx_ok
def _append_xr_attr(x, attr, val):
"""Append to attribute to xarray.DataArray or xarray.Dataset
If attribute does not exist, create it. Attribute is assumed to be a
string.
Parameters
----------
x : xarray.DataArray or xarray.Dataset
attr : str
Attribute name to update or add
val : str
Attribute value
"""
if attr in x.attrs:
x.attrs[attr] += "{}".format(val)
else:
x.attrs[attr] = "{}".format(val)
def get_var_sampling_interval(x):
"""Retrieve sampling interval from DataArray attributes
Parameters
----------
x : xarray.DataArray
Returns
-------
pandas.Timedelta
"""
attrs = x.attrs
sampling_rate = attrs["sampling_rate"]
sampling_rate_units = attrs["sampling_rate_units"]
if sampling_rate_units.lower() == "hz":
sampling_rate = 1 / sampling_rate
sampling_rate_units = "s"
intvl = pd.Timedelta("{}{}"
.format(sampling_rate, sampling_rate_units))
return intvl
def _cut_dive(x, dive_model, smooth_par, knot_factor,
descent_crit_q, ascent_crit_q):
"""Private function to retrieve results from `diveModel` object in R
Parameters
----------
x : pandas.DataFrame
Subset with a single dive's data, with first column expected to be
dive ID.
dive_model : str
smooth_par : float
knot_factor : int
descent_crit_q : float
ascent_crit_q : float
Notes
-----
See details for arguments in diveMove's ``calibrateDepth``. This
function maps to ``diveMove:::.cutDive``, and only sets some of the
parameters from the `R` function.
Returns
-------
out : dict
Dictionary with the following keys and corresponding component:
{'label_matrix', 'dive_spline', 'spline_deriv', 'descent_crit',
'ascent_crit', 'descent_crit_rate', 'ascent_crit_rate'}
"""
xx = x.iloc[:, 1:]
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
dmodel = diveMove._cutDive(cv.py2rpy(xx), dive_model=dive_model,
smooth_par=smooth_par,
knot_factor=knot_factor,
descent_crit_q=descent_crit_q,
ascent_crit_q=ascent_crit_q)
dmodel_slots = ["label.matrix", "dive.spline", "spline.deriv",
"descent.crit", "ascent.crit",
"descent.crit.rate", "ascent.crit.rate"]
lmtx = (np.array(robjs.r.slot(dmodel, dmodel_slots[0]))
.reshape((xx.shape[0], 2), order="F"))
spl = robjs.r.slot(dmodel, dmodel_slots[1])
spl_der = robjs.r.slot(dmodel, dmodel_slots[2])
spl_der = np.column_stack((spl_der[0], spl_der[1]))
desc_crit = robjs.r.slot(dmodel, dmodel_slots[3])[0]
asc_crit = robjs.r.slot(dmodel, dmodel_slots[4])[0]
desc_crit_r = robjs.r.slot(dmodel, dmodel_slots[5])[0]
asc_crit_r = robjs.r.slot(dmodel, dmodel_slots[6])[0]
# Replace dots with underscore for the output
dmodel_slots = [x.replace(".", "_") for x in dmodel_slots]
res = dict(zip(dmodel_slots,
[lmtx, spl, spl_der, desc_crit, asc_crit,
desc_crit_r, asc_crit_r]))
return res
def _one_dive_stats(x, interval, has_speed=False):
"""Calculate dive statistics for a single dive's DataFrame
Parameters
----------
x : pandas.DataFrame
First column expected to be dive ID, the rest as in `diveMove`.
interval : float
has_speed : bool
Returns
-------
out : pandas.DataFrame
"""
xx = x.iloc[:, 1:]
onames_speed = ["begdesc", "enddesc", "begasc", "desctim", "botttim",
"asctim", "divetim", "descdist", "bottdist", "ascdist",
"bottdep_mean", "bottdep_median", "bottdep_sd",
"maxdep", "desc_tdist", "desc_mean_speed",
"desc_angle", "bott_tdist", "bott_mean_speed",
"asc_tdist", "asc_mean_speed", "asc_angle"]
onames_nospeed = onames_speed[:14]
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
res = diveMove.oneDiveStats(xx, interval, has_speed)
if has_speed:
onames = onames_speed
else:
onames = onames_nospeed
res_df = pd.DataFrame(res, columns=onames)
for tcol in range(3):
# This is per POSIXct convention in R
res_df.iloc[:, tcol] = pd.to_datetime(res_df.iloc[:, tcol],
unit="s")
return res_df
def _speed_stats(x, vdist=None):
"""Calculate total travel distance, mean speed, and angle from speed
Dive stats for a single segment of a dive.
Parameters
----------
x : pandas.Series
Series with speed measurements.
vdist : float, optional
Vertical distance corresponding to `x`.
Returns
-------
    out : numpy.ndarray
        Array with total travel distance, mean speed, and angle.
"""
kwargs = dict(x=x)
if vdist is not None:
kwargs.update(vdist=vdist)
with cv.localconverter(robjs.default_converter +
pandas2ri.converter):
res = diveMove._speedStats(**kwargs)
return res
def rle_key(x):
"""Emulate a run length encoder
Assigns a numerical sequence identifying run lengths in input Series.
Parameters
----------
x : pandas.Series
Series with data to encode.
Returns
-------
out : pandas.Series
Examples
--------
>>> N = 18
>>> color = np.repeat(list("ABCABC"), 3)
>>> ss = pd.Series(color,
... index=pd.date_range("2020-01-01", periods=N,
... freq="10s", tz="UTC"),
... dtype="category")
>>> rle_key(ss)
2020-01-01 00:00:00+00:00 1
2020-01-01 00:00:10+00:00 1
2020-01-01 00:00:20+00:00 1
2020-01-01 00:00:30+00:00 2
2020-01-01 00:00:40+00:00 2
2020-01-01 00:00:50+00:00 2
2020-01-01 00:01:00+00:00 3
2020-01-01 00:01:10+00:00 3
2020-01-01 00:01:20+00:00 3
2020-01-01 00:01:30+00:00 4
2020-01-01 00:01:40+00:00 4
2020-01-01 00:01:50+00:00 4
2020-01-01 00:02:00+00:00 5
2020-01-01 00:02:10+00:00 5
2020-01-01 00:02:20+00:00 5
2020-01-01 00:02:30+00:00 6
2020-01-01 00:02:40+00:00 6
2020-01-01 00:02:50+00:00 6
Freq: 10S, dtype: int64
"""
xout = x.ne(x.shift()).cumsum()
return xout
if __name__ == '__main__':
N = 18
color = np.repeat(list("ABCABC"), 3)
ss = pd.Series(color,
index=pd.date_range("2020-01-01", periods=N,
freq="10s", tz="UTC"),
dtype="category")
xx = pd.Series(np.random.standard_normal(10))
rle_key(xx > 0) | 0.874118 | 0.423547 |
import logging
import numpy as np
import pandas as pd
from skdiveMove.tdrphases import TDRPhases
import skdiveMove.plotting as plotting
import skdiveMove.calibspeed as speedcal
from skdiveMove.helpers import (get_var_sampling_interval,
_get_dive_indices, _append_xr_attr,
_one_dive_stats, _speed_stats)
import skdiveMove.calibconfig as calibconfig
import xarray as xr
logger = logging.getLogger(__name__)
# Add the null handler when importing as a library; whatever uses this
# library should set up logging.basicConfig() as needed
logger.addHandler(logging.NullHandler())
# Keep attributes in xarray operations
xr.set_options(keep_attrs=True)
class TDR(TDRPhases):
"""Base class encapsulating TDR objects and processing
TDR subclasses `TDRPhases` to provide comprehensive TDR processing
capabilities.
See help(TDR) for inherited attributes.
Attributes
----------
speed_calib_fit : quantreg model fit
Model object fit by quantile regression for speed calibration.
Examples
--------
Construct an instance from diveMove example dataset
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
Plot the `TDR` object
>>> tdrX.plot() # doctest: +ELLIPSIS
(<Figure ... 1 Axes>, <AxesSubplot:...>)
"""
def __init__(self, *args, **kwargs):
"""Set up attributes for TDR objects
Parameters
----------
*args : positional arguments
Passed to :meth:`TDRPhases.__init__`
**kwargs : keyword arguments
Passed to :meth:`TDRPhases.__init__`
"""
TDRPhases.__init__(self, *args, **kwargs)
# Speed calibration fit
self.speed_calib_fit = None
def __str__(self):
base = TDRPhases.__str__(self)
speed_fmt_pref = "Speed calibration coefficients:"
if self.speed_calib_fit is not None:
speed_ccoef_a, speed_ccoef_b = self.speed_calib_fit.params
speed_coefs_fmt = ("\n{0:<20} (a={1:.4f}, b={2:.4f})"
.format(speed_fmt_pref,
speed_ccoef_a, speed_ccoef_b))
else:
speed_ccoef_a, speed_ccoef_b = (None, None)
speed_coefs_fmt = ("\n{0:<20} (a=None, b=None)"
.format(speed_fmt_pref))
return base + speed_coefs_fmt
def calibrate_speed(self, tau=0.1, contour_level=0.1, z=0, bad=[0, 0],
**kwargs):
"""Calibrate speed measurements
Set the `speed_calib_fit` attribute
Parameters
----------
tau : float, optional
Quantile on which to regress speed on rate of depth change.
contour_level : float, optional
The mesh obtained from the bivariate kernel density estimation
corresponding to this contour will be used for the quantile
regression to define the calibration line.
z : float, optional
Only changes in depth larger than this value will be used for
calibration.
bad : array_like, optional
            Two-element `array_like`; only rates of depth change greater
            than the first value, and speeds greater than the second, are
            used for calibration.
**kwargs : optional keyword arguments
Passed to :func:`~speedcal.calibrate_speed`
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.calibrate_speed(z=2)
"""
depth = self.get_depth("zoc").to_series()
ddiffs = depth.reset_index().diff().set_index(depth.index)
ddepth = ddiffs["depth"].abs()
rddepth = ddepth / ddiffs[depth.index.name].dt.total_seconds()
curspeed = self.get_speed("measured").to_series()
ok = (ddepth > z) & (rddepth > bad[0]) & (curspeed > bad[1])
rddepth = rddepth[ok]
curspeed = curspeed[ok]
kde_data = pd.concat((rddepth.rename("depth_rate"),
curspeed), axis=1)
qfit, ax = speedcal.calibrate_speed(kde_data, tau=tau,
contour_level=contour_level,
z=z, bad=bad, **kwargs)
self.speed_calib_fit = qfit
logger.info("Finished calibrating speed")
def dive_stats(self, depth_deriv=True):
"""Calculate dive statistics in `TDR` records
Parameters
----------
depth_deriv : bool, optional
Whether to compute depth derivative statistics.
Returns
-------
pandas.DataFrame
Notes
-----
        This method is homologous to diveMove's `diveStats` function.
Examples
--------
ZOC using the "filter" method
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> # Window lengths and probabilities
>>> DB = [-2, 5]
>>> K = [3, 5760]
>>> P = [0.5, 0.02]
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.dive_stats() # doctest: +ELLIPSIS
begdesc ... postdive_mean_speed
1 2002-01-05 ... 1.398859
2 ...
"""
phases_df = self.get_dives_details("row_ids")
idx_name = phases_df.index.name
# calib_speed=False if no fit object
if self.has_speed:
tdr = (self.get_tdr(calib_depth=True,
calib_speed=bool(self.speed_calib_fit))
[[self.depth_name, self.speed_name]])
else:
tdr = (self.get_tdr(calib_depth=True,
calib_speed=bool(self.speed_calib_fit))
[[self.depth_name]])
intvl = (get_var_sampling_interval(tdr[self.depth_name])
.total_seconds())
tdr = tdr.to_dataframe()
dive_ids = phases_df.loc[:, "dive_id"]
postdive_ids = phases_df.loc[:, "postdive_id"]
ok = (dive_ids > 0) & dive_ids.isin(postdive_ids)
okpd = (postdive_ids > 0) & postdive_ids.isin(dive_ids)
postdive_ids = postdive_ids[okpd]
postdive_dur = (postdive_ids.reset_index()
.groupby("postdive_id")
.apply(lambda x: x.iloc[-1] - x.iloc[0]))
# Enforce UTC, as otherwise rpy2 uses our locale in the output of
# OneDiveStats
tdrf = (pd.concat((phases_df[["dive_id", "dive_phase"]][ok],
tdr.loc[ok.index[ok]]), axis=1)
.tz_localize("UTC").reset_index())
# Ugly hack to re-order columns for `diveMove` convention
names0 = ["dive_id", "dive_phase", idx_name, self.depth_name]
colnames = tdrf.columns.to_list()
if self.has_speed:
names0.append(self.speed_name)
colnames = names0 + list(set(colnames) - set(names0))
tdrf = tdrf.reindex(columns=colnames)
tdrf_grp = tdrf.groupby("dive_id")
ones_list = []
for name, grp in tdrf_grp:
res = _one_dive_stats(grp.loc[:, names0], interval=intvl,
has_speed=self.has_speed)
# Rename to match dive number
res = res.rename({0: name})
if depth_deriv:
deriv_stats = self._get_dive_deriv_stats(name)
res = pd.concat((res, deriv_stats), axis=1)
ones_list.append(res)
ones_df = pd.concat(ones_list, ignore_index=True)
ones_df.set_index(dive_ids[ok].unique(), inplace=True)
ones_df.index.rename("dive_id", inplace=True)
ones_df["postdive_dur"] = postdive_dur[idx_name]
# For postdive total distance and mean speed (if available)
if self.has_speed:
speed_postd = (tdr[self.speed_name][okpd]
.groupby(postdive_ids))
pd_speed_ll = []
for name, grp in speed_postd:
res = _speed_stats(grp.reset_index())
onames = ["postdive_tdist", "postdive_mean_speed"]
res_df = pd.DataFrame(res[:, :-1], columns=onames,
index=[name])
pd_speed_ll.append(res_df)
pd_speed_stats = pd.concat(pd_speed_ll)
ones_df = pd.concat((ones_df, pd_speed_stats), axis=1)
return ones_df
def plot(self, concur_vars=None, concur_var_titles=None, **kwargs):
"""Plot TDR object
Parameters
----------
concur_vars : str or list, optional
String or list of strings with names of columns in input to
select additional data to plot.
concur_var_titles : str or list, optional
String or list of strings with y-axis labels for `concur_vars`.
**kwargs : optional keyword arguments
Arguments passed to plotting function.
Returns
-------
tuple
:class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes` instances.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> tdrX.plot(xlim=["2002-01-05 21:00:00", "2002-01-06 04:10:00"],
... depth_lim=[95, -1]) # doctest: +ELLIPSIS
(<Figure ... with 1 Axes>, <AxesSubplot:...'>)
"""
try:
depth = self.get_depth("zoc")
except LookupError:
depth = self.get_depth("measured")
if "ylab_depth" not in kwargs:
ylab_depth = ("{0} [{1}]"
.format(depth.attrs["full_name"],
depth.attrs["units"]))
kwargs.update(ylab_depth=ylab_depth)
depth = depth.to_series()
if concur_vars is None:
fig, ax = plotting.plot_tdr(depth, **kwargs)
elif concur_var_titles is None:
ccvars = self.tdr[concur_vars].to_dataframe()
fig, ax = plotting.plot_tdr(depth, concur_vars=ccvars, **kwargs)
else:
ccvars = self.tdr[concur_vars].to_dataframe()
ccvars_title = concur_var_titles # just to shorten
fig, ax = plotting.plot_tdr(depth,
concur_vars=ccvars,
concur_var_titles=ccvars_title,
**kwargs)
return (fig, ax)
def plot_zoc(self, xlim=None, ylim=None, **kwargs):
"""Plot zero offset correction filters
Parameters
----------
xlim, ylim : 2-tuple/list, optional
Minimum and maximum limits for ``x``- and ``y``-axis,
respectively.
**kwargs : optional keyword arguments
Passed to :func:`~matplotlib.pyplot.subplots`.
Returns
-------
tuple
:class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes` instances.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> # Window lengths and probabilities
>>> DB = [-2, 5]
>>> K = [3, 5760]
>>> P = [0.5, 0.02]
>>> tdrX.zoc("filter", k=K, probs=P, depth_bounds=DB)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.plot_zoc() # doctest: +ELLIPSIS
(<Figure ... with 3 Axes>, array([<AxesSubplot:...'>,
<AxesSubplot:...'>, <AxesSubplot:...>], dtype=object))
"""
zoc_method = self.zoc_method
depth_msrd = self.get_depth("measured")
ylab = ("{0} [{1}]"
.format(depth_msrd.attrs["full_name"],
depth_msrd.attrs["units"]))
if zoc_method == "filter":
zoc_filters = self.zoc_filters
depth = depth_msrd.to_series()
if "ylab" not in kwargs:
kwargs.update(ylab=ylab)
fig, ax = (plotting
._plot_zoc_filters(depth, zoc_filters, xlim, ylim,
**kwargs))
elif zoc_method == "offset":
depth_msrd = depth_msrd.to_series()
depth_zoc = self.get_depth("zoc").to_series()
fig, ax = plotting.plt.subplots(1, 1, **kwargs)
ax = depth_msrd.plot(ax=ax, rot=0, label="measured")
depth_zoc.plot(ax=ax, label="zoc")
ax.axhline(0, linestyle="--", linewidth=0.75, color="k")
ax.set_xlabel("")
ax.set_ylabel(ylab)
ax.legend(loc="lower right")
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.invert_yaxis()
return (fig, ax)
def plot_phases(self, diveNo=None, concur_vars=None,
concur_var_titles=None, surface=False, **kwargs):
"""Plot major phases found on the object
Parameters
----------
diveNo : array_like, optional
List of dive numbers (1-based) to plot.
concur_vars : str or list, optional
String or list of strings with names of columns in input to
select additional data to plot.
concur_var_titles : str or list, optional
String or list of strings with y-axis labels for `concur_vars`.
surface : bool, optional
Whether to plot surface readings.
**kwargs : optional keyword arguments
Arguments passed to plotting function.
Returns
-------
tuple
:class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes` instances.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.plot_phases(list(range(250, 300)),
... surface=True) # doctest: +ELLIPSIS
(<Figure ... with 1 Axes>, <AxesSubplot:...>)
"""
row_ids = self.get_dives_details("row_ids")
dive_ids = row_ids["dive_id"]
dive_ids_uniq = dive_ids.unique()
postdive_ids = row_ids["postdive_id"]
if diveNo is None:
diveNo = np.arange(1, row_ids["dive_id"].max() + 1).tolist()
else:
diveNo = [x for x in sorted(diveNo) if x in dive_ids_uniq]
depth_all = self.get_depth("zoc").to_dataframe() # DataFrame
if concur_vars is None:
dives_all = depth_all
else:
concur_df = self.tdr.to_dataframe().loc[:, concur_vars]
dives_all = pd.concat((depth_all, concur_df), axis=1)
isin_dive_ids = dive_ids.isin(diveNo)
isin_postdive_ids = postdive_ids.isin(diveNo)
if surface:
isin = isin_dive_ids | isin_postdive_ids
dives_in = dives_all[isin]
sfce0_idx = (postdive_ids[postdive_ids == diveNo[0] - 1]
.last_valid_index())
dives_df = pd.concat((dives_all.loc[[sfce0_idx]], dives_in),
axis=0)
details_df = pd.concat((row_ids.loc[[sfce0_idx]], row_ids[isin]),
axis=0)
else:
idx_ok = _get_dive_indices(dive_ids, diveNo)
dives_df = dives_all.iloc[idx_ok, :]
details_df = row_ids.iloc[idx_ok, :]
wet_dry = self.time_budget(ignore_z=True, ignore_du=True)
drys = wet_dry[wet_dry["phase_label"] == "L"][["beg", "end"]]
if (drys.shape[0] > 0):
dry_time = drys
else:
dry_time = None
if concur_vars is None:
fig, ax = (plotting
.plot_tdr(dives_df.iloc[:, 0],
phase_cat=details_df["dive_phase"],
dry_time=dry_time, **kwargs))
else:
fig, ax = (plotting
.plot_tdr(dives_df.iloc[:, 0],
concur_vars=dives_df.iloc[:, 1:],
concur_var_titles=concur_var_titles,
phase_cat=details_df["dive_phase"],
dry_time=dry_time, **kwargs))
return (fig, ax)
def plot_dive_model(self, diveNo=None, **kwargs):
"""Plot dive model for selected dive
Parameters
----------
diveNo : array_like, optional
List of dive numbers (1-based) to plot.
**kwargs : optional keyword arguments
Arguments passed to plotting function.
Returns
-------
tuple
:class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes` instances.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.plot_dive_model(diveNo=20,
... figsize=(10, 10)) # doctest: +ELLIPSIS
(<Figure ... with 2 Axes>, (<AxesSubplot:...>, <AxesSubplot:...>))
"""
dive_ids = self.get_dives_details("row_ids", "dive_id")
crit_vals = self.get_dives_details("crit_vals").loc[diveNo]
idxs = _get_dive_indices(dive_ids, diveNo)
depth = self.get_depth("zoc").to_dataframe().iloc[idxs]
depth_s = self._get_dive_spline_slot(diveNo, "xy")
depth_deriv = (self.get_dives_details("spline_derivs").loc[diveNo])
# Index with time stamp
if depth.shape[0] < 4:
depth_s_idx = pd.date_range(depth.index[0], depth.index[-1],
periods=depth_s.shape[0],
tz=depth.index.tz)
depth_s = pd.Series(depth_s.to_numpy(), index=depth_s_idx)
dderiv_idx = pd.date_range(depth.index[0], depth.index[-1],
periods=depth_deriv.shape[0],
tz=depth.index.tz)
# Extract only the series and index with time stamp
depth_deriv = pd.Series(depth_deriv["y"].to_numpy(),
index=dderiv_idx)
else:
depth_s = pd.Series(depth_s.to_numpy(),
index=depth.index[0] + depth_s.index)
# Extract only the series and index with time stamp
depth_deriv = pd.Series(depth_deriv["y"].to_numpy(),
index=depth.index[0] + depth_deriv.index)
# Force integer again as `loc` coerced to float above
d_crit = crit_vals["descent_crit"].astype(int)
a_crit = crit_vals["ascent_crit"].astype(int)
d_crit_rate = crit_vals["descent_crit_rate"]
a_crit_rate = crit_vals["ascent_crit_rate"]
title = "Dive: {:d}".format(diveNo)
fig, axs = plotting.plot_dive_model(depth, depth_s=depth_s,
depth_deriv=depth_deriv,
d_crit=d_crit, a_crit=a_crit,
d_crit_rate=d_crit_rate,
a_crit_rate=a_crit_rate,
leg_title=title, **kwargs)
return (fig, axs)
def get_depth(self, kind="measured"):
"""Retrieve depth records
Parameters
----------
kind : {"measured", "zoc"}
Which depth to retrieve.
Returns
-------
xarray.DataArray
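        Examples
        --------
        A short sketch using the diveMove example dataset and the offset
        ZOC method shown elsewhere in this module:
        >>> from skdiveMove.tests import diveMove2skd
        >>> tdrX = diveMove2skd()
        >>> tdrX.zoc("offset", offset=3)
        >>> tdrX.get_depth("zoc")  # doctest: +SKIP
        <xarray.DataArray ...>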
"""
kinds = ["measured", "zoc"]
if kind == kinds[0]:
odepth = self.depth
elif kind == kinds[1]:
odepth = self.depth_zoc
if odepth is None:
msg = "ZOC depth not available."
logger.error(msg)
raise LookupError(msg)
else:
msg = "kind must be one of: {}".format(kinds)
logger.error(msg)
raise LookupError(msg)
return odepth
def get_speed(self, kind="measured"):
"""Retrieve speed records
Parameters
----------
kind : {"measured", "calibrated"}
Which speed to retrieve.
Returns
-------
xarray.DataArray
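        Examples
        --------
        A sketch using the diveMove example dataset; calibrated speed is
        only available after ``calibrate_speed`` has been run:
        >>> from skdiveMove.tests import diveMove2skd
        >>> tdrX = diveMove2skd()
        >>> tdrX.zoc("offset", offset=3)
        >>> tdrX.calibrate_speed(z=2)
        >>> tdrX.get_speed("calibrated")  # doctest: +SKIP
        <xarray.DataArray ...>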
"""
kinds = ["measured", "calibrated"]
ispeed = self.speed
if kind == kinds[0]:
ospeed = ispeed
elif kind == kinds[1]:
qfit = self.speed_calib_fit
if qfit is None:
msg = "Calibrated speed not available."
logger.error(msg)
raise LookupError(msg)
else:
coefs = qfit.params
coef_a = coefs[0]
coef_b = coefs[1]
ospeed = (ispeed - coef_a) / coef_b
_append_xr_attr(ospeed, "history", "speed_calib_fit")
else:
msg = "kind must be one of: {}".format(kinds)
logger.error(msg)
raise LookupError(msg)
return ospeed
def get_tdr(self, calib_depth=True, calib_speed=True):
"""Return a copy of tdr Dataset
Parameters
----------
calib_depth : bool, optional
Whether to return calibrated depth measurements.
calib_speed : bool, optional
Whether to return calibrated speed measurements.
Returns
-------
xarray.Dataset
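        Examples
        --------
        Illustrative only; assumes depth has already been zero-offset
        corrected:
        >>> from skdiveMove.tests import diveMove2skd
        >>> tdrX = diveMove2skd()
        >>> tdrX.zoc("offset", offset=3)
        >>> tdrX.get_tdr(calib_speed=False)  # doctest: +SKIP
        <xarray.Dataset>
        Dimensions: ...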
"""
tdr = self.tdr.copy()
if calib_depth:
depth_name = self.depth_name
depth_cal = self.get_depth("zoc")
tdr[depth_name] = depth_cal
if self.has_speed and calib_speed:
speed_name = self.speed_name
speed_cal = self.get_speed("calibrated")
tdr[speed_name] = speed_cal
return tdr
def extract_dives(self, diveNo, **kwargs):
"""Extract TDR data corresponding to a particular set of dives
Parameters
----------
diveNo : array_like, optional
List of dive numbers (1-based) to plot.
**kwargs : optional keyword arguments
Passed to :meth:`get_tdr`
Returns
-------
xarray.Dataset
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd(has_speed=False)
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.extract_dives(diveNo=20) # doctest: +ELLIPSIS
<xarray.Dataset>
Dimensions: ...
"""
dive_ids = self.get_dives_details("row_ids", "dive_id")
idx_name = dive_ids.index.name
idxs = _get_dive_indices(dive_ids, diveNo)
tdr = self.get_tdr(**kwargs)
tdr_i = tdr[{idx_name: idxs.astype(int)}]
return tdr_i
def calibrate(tdr_file, config_file=None):
"""Perform all major TDR calibration operations
Detect periods of major activities in a `TDR` object, calibrate depth
readings, and speed if appropriate, in preparation for subsequent
summaries of diving behaviour.
This function is a convenience wrapper around :meth:`~TDR.detect_wet`,
:meth:`~TDR.detect_dives`, :meth:`~TDR.detect_dive_phases`,
:meth:`~TDR.zoc`, and :meth:`~TDR.calibrate_speed`. It performs
wet/dry phase detection, zero-offset correction of depth, detection of
dives, as well as proper labelling of the latter, and calibrates speed
data if appropriate.
Due to the complexity of this procedure, and the number of settings
required for it, a calibration configuration file (JSON) is used to
guide the operations.
Parameters
----------
tdr_file : str, Path or xarray.backends.*DataStore
As first argument for :func:`xarray.load_dataset`.
config_file : str
A valid string path for TDR calibration configuration file.
Returns
-------
out : TDR
See Also
--------
dump_config_template : configuration template
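    Examples
    --------
    A hypothetical call; both file names below are placeholders, not
    files shipped with the package:
    >>> from skdiveMove.tdr import calibrate
    >>> tdrX = calibrate("tdr_records.nc",
    ...                  "tdr_calib_config.json")  # doctest: +SKIP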
"""
if config_file is None:
config = calibconfig._DEFAULT_CONFIG
else:
config = calibconfig.read_config(config_file)
logger = logging.getLogger(__name__)
logger.setLevel(config["log_level"])
load_dataset_kwargs = config["read"].pop("load_dataset_kwargs")
logger.info("Reading config: {}, {}"
.format(config["read"], load_dataset_kwargs))
tdr = TDR.read_netcdf(tdr_file, **config["read"], **load_dataset_kwargs)
do_zoc = config["zoc"].pop("required")
if do_zoc:
logger.info("ZOC config: {}".format(config["zoc"]))
tdr.zoc(config["zoc"]["method"], **config["zoc"]["parameters"])
logger.info("Wet/Dry config: {}".format(config["wet_dry"]))
tdr.detect_wet(**config["wet_dry"])
logger.info("Dives config: {}".format(config["dives"]))
tdr.detect_dives(config["dives"].pop("dive_thr"))
tdr.detect_dive_phases(**config["dives"])
do_speed_calib = bool(config["speed_calib"].pop("required"))
if tdr.has_speed and do_speed_calib:
logger.info("Speed calibration config: {}"
.format(config["speed_calib"]))
tdr.calibrate_speed(**config["speed_calib"], plot=False)
return tdr
if __name__ == '__main__':
# Set up info level logging
logging.basicConfig(level=logging.INFO)
ifile = r"tests/data/ag_mk7_2002_022.nc"
tdrX = TDR.read_netcdf(ifile, has_speed=True)
# tdrX = TDRSource(ifile, has_speed=True)
# print(tdrX) | scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/tdr.py | tdr.py | import logging
import numpy as np
import pandas as pd
from skdiveMove.tdrphases import TDRPhases
import skdiveMove.plotting as plotting
import skdiveMove.calibspeed as speedcal
from skdiveMove.helpers import (get_var_sampling_interval,
_get_dive_indices, _append_xr_attr,
_one_dive_stats, _speed_stats)
import skdiveMove.calibconfig as calibconfig
import xarray as xr
logger = logging.getLogger(__name__)
# Add the null handler when importing as a library; whatever uses this
# library should set up logging.basicConfig() as needed
logger.addHandler(logging.NullHandler())
# Keep attributes in xarray operations
xr.set_options(keep_attrs=True)
class TDR(TDRPhases):
"""Base class encapsulating TDR objects and processing
TDR subclasses `TDRPhases` to provide comprehensive TDR processing
capabilities.
See help(TDR) for inherited attributes.
Attributes
----------
speed_calib_fit : quantreg model fit
Model object fit by quantile regression for speed calibration.
Examples
--------
Construct an instance from diveMove example dataset
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
Plot the `TDR` object
>>> tdrX.plot() # doctest: +ELLIPSIS
(<Figure ... 1 Axes>, <AxesSubplot:...>)
"""
def __init__(self, *args, **kwargs):
"""Set up attributes for TDR objects
Parameters
----------
*args : positional arguments
Passed to :meth:`TDRPhases.__init__`
**kwargs : keyword arguments
Passed to :meth:`TDRPhases.__init__`
"""
TDRPhases.__init__(self, *args, **kwargs)
# Speed calibration fit
self.speed_calib_fit = None
def __str__(self):
base = TDRPhases.__str__(self)
speed_fmt_pref = "Speed calibration coefficients:"
if self.speed_calib_fit is not None:
speed_ccoef_a, speed_ccoef_b = self.speed_calib_fit.params
speed_coefs_fmt = ("\n{0:<20} (a={1:.4f}, b={2:.4f})"
.format(speed_fmt_pref,
speed_ccoef_a, speed_ccoef_b))
else:
speed_ccoef_a, speed_ccoef_b = (None, None)
speed_coefs_fmt = ("\n{0:<20} (a=None, b=None)"
.format(speed_fmt_pref))
return base + speed_coefs_fmt
def calibrate_speed(self, tau=0.1, contour_level=0.1, z=0, bad=[0, 0],
**kwargs):
"""Calibrate speed measurements
Set the `speed_calib_fit` attribute
Parameters
----------
tau : float, optional
Quantile on which to regress speed on rate of depth change.
contour_level : float, optional
The mesh obtained from the bivariate kernel density estimation
corresponding to this contour will be used for the quantile
regression to define the calibration line.
z : float, optional
Only changes in depth larger than this value will be used for
calibration.
bad : array_like, optional
            Two-element `array_like`; only rates of depth change greater
            than the first value, and speeds greater than the second, are
            used for calibration.
**kwargs : optional keyword arguments
Passed to :func:`~speedcal.calibrate_speed`
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.calibrate_speed(z=2)
"""
depth = self.get_depth("zoc").to_series()
ddiffs = depth.reset_index().diff().set_index(depth.index)
ddepth = ddiffs["depth"].abs()
rddepth = ddepth / ddiffs[depth.index.name].dt.total_seconds()
curspeed = self.get_speed("measured").to_series()
ok = (ddepth > z) & (rddepth > bad[0]) & (curspeed > bad[1])
rddepth = rddepth[ok]
curspeed = curspeed[ok]
kde_data = pd.concat((rddepth.rename("depth_rate"),
curspeed), axis=1)
qfit, ax = speedcal.calibrate_speed(kde_data, tau=tau,
contour_level=contour_level,
z=z, bad=bad, **kwargs)
self.speed_calib_fit = qfit
logger.info("Finished calibrating speed")
def dive_stats(self, depth_deriv=True):
"""Calculate dive statistics in `TDR` records
Parameters
----------
depth_deriv : bool, optional
Whether to compute depth derivative statistics.
Returns
-------
pandas.DataFrame
Notes
-----
        This method is homologous to diveMove's `diveStats` function.
Examples
--------
ZOC using the "filter" method
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> # Window lengths and probabilities
>>> DB = [-2, 5]
>>> K = [3, 5760]
>>> P = [0.5, 0.02]
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.dive_stats() # doctest: +ELLIPSIS
begdesc ... postdive_mean_speed
1 2002-01-05 ... 1.398859
2 ...
"""
phases_df = self.get_dives_details("row_ids")
idx_name = phases_df.index.name
# calib_speed=False if no fit object
if self.has_speed:
tdr = (self.get_tdr(calib_depth=True,
calib_speed=bool(self.speed_calib_fit))
[[self.depth_name, self.speed_name]])
else:
tdr = (self.get_tdr(calib_depth=True,
calib_speed=bool(self.speed_calib_fit))
[[self.depth_name]])
intvl = (get_var_sampling_interval(tdr[self.depth_name])
.total_seconds())
tdr = tdr.to_dataframe()
dive_ids = phases_df.loc[:, "dive_id"]
postdive_ids = phases_df.loc[:, "postdive_id"]
ok = (dive_ids > 0) & dive_ids.isin(postdive_ids)
okpd = (postdive_ids > 0) & postdive_ids.isin(dive_ids)
postdive_ids = postdive_ids[okpd]
postdive_dur = (postdive_ids.reset_index()
.groupby("postdive_id")
.apply(lambda x: x.iloc[-1] - x.iloc[0]))
# Enforce UTC, as otherwise rpy2 uses our locale in the output of
# OneDiveStats
tdrf = (pd.concat((phases_df[["dive_id", "dive_phase"]][ok],
tdr.loc[ok.index[ok]]), axis=1)
.tz_localize("UTC").reset_index())
# Ugly hack to re-order columns for `diveMove` convention
names0 = ["dive_id", "dive_phase", idx_name, self.depth_name]
colnames = tdrf.columns.to_list()
if self.has_speed:
names0.append(self.speed_name)
colnames = names0 + list(set(colnames) - set(names0))
tdrf = tdrf.reindex(columns=colnames)
tdrf_grp = tdrf.groupby("dive_id")
ones_list = []
for name, grp in tdrf_grp:
res = _one_dive_stats(grp.loc[:, names0], interval=intvl,
has_speed=self.has_speed)
# Rename to match dive number
res = res.rename({0: name})
if depth_deriv:
deriv_stats = self._get_dive_deriv_stats(name)
res = pd.concat((res, deriv_stats), axis=1)
ones_list.append(res)
ones_df = pd.concat(ones_list, ignore_index=True)
ones_df.set_index(dive_ids[ok].unique(), inplace=True)
ones_df.index.rename("dive_id", inplace=True)
ones_df["postdive_dur"] = postdive_dur[idx_name]
# For postdive total distance and mean speed (if available)
if self.has_speed:
speed_postd = (tdr[self.speed_name][okpd]
.groupby(postdive_ids))
pd_speed_ll = []
for name, grp in speed_postd:
res = _speed_stats(grp.reset_index())
onames = ["postdive_tdist", "postdive_mean_speed"]
res_df = pd.DataFrame(res[:, :-1], columns=onames,
index=[name])
pd_speed_ll.append(res_df)
pd_speed_stats = pd.concat(pd_speed_ll)
ones_df = pd.concat((ones_df, pd_speed_stats), axis=1)
return ones_df
def plot(self, concur_vars=None, concur_var_titles=None, **kwargs):
"""Plot TDR object
Parameters
----------
concur_vars : str or list, optional
String or list of strings with names of columns in input to
select additional data to plot.
concur_var_titles : str or list, optional
String or list of strings with y-axis labels for `concur_vars`.
**kwargs : optional keyword arguments
Arguments passed to plotting function.
Returns
-------
tuple
:class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes` instances.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> tdrX.plot(xlim=["2002-01-05 21:00:00", "2002-01-06 04:10:00"],
... depth_lim=[95, -1]) # doctest: +ELLIPSIS
(<Figure ... with 1 Axes>, <AxesSubplot:...'>)
"""
try:
depth = self.get_depth("zoc")
except LookupError:
depth = self.get_depth("measured")
if "ylab_depth" not in kwargs:
ylab_depth = ("{0} [{1}]"
.format(depth.attrs["full_name"],
depth.attrs["units"]))
kwargs.update(ylab_depth=ylab_depth)
depth = depth.to_series()
if concur_vars is None:
fig, ax = plotting.plot_tdr(depth, **kwargs)
elif concur_var_titles is None:
ccvars = self.tdr[concur_vars].to_dataframe()
fig, ax = plotting.plot_tdr(depth, concur_vars=ccvars, **kwargs)
else:
ccvars = self.tdr[concur_vars].to_dataframe()
ccvars_title = concur_var_titles # just to shorten
fig, ax = plotting.plot_tdr(depth,
concur_vars=ccvars,
concur_var_titles=ccvars_title,
**kwargs)
return (fig, ax)
def plot_zoc(self, xlim=None, ylim=None, **kwargs):
"""Plot zero offset correction filters
Parameters
----------
xlim, ylim : 2-tuple/list, optional
Minimum and maximum limits for ``x``- and ``y``-axis,
respectively.
**kwargs : optional keyword arguments
Passed to :func:`~matplotlib.pyplot.subplots`.
Returns
-------
tuple
:class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes` instances.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> # Window lengths and probabilities
>>> DB = [-2, 5]
>>> K = [3, 5760]
>>> P = [0.5, 0.02]
>>> tdrX.zoc("filter", k=K, probs=P, depth_bounds=DB)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.plot_zoc() # doctest: +ELLIPSIS
(<Figure ... with 3 Axes>, array([<AxesSubplot:...'>,
<AxesSubplot:...'>, <AxesSubplot:...>], dtype=object))
"""
zoc_method = self.zoc_method
depth_msrd = self.get_depth("measured")
ylab = ("{0} [{1}]"
.format(depth_msrd.attrs["full_name"],
depth_msrd.attrs["units"]))
if zoc_method == "filter":
zoc_filters = self.zoc_filters
depth = depth_msrd.to_series()
if "ylab" not in kwargs:
kwargs.update(ylab=ylab)
fig, ax = (plotting
._plot_zoc_filters(depth, zoc_filters, xlim, ylim,
**kwargs))
elif zoc_method == "offset":
depth_msrd = depth_msrd.to_series()
depth_zoc = self.get_depth("zoc").to_series()
fig, ax = plotting.plt.subplots(1, 1, **kwargs)
ax = depth_msrd.plot(ax=ax, rot=0, label="measured")
depth_zoc.plot(ax=ax, label="zoc")
ax.axhline(0, linestyle="--", linewidth=0.75, color="k")
ax.set_xlabel("")
ax.set_ylabel(ylab)
ax.legend(loc="lower right")
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.invert_yaxis()
return (fig, ax)
def plot_phases(self, diveNo=None, concur_vars=None,
concur_var_titles=None, surface=False, **kwargs):
"""Plot major phases found on the object
Parameters
----------
diveNo : array_like, optional
List of dive numbers (1-based) to plot.
concur_vars : str or list, optional
String or list of strings with names of columns in input to
select additional data to plot.
concur_var_titles : str or list, optional
String or list of strings with y-axis labels for `concur_vars`.
surface : bool, optional
Whether to plot surface readings.
**kwargs : optional keyword arguments
Arguments passed to plotting function.
Returns
-------
tuple
:class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes` instances.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.plot_phases(list(range(250, 300)),
... surface=True) # doctest: +ELLIPSIS
(<Figure ... with 1 Axes>, <AxesSubplot:...>)
"""
row_ids = self.get_dives_details("row_ids")
dive_ids = row_ids["dive_id"]
dive_ids_uniq = dive_ids.unique()
postdive_ids = row_ids["postdive_id"]
if diveNo is None:
diveNo = np.arange(1, row_ids["dive_id"].max() + 1).tolist()
else:
diveNo = [x for x in sorted(diveNo) if x in dive_ids_uniq]
depth_all = self.get_depth("zoc").to_dataframe() # DataFrame
if concur_vars is None:
dives_all = depth_all
else:
concur_df = self.tdr.to_dataframe().loc[:, concur_vars]
dives_all = pd.concat((depth_all, concur_df), axis=1)
isin_dive_ids = dive_ids.isin(diveNo)
isin_postdive_ids = postdive_ids.isin(diveNo)
if surface:
isin = isin_dive_ids | isin_postdive_ids
dives_in = dives_all[isin]
sfce0_idx = (postdive_ids[postdive_ids == diveNo[0] - 1]
.last_valid_index())
dives_df = pd.concat((dives_all.loc[[sfce0_idx]], dives_in),
axis=0)
details_df = pd.concat((row_ids.loc[[sfce0_idx]], row_ids[isin]),
axis=0)
else:
idx_ok = _get_dive_indices(dive_ids, diveNo)
dives_df = dives_all.iloc[idx_ok, :]
details_df = row_ids.iloc[idx_ok, :]
wet_dry = self.time_budget(ignore_z=True, ignore_du=True)
drys = wet_dry[wet_dry["phase_label"] == "L"][["beg", "end"]]
if (drys.shape[0] > 0):
dry_time = drys
else:
dry_time = None
if concur_vars is None:
fig, ax = (plotting
.plot_tdr(dives_df.iloc[:, 0],
phase_cat=details_df["dive_phase"],
dry_time=dry_time, **kwargs))
else:
fig, ax = (plotting
.plot_tdr(dives_df.iloc[:, 0],
concur_vars=dives_df.iloc[:, 1:],
concur_var_titles=concur_var_titles,
phase_cat=details_df["dive_phase"],
dry_time=dry_time, **kwargs))
return (fig, ax)
def plot_dive_model(self, diveNo=None, **kwargs):
"""Plot dive model for selected dive
Parameters
----------
diveNo : array_like, optional
List of dive numbers (1-based) to plot.
**kwargs : optional keyword arguments
Arguments passed to plotting function.
Returns
-------
tuple
:class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes` instances.
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd()
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.plot_dive_model(diveNo=20,
... figsize=(10, 10)) # doctest: +ELLIPSIS
(<Figure ... with 2 Axes>, (<AxesSubplot:...>, <AxesSubplot:...>))
"""
dive_ids = self.get_dives_details("row_ids", "dive_id")
crit_vals = self.get_dives_details("crit_vals").loc[diveNo]
idxs = _get_dive_indices(dive_ids, diveNo)
depth = self.get_depth("zoc").to_dataframe().iloc[idxs]
depth_s = self._get_dive_spline_slot(diveNo, "xy")
depth_deriv = (self.get_dives_details("spline_derivs").loc[diveNo])
# Index with time stamp
if depth.shape[0] < 4:
depth_s_idx = pd.date_range(depth.index[0], depth.index[-1],
periods=depth_s.shape[0],
tz=depth.index.tz)
depth_s = pd.Series(depth_s.to_numpy(), index=depth_s_idx)
dderiv_idx = pd.date_range(depth.index[0], depth.index[-1],
periods=depth_deriv.shape[0],
tz=depth.index.tz)
# Extract only the series and index with time stamp
depth_deriv = pd.Series(depth_deriv["y"].to_numpy(),
index=dderiv_idx)
else:
depth_s = pd.Series(depth_s.to_numpy(),
index=depth.index[0] + depth_s.index)
# Extract only the series and index with time stamp
depth_deriv = pd.Series(depth_deriv["y"].to_numpy(),
index=depth.index[0] + depth_deriv.index)
# Force integer again as `loc` coerced to float above
d_crit = crit_vals["descent_crit"].astype(int)
a_crit = crit_vals["ascent_crit"].astype(int)
d_crit_rate = crit_vals["descent_crit_rate"]
a_crit_rate = crit_vals["ascent_crit_rate"]
title = "Dive: {:d}".format(diveNo)
fig, axs = plotting.plot_dive_model(depth, depth_s=depth_s,
depth_deriv=depth_deriv,
d_crit=d_crit, a_crit=a_crit,
d_crit_rate=d_crit_rate,
a_crit_rate=a_crit_rate,
leg_title=title, **kwargs)
return (fig, axs)
def get_depth(self, kind="measured"):
"""Retrieve depth records
Parameters
----------
kind : {"measured", "zoc"}
Which depth to retrieve.
Returns
-------
xarray.DataArray
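        Examples
        --------
        A minimal, hedged sketch reusing the package's test data (the
        offset value is illustrative only):

        >>> from skdiveMove.tests import diveMove2skd
        >>> tdrX = diveMove2skd()
        >>> tdrX.zoc("offset", offset=3)
        >>> depth_zoc = tdrX.get_depth("zoc")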
"""
kinds = ["measured", "zoc"]
if kind == kinds[0]:
odepth = self.depth
elif kind == kinds[1]:
odepth = self.depth_zoc
if odepth is None:
msg = "ZOC depth not available."
logger.error(msg)
raise LookupError(msg)
else:
msg = "kind must be one of: {}".format(kinds)
logger.error(msg)
raise LookupError(msg)
return odepth
def get_speed(self, kind="measured"):
"""Retrieve speed records
Parameters
----------
kind : {"measured", "calibrated"}
Which speed to retrieve.
Returns
-------
xarray.DataArray
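        Examples
        --------
        A minimal, hedged sketch using the package's test data; only the
        measured (uncalibrated) speed is retrieved here:

        >>> from skdiveMove.tests import diveMove2skd
        >>> tdrX = diveMove2skd(has_speed=True)
        >>> speed = tdrX.get_speed("measured")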
"""
kinds = ["measured", "calibrated"]
ispeed = self.speed
if kind == kinds[0]:
ospeed = ispeed
elif kind == kinds[1]:
qfit = self.speed_calib_fit
if qfit is None:
msg = "Calibrated speed not available."
logger.error(msg)
raise LookupError(msg)
else:
coefs = qfit.params
coef_a = coefs[0]
coef_b = coefs[1]
ospeed = (ispeed - coef_a) / coef_b
_append_xr_attr(ospeed, "history", "speed_calib_fit")
else:
msg = "kind must be one of: {}".format(kinds)
logger.error(msg)
raise LookupError(msg)
return ospeed
def get_tdr(self, calib_depth=True, calib_speed=True):
"""Return a copy of tdr Dataset
Parameters
----------
calib_depth : bool, optional
Whether to return calibrated depth measurements.
calib_speed : bool, optional
Whether to return calibrated speed measurements.
Returns
-------
xarray.Dataset
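        Examples
        --------
        A hedged sketch: return the Dataset with zero-offset corrected
        depth but leave speed uncalibrated (the offset value is
        illustrative only):

        >>> from skdiveMove.tests import diveMove2skd
        >>> tdrX = diveMove2skd()
        >>> tdrX.zoc("offset", offset=3)
        >>> tdr = tdrX.get_tdr(calib_depth=True, calib_speed=False)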
"""
tdr = self.tdr.copy()
if calib_depth:
depth_name = self.depth_name
depth_cal = self.get_depth("zoc")
tdr[depth_name] = depth_cal
if self.has_speed and calib_speed:
speed_name = self.speed_name
speed_cal = self.get_speed("calibrated")
tdr[speed_name] = speed_cal
return tdr
def extract_dives(self, diveNo, **kwargs):
"""Extract TDR data corresponding to a particular set of dives
Parameters
----------
        diveNo : array_like, optional
            List of dive numbers (1-based) to extract.
**kwargs : optional keyword arguments
Passed to :meth:`get_tdr`
Returns
-------
xarray.Dataset
Examples
--------
>>> from skdiveMove.tests import diveMove2skd
>>> tdrX = diveMove2skd(has_speed=False)
>>> tdrX.zoc("offset", offset=3)
>>> tdrX.detect_wet()
>>> tdrX.detect_dives(3)
>>> tdrX.detect_dive_phases("unimodal", descent_crit_q=0.01,
... ascent_crit_q=0, knot_factor=20)
>>> tdrX.extract_dives(diveNo=20) # doctest: +ELLIPSIS
<xarray.Dataset>
Dimensions: ...
"""
dive_ids = self.get_dives_details("row_ids", "dive_id")
idx_name = dive_ids.index.name
idxs = _get_dive_indices(dive_ids, diveNo)
tdr = self.get_tdr(**kwargs)
tdr_i = tdr[{idx_name: idxs.astype(int)}]
return tdr_i
def calibrate(tdr_file, config_file=None):
"""Perform all major TDR calibration operations
    Detect periods of major activities in a `TDR` object, calibrate depth
    readings and, if appropriate, speed readings, in preparation for
    subsequent summaries of diving behaviour.
This function is a convenience wrapper around :meth:`~TDR.detect_wet`,
:meth:`~TDR.detect_dives`, :meth:`~TDR.detect_dive_phases`,
:meth:`~TDR.zoc`, and :meth:`~TDR.calibrate_speed`. It performs
wet/dry phase detection, zero-offset correction of depth, detection of
dives, as well as proper labelling of the latter, and calibrates speed
data if appropriate.
Due to the complexity of this procedure, and the number of settings
required for it, a calibration configuration file (JSON) is used to
guide the operations.
Parameters
----------
tdr_file : str, Path or xarray.backends.*DataStore
As first argument for :func:`xarray.load_dataset`.
config_file : str
A valid string path for TDR calibration configuration file.
Returns
-------
out : TDR
See Also
--------
dump_config_template : configuration template
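    Examples
    --------
    A hedged sketch; the file names below are placeholders for a NetCDF
    data file and a JSON configuration file such as the one produced by
    :func:`dump_config_template`:

    >>> tdr = calibrate("tdr_data.nc", "tdr_config.json")  # doctest: +SKIP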
"""
if config_file is None:
config = calibconfig._DEFAULT_CONFIG
else:
config = calibconfig.read_config(config_file)
logger = logging.getLogger(__name__)
logger.setLevel(config["log_level"])
load_dataset_kwargs = config["read"].pop("load_dataset_kwargs")
logger.info("Reading config: {}, {}"
.format(config["read"], load_dataset_kwargs))
tdr = TDR.read_netcdf(tdr_file, **config["read"], **load_dataset_kwargs)
do_zoc = config["zoc"].pop("required")
if do_zoc:
logger.info("ZOC config: {}".format(config["zoc"]))
tdr.zoc(config["zoc"]["method"], **config["zoc"]["parameters"])
logger.info("Wet/Dry config: {}".format(config["wet_dry"]))
tdr.detect_wet(**config["wet_dry"])
logger.info("Dives config: {}".format(config["dives"]))
tdr.detect_dives(config["dives"].pop("dive_thr"))
tdr.detect_dive_phases(**config["dives"])
do_speed_calib = bool(config["speed_calib"].pop("required"))
if tdr.has_speed and do_speed_calib:
logger.info("Speed calibration config: {}"
.format(config["speed_calib"]))
tdr.calibrate_speed(**config["speed_calib"], plot=False)
return tdr
if __name__ == '__main__':
# Set up info level logging
logging.basicConfig(level=logging.INFO)
ifile = r"tests/data/ag_mk7_2002_022.nc"
tdrX = TDR.read_netcdf(ifile, has_speed=True)
# tdrX = TDRSource(ifile, has_speed=True)
    # print(tdrX)
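    # A hedged alternative sketch: the same file could be processed
    # end-to-end with the `calibrate` wrapper defined above (the
    # configuration file name is a placeholder):
    # tdr_cal = calibrate(ifile, config_file="tdr_config.json")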
import json
__all__ = ["dump_config_template", "dump_config", "read_config"]
_DEFAULT_CONFIG = {
'log_level': "INFO",
'read': {
'depth_name': "depth",
'time_name': "timestamp",
'subsample': 5,
'has_speed': False,
'load_dataset_kwargs': {}
},
'zoc': {
'required': True,
'method': "offset",
'parameters': {'offset': 0}
},
'wet_dry': {
'dry_thr': 70,
'wet_thr': 3610,
'interp_wet': False
},
'dives': {
'dive_thr': 4,
'dive_model': "unimodal",
'knot_factor': 3,
'descent_crit_q': 0,
'ascent_crit_q': 0
},
'speed_calib': {
'required': False,
'tau': 0.1,
'contour_level': 0.1,
'z': 0,
'bad': [0, 0]
}
}
_DUMP_INDENT = 4
def dump_config_template(fname):
"""Dump configuration template file
Dump a json configuration template file to set up TDR calibration.
Parameters
----------
fname : str or file-like
A valid string path, or `file-like` object, for output file.
Examples
--------
>>> dump_config_template("tdr_config.json") # doctest: +SKIP
Edit the file to your specifications.
"""
with open(fname, "w") as ofile:
json.dump(_DEFAULT_CONFIG, ofile, indent=_DUMP_INDENT)
def read_config(config_file):
"""Read configuration file into dictionary
Parameters
----------
config_file : str or file-like
A valid string path, or `file-like` object, for input file.
Returns
-------
out : dict
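    Examples
    --------
    A hedged sketch, assuming a configuration file created beforehand,
    e.g. with :func:`dump_config_template`:

    >>> config = read_config("tdr_config.json")  # doctest: +SKIP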
"""
with open(config_file, "r") as ifile:
config = json.load(ifile)
return config
def dump_config(fname, config_dict):
"""Dump configuration dictionary to file
Dump a dictionary onto a JSON configuration file to set up TDR
calibration.
Parameters
----------
fname : str or file-like
A valid string path, or `file-like` object, for output file.
config_dict : dict
Dictionary to dump.
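    Examples
    --------
    A hedged sketch: read the template, adjust a setting, and write it
    back (the file name is a placeholder):

    >>> config = read_config("tdr_config.json")  # doctest: +SKIP
    >>> config["zoc"]["parameters"]["offset"] = 3  # doctest: +SKIP
    >>> dump_config("tdr_config.json", config)  # doctest: +SKIP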
"""
with open(fname, "w") as ofile:
json.dump(config_dict, ofile, indent=_DUMP_INDENT)
if __name__ == '__main__':
dump_config_template("tdr_config.json")
    config = read_config("tdr_config.json")
| scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/calibconfig.py | calibconfig.py |
__all__ = ["dump_config_template", "dump_config", "read_config"]
_DEFAULT_CONFIG = {
'log_level': "INFO",
'read': {
'depth_name': "depth",
'time_name': "timestamp",
'subsample': 5,
'has_speed': False,
'load_dataset_kwargs': {}
},
'zoc': {
'required': True,
'method': "offset",
'parameters': {'offset': 0}
},
'wet_dry': {
'dry_thr': 70,
'wet_thr': 3610,
'interp_wet': False
},
'dives': {
'dive_thr': 4,
'dive_model': "unimodal",
'knot_factor': 3,
'descent_crit_q': 0,
'ascent_crit_q': 0
},
'speed_calib': {
'required': False,
'tau': 0.1,
'contour_level': 0.1,
'z': 0,
'bad': [0, 0]
}
}
_DUMP_INDENT = 4
def dump_config_template(fname):
"""Dump configuration template file
Dump a json configuration template file to set up TDR calibration.
Parameters
----------
fname : str or file-like
A valid string path, or `file-like` object, for output file.
Examples
--------
>>> dump_config_template("tdr_config.json") # doctest: +SKIP
Edit the file to your specifications.
"""
with open(fname, "w") as ofile:
json.dump(_DEFAULT_CONFIG, ofile, indent=_DUMP_INDENT)
def read_config(config_file):
"""Read configuration file into dictionary
Parameters
----------
config_file : str or file-like
A valid string path, or `file-like` object, for input file.
Returns
-------
out : dict
"""
with open(config_file, "r") as ifile:
config = json.load(ifile)
return config
def dump_config(fname, config_dict):
"""Dump configuration dictionary to file
Dump a dictionary onto a JSON configuration file to set up TDR
calibration.
Parameters
----------
fname : str or file-like
A valid string path, or `file-like` object, for output file.
config_dict : dict
Dictionary to dump.
"""
with open(fname, "w") as ofile:
json.dump(config_dict, ofile, indent=_DUMP_INDENT)
if __name__ == '__main__':
dump_config_template("tdr_config.json")
config = read_config("tdr_config.json") | 0.680879 | 0.126111 |
import numpy as np
from scipy.optimize import curve_fit
# Mapping of error type with corresponding tau and slope on the log-log
# Allan deviation plot: Q (quantization), ARW (angle/velocity random walk),
# BI (bias instability), RRW (rate random walk), RR (rate ramp)
_ERROR_DEFS = {"Q": [np.sqrt(3), -1], "ARW": [1.0, -0.5],
"BI": [np.nan, 0], "RRW": [3.0, 0.5],
"RR": [np.sqrt(2), 1]}
def _armav_nls_fun(x, *args):
coefs = np.array(args).reshape(len(args), 1)
return np.log10(np.dot(x, coefs ** 2)).flatten()
def _armav(taus, adevs):
nsize = taus.size
# Linear regressor matrix
x0 = np.sqrt(np.column_stack([3 / (taus ** 2), 1 / taus,
np.ones(nsize), taus / 3,
taus ** 2 / 2]))
# Ridge regression bias constant
lambda0 = 5e-3
id0 = np.eye(5)
sigma0 = np.linalg.solve((np.dot(x0.T, x0) + lambda0 * id0),
np.dot(x0.T, adevs))
# TODO: need to be able to set bounds
popt, pcov = curve_fit(_armav_nls_fun, x0 ** 2,
np.log10(adevs ** 2), p0=sigma0)
# Compute the bias instability
sigma_hat = np.abs(popt)
adev_reg = np.sqrt(np.dot(x0 ** 2, sigma_hat ** 2))
sigma_hat[2] = np.min(adev_reg) / np.sqrt((2 * np.log(2) / np.pi))
return (sigma_hat, popt, adev_reg)
def _line_fun(t, alpha, tau_crit, adev_crit):
"""Find Allan sigma coefficient from line and point
Log-log parameterization of the point-slope line equation.
Parameters
----------
t : {float, array_like}
Averaging time
alpha : float
Slope of Allan deviation line
tau_crit : float
Observed averaging time
adev_crit : float
Observed Allan deviation at `tau_crit`
"""
return (10 ** (alpha * (np.log10(t) - np.log10(tau_crit)) +
np.log10(adev_crit)))
def allan_coefs(taus, adevs):
"""Compute Allan deviation coefficients for each error type
Given averaging intervals ``taus`` and corresponding Allan deviation
``adevs``, compute the Allan deviation coefficient for each error type:
- Quantization
- (Angle, Velocity) Random Walk
- Bias Instability
- Rate Random Walk
- Rate Ramp
Parameters
----------
taus : array_like
Averaging times
adevs : array_like
Allan deviation
Returns
-------
    sigmas_hat : dict
Dictionary with `tau` value and associated Allan deviation
coefficient for each error type.
adev_reg : numpy.ndarray
The array of Allan deviations fitted to `taus`.
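    Examples
    --------
    A hedged sketch with a synthetic Allan deviation curve dominated by
    white noise (slope -1/2); the values are illustrative only:

    >>> taus = np.logspace(0, 3, 50)
    >>> adevs = 0.1 / np.sqrt(taus)
    >>> sigmas, adev_fit = allan_coefs(taus, adevs)  # doctest: +SKIP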
"""
# Fit ARMAV model
sigmas_hat, popt, adev_reg = _armav(taus, adevs)
sigmas_d = dict(zip(_ERROR_DEFS.keys(), sigmas_hat))
    return (sigmas_d, adev_reg)
| scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/imutools/allan.py | allan.py |
import numpy as np
from scipy.spatial.transform import Rotation as R
def normalize(v):
"""Normalize vector
Parameters
----------
v : array_like (N,) or (M,N)
input vector
Returns
-------
numpy.ndarray
Normalized vector having magnitude 1.
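    Examples
    --------
    A minimal illustration (output shown with NumPy's default printing):

    >>> normalize(np.array([1.0, 2.0, 2.0]))
    array([0.33333333, 0.66666667, 0.66666667])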
"""
return v / np.linalg.norm(v, axis=-1, keepdims=True)
def vangle(v1, v2):
"""Angle between one or more vectors
Parameters
----------
v1 : array_like (N,) or (M,N)
vector 1
v2 : array_like (N,) or (M,N)
vector 2
Returns
-------
angle : double or numpy.ndarray(M,)
angle between v1 and v2
    Examples
    --------
>>> v1 = np.array([[1,2,3],
... [4,5,6]])
>>> v2 = np.array([[1,0,0],
... [0,1,0]])
>>> vangle(v1,v2)
array([1.30024656, 0.96453036])
Notes
-----
.. image:: .static/images/vector_angle.png
:scale: 75%
.. math::
\\alpha =arccos(\\frac{\\vec{v_1} \\cdot \\vec{v_2}}{| \\vec{v_1} |
\\cdot | \\vec{v_2}|})
"""
v1_norm = v1 / np.linalg.norm(v1, axis=-1, keepdims=True)
v2_norm = v2 / np.linalg.norm(v2, axis=-1, keepdims=True)
v1v2 = np.einsum("ij,ij->i", *np.atleast_2d(v1_norm, v2_norm))
angle = np.arccos(v1v2)
if len(angle) == 1:
angle = angle.item()
return angle
def rotate_vector(vector, q, inverse=False):
"""Apply rotations to vector or array of vectors given quaternions
Parameters
----------
vector : array_like
One (1D) or more (2D) array with vectors to rotate.
q : array_like
One (1D) or more (2D) array with quaternion vectors. The scalar
        component must be last to match `scipy`'s convention.
    inverse : bool, optional
        Whether to apply the inverse of the rotation(s) given in `q`.
Returns
-------
numpy.ndarray
The rotated input vector array.
Notes
-----
.. image:: .static/images/vector_rotate.png
:scale: 75%
.. math::
q \\circ \\left( {\\vec x \\cdot \\vec I} \\right) \\circ {q^{ - 1}} =
\\left( {{\\bf{R}} \\cdot \\vec x} \\right) \\cdot \\vec I
More info under
http://en.wikipedia.org/wiki/Quaternion
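    Examples
    --------
    A hedged sketch: rotate the x unit vector by 90 degrees about the z
    axis, with the quaternion given in scalar-last convention:

    >>> q = np.array([0, 0, np.sin(np.pi / 4), np.cos(np.pi / 4)])
    >>> np.allclose(rotate_vector(np.array([1.0, 0.0, 0.0]), q), [0, 1, 0])
    True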
"""
rotator = R.from_quat(q)
    return rotator.apply(vector, inverse=inverse)
| scikit-diveMove | /scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/skdiveMove/imutools/vector.py | vector.py |