# File: pandas-main/generate_pxi.py

import argparse
import os

from Cython import Tempita


def process_tempita(pxifile, outfile) -> None:
    with open(pxifile, encoding='utf-8') as f:
        tmpl = f.read()
    pyxcontent = Tempita.sub(tmpl)
    with open(outfile, 'w', encoding='utf-8') as f:
        f.write(pyxcontent)


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument('infile', type=str, help='Path to the input file')
    parser.add_argument('-o', '--outdir', type=str, help='Path to the output directory')
    args = parser.parse_args()
    if not args.infile.endswith('.in'):
        raise ValueError(f'Unexpected extension: {args.infile}')
    outdir_abs = os.path.join(os.getcwd(), args.outdir)
    outfile = os.path.join(outdir_abs, os.path.splitext(os.path.split(args.infile)[1])[0])
    process_tempita(args.infile, outfile)


main()

# File: pandas-main/generate_version.py

import argparse
import os
import sys

import versioneer

sys.path.insert(0, '')


def write_version_info(path) -> None:
    version = None
    git_version = None
    try:
        import _version_meson
        version = _version_meson.__version__
        git_version = _version_meson.__git_version__
    except ImportError:
        version = versioneer.get_version()
        git_version = versioneer.get_versions()['full-revisionid']
    if os.environ.get('MESON_DIST_ROOT'):
        path = os.path.join(os.environ.get('MESON_DIST_ROOT'), path)
    with open(path, 'w', encoding='utf-8') as file:
        file.write(f'__version__="{version}"\n')
        file.write(f'__git_version__="{git_version}"\n')


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--outfile', type=str, help='Path to write version info to', required=False)
    parser.add_argument('--print', default=False, action='store_true', help='Whether to print out the version', required=False)
    args = parser.parse_args()
    if args.outfile:
        if not args.outfile.endswith('.py'):
            raise ValueError(f'Output file must be a Python file. Got: {args.outfile} as filename instead')
        write_version_info(args.outfile)
    if args.print:
        try:
            import _version_meson
            version = _version_meson.__version__
        except ImportError:
            version = versioneer.get_version()
        print(version)


main()

# File: pandas-main/pandas/__init__.py

from __future__ import annotations

__docformat__ = 'restructuredtext'

_hard_dependencies = ('numpy', 'dateutil')
_missing_dependencies = []
for _dependency in _hard_dependencies:
    try:
        __import__(_dependency)
    except ImportError as _e:
        _missing_dependencies.append(f'{_dependency}: {_e}')
if _missing_dependencies:
    raise ImportError('Unable to import required dependencies:\n' + '\n'.join(_missing_dependencies))
del _hard_dependencies, _dependency, _missing_dependencies
try:
    from pandas.compat import is_numpy_dev as _is_numpy_dev
except ImportError as _err:
    _module = _err.name
    raise ImportError(f"C extension: {_module} not built. If you want to import pandas from the source directory, you may need to run 'python -m pip install -ve . 
--no-build-isolation -Ceditable-verbose=true' to build the C extensions first.") from _err from pandas._config import get_option, set_option, reset_option, describe_option, option_context, options import pandas.core.config_init from pandas.core.api import ArrowDtype, Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype, Float32Dtype, Float64Dtype, CategoricalDtype, PeriodDtype, IntervalDtype, DatetimeTZDtype, StringDtype, BooleanDtype, NA, isna, isnull, notna, notnull, Index, CategoricalIndex, RangeIndex, MultiIndex, IntervalIndex, TimedeltaIndex, DatetimeIndex, PeriodIndex, IndexSlice, NaT, Period, period_range, Timedelta, timedelta_range, Timestamp, date_range, bdate_range, Interval, interval_range, DateOffset, to_numeric, to_datetime, to_timedelta, Flags, Grouper, factorize, unique, NamedAgg, array, Categorical, set_eng_float_format, Series, DataFrame from pandas.core.dtypes.dtypes import SparseDtype from pandas.tseries.api import infer_freq from pandas.tseries import offsets from pandas.core.computation.api import eval from pandas.core.reshape.api import concat, lreshape, melt, wide_to_long, merge, merge_asof, merge_ordered, crosstab, pivot, pivot_table, get_dummies, from_dummies, cut, qcut from pandas import api, arrays, errors, io, plotting, tseries from pandas import testing from pandas.util._print_versions import show_versions from pandas.io.api import ExcelFile, ExcelWriter, read_excel, read_csv, read_fwf, read_table, read_pickle, to_pickle, HDFStore, read_hdf, read_sql, read_sql_query, read_sql_table, read_clipboard, read_parquet, read_orc, read_feather, read_html, read_xml, read_json, read_stata, read_sas, read_spss from pandas.io.json._normalize import json_normalize from pandas.util._tester import test _built_with_meson = False try: from pandas._version_meson import __version__, __git_version__ _built_with_meson = True except ImportError: from pandas._version import get_versions v = get_versions() __version__ = v.get('closest-tag', v['version']) __git_version__ = v.get('full-revisionid') del get_versions, v __doc__ = '\npandas - a powerful data analysis and manipulation library for Python\n=====================================================================\n\n**pandas** is a Python package providing fast, flexible, and expressive data\nstructures designed to make working with "relational" or "labeled" data both\neasy and intuitive. It aims to be the fundamental high-level building block for\ndoing practical, **real world** data analysis in Python. Additionally, it has\nthe broader goal of becoming **the most powerful and flexible open source data\nanalysis / manipulation tool available in any language**. It is already well on\nits way toward this goal.\n\nMain Features\n-------------\nHere are just a few of the things that pandas does well:\n\n - Easy handling of missing data in floating point as well as non-floating\n point data.\n - Size mutability: columns can be inserted and deleted from DataFrame and\n higher dimensional objects\n - Automatic and explicit data alignment: objects can be explicitly aligned\n to a set of labels, or the user can simply ignore the labels and let\n `Series`, `DataFrame`, etc. 
automatically align the data for you in\n computations.\n - Powerful, flexible group by functionality to perform split-apply-combine\n operations on data sets, for both aggregating and transforming data.\n - Make it easy to convert ragged, differently-indexed data in other Python\n and NumPy data structures into DataFrame objects.\n - Intelligent label-based slicing, fancy indexing, and subsetting of large\n data sets.\n - Intuitive merging and joining data sets.\n - Flexible reshaping and pivoting of data sets.\n - Hierarchical labeling of axes (possible to have multiple labels per tick).\n - Robust IO tools for loading data from flat files (CSV and delimited),\n Excel files, databases, and saving/loading data from the ultrafast HDF5\n format.\n - Time series-specific functionality: date range generation and frequency\n conversion, moving window statistics, date shifting and lagging.\n' __all__ = ['ArrowDtype', 'BooleanDtype', 'Categorical', 'CategoricalDtype', 'CategoricalIndex', 'DataFrame', 'DateOffset', 'DatetimeIndex', 'DatetimeTZDtype', 'ExcelFile', 'ExcelWriter', 'Flags', 'Float32Dtype', 'Float64Dtype', 'Grouper', 'HDFStore', 'Index', 'IndexSlice', 'Int16Dtype', 'Int32Dtype', 'Int64Dtype', 'Int8Dtype', 'Interval', 'IntervalDtype', 'IntervalIndex', 'MultiIndex', 'NA', 'NaT', 'NamedAgg', 'Period', 'PeriodDtype', 'PeriodIndex', 'RangeIndex', 'Series', 'SparseDtype', 'StringDtype', 'Timedelta', 'TimedeltaIndex', 'Timestamp', 'UInt16Dtype', 'UInt32Dtype', 'UInt64Dtype', 'UInt8Dtype', 'api', 'array', 'arrays', 'bdate_range', 'concat', 'crosstab', 'cut', 'date_range', 'describe_option', 'errors', 'eval', 'factorize', 'get_dummies', 'from_dummies', 'get_option', 'infer_freq', 'interval_range', 'io', 'isna', 'isnull', 'json_normalize', 'lreshape', 'melt', 'merge', 'merge_asof', 'merge_ordered', 'notna', 'notnull', 'offsets', 'option_context', 'options', 'period_range', 'pivot', 'pivot_table', 'plotting', 'qcut', 'read_clipboard', 'read_csv', 'read_excel', 'read_feather', 'read_fwf', 'read_hdf', 'read_html', 'read_json', 'read_orc', 'read_parquet', 'read_pickle', 'read_sas', 'read_spss', 'read_sql', 'read_sql_query', 'read_sql_table', 'read_stata', 'read_table', 'read_xml', 'reset_option', 'set_eng_float_format', 'set_option', 'show_versions', 'test', 'testing', 'timedelta_range', 'to_datetime', 'to_numeric', 'to_pickle', 'to_timedelta', 'tseries', 'unique', 'wide_to_long'] # File: pandas-main/pandas/_config/__init__.py """""" __all__ = ['config', 'detect_console_encoding', 'get_option', 'set_option', 'reset_option', 'describe_option', 'option_context', 'options'] from pandas._config import config from pandas._config import dates from pandas._config.config import _global_config, describe_option, get_option, option_context, options, reset_option, set_option from pandas._config.display import detect_console_encoding def using_string_dtype() -> bool: _mode_options = _global_config['future'] return _mode_options['infer_string'] # File: pandas-main/pandas/_config/config.py """""" from __future__ import annotations from contextlib import contextmanager import re from typing import TYPE_CHECKING, Any, NamedTuple, cast import warnings from pandas._typing import F from pandas.util._exceptions import find_stack_level if TYPE_CHECKING: from collections.abc import Callable, Generator, Sequence class DeprecatedOption(NamedTuple): key: str msg: str | None rkey: str | None removal_ver: str | None class RegisteredOption(NamedTuple): key: str defval: Any doc: str validator: Callable[[object], Any] | None cb: 
Callable[[str], Any] | None _deprecated_options: dict[str, DeprecatedOption] = {} _registered_options: dict[str, RegisteredOption] = {} _global_config: dict[str, Any] = {} _reserved_keys: list[str] = ['all'] class OptionError(AttributeError, KeyError): def _get_single_key(pat: str) -> str: keys = _select_options(pat) if len(keys) == 0: _warn_if_deprecated(pat) raise OptionError(f'No such keys(s): {pat!r}') if len(keys) > 1: raise OptionError('Pattern matched multiple keys') key = keys[0] _warn_if_deprecated(key) key = _translate_key(key) return key def get_option(pat: str) -> Any: key = _get_single_key(pat) (root, k) = _get_root(key) return root[k] def set_option(*args) -> None: nargs = len(args) if not nargs or nargs % 2 != 0: raise ValueError('Must provide an even number of non-keyword arguments') for (k, v) in zip(args[::2], args[1::2]): key = _get_single_key(k) opt = _get_registered_option(key) if opt and opt.validator: opt.validator(v) (root, k_root) = _get_root(key) root[k_root] = v if opt.cb: opt.cb(key) def describe_option(pat: str='', _print_desc: bool=True) -> str | None: keys = _select_options(pat) if len(keys) == 0: raise OptionError(f'No such keys(s) for pat={pat!r}') s = '\n'.join([_build_option_description(k) for k in keys]) if _print_desc: print(s) return None return s def reset_option(pat: str) -> None: keys = _select_options(pat) if len(keys) == 0: raise OptionError(f'No such keys(s) for pat={pat!r}') if len(keys) > 1 and len(pat) < 4 and (pat != 'all'): raise ValueError('You must specify at least 4 characters when resetting multiple keys, use the special keyword "all" to reset all the options to their default value') for k in keys: set_option(k, _registered_options[k].defval) def get_default_val(pat: str): key = _get_single_key(pat) return _get_registered_option(key).defval class DictWrapper: d: dict[str, Any] def __init__(self, d: dict[str, Any], prefix: str='') -> None: object.__setattr__(self, 'd', d) object.__setattr__(self, 'prefix', prefix) def __setattr__(self, key: str, val: Any) -> None: prefix = object.__getattribute__(self, 'prefix') if prefix: prefix += '.' prefix += key if key in self.d and (not isinstance(self.d[key], dict)): set_option(prefix, val) else: raise OptionError('You can only set the value of existing options') def __getattr__(self, key: str): prefix = object.__getattribute__(self, 'prefix') if prefix: prefix += '.' 
prefix += key try: v = object.__getattribute__(self, 'd')[key] except KeyError as err: raise OptionError('No such option') from err if isinstance(v, dict): return DictWrapper(v, prefix) else: return get_option(prefix) def __dir__(self) -> list[str]: return list(self.d.keys()) options = DictWrapper(_global_config) @contextmanager def option_context(*args) -> Generator[None, None, None]: if len(args) % 2 != 0 or len(args) < 2: raise ValueError('Provide an even amount of arguments as option_context(pat, val, pat, val...).') ops = tuple(zip(args[::2], args[1::2])) try: undo = tuple(((pat, get_option(pat)) for (pat, val) in ops)) for (pat, val) in ops: set_option(pat, val) yield finally: for (pat, val) in undo: set_option(pat, val) def register_option(key: str, defval: object, doc: str='', validator: Callable[[object], Any] | None=None, cb: Callable[[str], Any] | None=None) -> None: import keyword import tokenize key = key.lower() if key in _registered_options: raise OptionError(f"Option '{key}' has already been registered") if key in _reserved_keys: raise OptionError(f"Option '{key}' is a reserved key") if validator: validator(defval) path = key.split('.') for k in path: if not re.match('^' + tokenize.Name + '$', k): raise ValueError(f'{k} is not a valid identifier') if keyword.iskeyword(k): raise ValueError(f'{k} is a python keyword') cursor = _global_config msg = "Path prefix to option '{option}' is already an option" for (i, p) in enumerate(path[:-1]): if not isinstance(cursor, dict): raise OptionError(msg.format(option='.'.join(path[:i]))) if p not in cursor: cursor[p] = {} cursor = cursor[p] if not isinstance(cursor, dict): raise OptionError(msg.format(option='.'.join(path[:-1]))) cursor[path[-1]] = defval _registered_options[key] = RegisteredOption(key=key, defval=defval, doc=doc, validator=validator, cb=cb) def deprecate_option(key: str, msg: str | None=None, rkey: str | None=None, removal_ver: str | None=None) -> None: key = key.lower() if key in _deprecated_options: raise OptionError(f"Option '{key}' has already been defined as deprecated.") _deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver) def _select_options(pat: str) -> list[str]: if pat in _registered_options: return [pat] keys = sorted(_registered_options.keys()) if pat == 'all': return keys return [k for k in keys if re.search(pat, k, re.I)] def _get_root(key: str) -> tuple[dict[str, Any], str]: path = key.split('.') cursor = _global_config for p in path[:-1]: cursor = cursor[p] return (cursor, path[-1]) def _get_deprecated_option(key: str): try: d = _deprecated_options[key] except KeyError: return None else: return d def _get_registered_option(key: str): return _registered_options.get(key) def _translate_key(key: str) -> str: d = _get_deprecated_option(key) if d: return d.rkey or key else: return key def _warn_if_deprecated(key: str) -> bool: d = _get_deprecated_option(key) if d: if d.msg: warnings.warn(d.msg, FutureWarning, stacklevel=find_stack_level()) else: msg = f"'{key}' is deprecated" if d.removal_ver: msg += f' and will be removed in {d.removal_ver}' if d.rkey: msg += f", please use '{d.rkey}' instead." else: msg += ', please refrain from using it.' warnings.warn(msg, FutureWarning, stacklevel=find_stack_level()) return True return False def _build_option_description(k: str) -> str: o = _get_registered_option(k) d = _get_deprecated_option(k) s = f'{k} ' if o.doc: s += '\n'.join(o.doc.strip().split('\n')) else: s += 'No description available.' 
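    # Append the registered default and the live value of the option; FutureWarning and
    # DeprecationWarning are silenced so that describing a deprecated option does not
    # itself warn, and a separate deprecation notice is appended afterwards.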
if o: with warnings.catch_warnings(): warnings.simplefilter('ignore', FutureWarning) warnings.simplefilter('ignore', DeprecationWarning) s += f'\n [default: {o.defval}] [currently: {get_option(k)}]' if d: rkey = d.rkey or '' s += '\n (Deprecated' s += f', use `{rkey}` instead.' s += ')' return s @contextmanager def config_prefix(prefix: str) -> Generator[None, None, None]: global register_option, get_option, set_option def wrap(func: F) -> F: def inner(key: str, *args, **kwds): pkey = f'{prefix}.{key}' return func(pkey, *args, **kwds) return cast(F, inner) _register_option = register_option _get_option = get_option _set_option = set_option set_option = wrap(set_option) get_option = wrap(get_option) register_option = wrap(register_option) try: yield finally: set_option = _set_option get_option = _get_option register_option = _register_option def is_type_factory(_type: type[Any]) -> Callable[[Any], None]: def inner(x) -> None: if type(x) != _type: raise ValueError(f"Value must have type '{_type}'") return inner def is_instance_factory(_type: type | tuple[type, ...]) -> Callable[[Any], None]: if isinstance(_type, tuple): type_repr = '|'.join(map(str, _type)) else: type_repr = f"'{_type}'" def inner(x) -> None: if not isinstance(x, _type): raise ValueError(f'Value must be an instance of {type_repr}') return inner def is_one_of_factory(legal_values: Sequence) -> Callable[[Any], None]: callables = [c for c in legal_values if callable(c)] legal_values = [c for c in legal_values if not callable(c)] def inner(x) -> None: if x not in legal_values: if not any((c(x) for c in callables)): uvals = [str(lval) for lval in legal_values] pp_values = '|'.join(uvals) msg = f'Value must be one of {pp_values}' if len(callables): msg += ' or a callable' raise ValueError(msg) return inner def is_nonnegative_int(value: object) -> None: if value is None: return elif isinstance(value, int): if value >= 0: return msg = 'Value must be a nonnegative integer or None' raise ValueError(msg) is_int = is_type_factory(int) is_bool = is_type_factory(bool) is_float = is_type_factory(float) is_str = is_type_factory(str) is_text = is_instance_factory((str, bytes)) def is_callable(obj: object) -> bool: if not callable(obj): raise ValueError('Value must be a callable') return True # File: pandas-main/pandas/_config/dates.py """""" from __future__ import annotations from pandas._config import config as cf pc_date_dayfirst_doc = '\n: boolean\n When True, prints and parses dates with the day first, eg 20/01/2005\n' pc_date_yearfirst_doc = '\n: boolean\n When True, prints and parses dates with the year first, eg 2005/01/20\n' with cf.config_prefix('display'): cf.register_option('date_dayfirst', False, pc_date_dayfirst_doc, validator=cf.is_bool) cf.register_option('date_yearfirst', False, pc_date_yearfirst_doc, validator=cf.is_bool) # File: pandas-main/pandas/_config/display.py """""" from __future__ import annotations import locale import sys from pandas._config import config as cf _initial_defencoding: str | None = None def detect_console_encoding() -> str: global _initial_defencoding encoding = None try: encoding = sys.stdout.encoding or sys.stdin.encoding except (AttributeError, OSError): pass if not encoding or 'ascii' in encoding.lower(): try: encoding = locale.getpreferredencoding() except locale.Error: pass if not encoding or 'ascii' in encoding.lower(): encoding = sys.getdefaultencoding() if not _initial_defencoding: _initial_defencoding = sys.getdefaultencoding() return encoding pc_encoding_doc = '\n: str/unicode\n Defaults 
to the detected encoding of the console.\n Specifies the encoding to be used for strings returned by to_string,\n these are generally strings meant to be displayed on the console.\n' with cf.config_prefix('display'): cf.register_option('encoding', detect_console_encoding(), pc_encoding_doc, validator=cf.is_text) # File: pandas-main/pandas/_config/localization.py """""" from __future__ import annotations from contextlib import contextmanager import locale import platform import re import subprocess from typing import TYPE_CHECKING, cast from pandas._config.config import options if TYPE_CHECKING: from collections.abc import Generator @contextmanager def set_locale(new_locale: str | tuple[str, str], lc_var: int=locale.LC_ALL) -> Generator[str | tuple[str, str], None, None]: current_locale = locale.setlocale(lc_var) try: locale.setlocale(lc_var, new_locale) (normalized_code, normalized_encoding) = locale.getlocale() if normalized_code is not None and normalized_encoding is not None: yield f'{normalized_code}.{normalized_encoding}' else: yield new_locale finally: locale.setlocale(lc_var, current_locale) def can_set_locale(lc: str, lc_var: int=locale.LC_ALL) -> bool: try: with set_locale(lc, lc_var=lc_var): pass except (ValueError, locale.Error): return False else: return True def _valid_locales(locales: list[str] | str, normalize: bool) -> list[str]: return [loc for loc in (locale.normalize(loc.strip()) if normalize else loc.strip() for loc in locales) if can_set_locale(loc)] def get_locales(prefix: str | None=None, normalize: bool=True) -> list[str]: if platform.system() in ('Linux', 'Darwin'): raw_locales = subprocess.check_output(['locale', '-a']) else: return [] try: split_raw_locales = raw_locales.split(b'\n') out_locales = [] for x in split_raw_locales: try: out_locales.append(str(x, encoding=cast(str, options.display.encoding))) except UnicodeError: out_locales.append(str(x, encoding='windows-1252')) except TypeError: pass if prefix is None: return _valid_locales(out_locales, normalize) pattern = re.compile(f'{prefix}.*') found = pattern.findall('\n'.join(out_locales)) return _valid_locales(found, normalize) # File: pandas-main/pandas/_libs/__init__.py __all__ = ['NaT', 'NaTType', 'OutOfBoundsDatetime', 'Period', 'Timedelta', 'Timestamp', 'iNaT', 'Interval'] import pandas._libs.pandas_parser import pandas._libs.pandas_datetime from pandas._libs.interval import Interval from pandas._libs.tslibs import NaT, NaTType, OutOfBoundsDatetime, Period, Timedelta, Timestamp, iNaT # File: pandas-main/pandas/_libs/tslibs/__init__.py __all__ = ['dtypes', 'localize_pydatetime', 'NaT', 'NaTType', 'iNaT', 'nat_strings', 'OutOfBoundsDatetime', 'OutOfBoundsTimedelta', 'IncompatibleFrequency', 'Period', 'Resolution', 'Timedelta', 'normalize_i8_timestamps', 'is_date_array_normalized', 'dt64arr_to_periodarr', 'delta_to_nanoseconds', 'ints_to_pydatetime', 'ints_to_pytimedelta', 'get_resolution', 'Timestamp', 'tz_convert_from_utc_single', 'tz_convert_from_utc', 'to_offset', 'Tick', 'BaseOffset', 'tz_compare', 'is_unitless', 'astype_overflowsafe', 'get_unit_from_dtype', 'periods_per_day', 'periods_per_second', 'guess_datetime_format', 'add_overflowsafe', 'get_supported_dtype', 'is_supported_dtype'] from pandas._libs.tslibs import dtypes from pandas._libs.tslibs.conversion import localize_pydatetime from pandas._libs.tslibs.dtypes import Resolution, periods_per_day, periods_per_second from pandas._libs.tslibs.nattype import NaT, NaTType, iNaT, nat_strings from pandas._libs.tslibs.np_datetime import 
OutOfBoundsDatetime, OutOfBoundsTimedelta, add_overflowsafe, astype_overflowsafe, get_supported_dtype, is_supported_dtype, is_unitless, py_get_unit_from_dtype as get_unit_from_dtype from pandas._libs.tslibs.offsets import BaseOffset, Tick, to_offset from pandas._libs.tslibs.parsing import guess_datetime_format from pandas._libs.tslibs.period import IncompatibleFrequency, Period from pandas._libs.tslibs.timedeltas import Timedelta, delta_to_nanoseconds, ints_to_pytimedelta from pandas._libs.tslibs.timestamps import Timestamp from pandas._libs.tslibs.timezones import tz_compare from pandas._libs.tslibs.tzconversion import tz_convert_from_utc_single from pandas._libs.tslibs.vectorized import dt64arr_to_periodarr, get_resolution, ints_to_pydatetime, is_date_array_normalized, normalize_i8_timestamps, tz_convert_from_utc # File: pandas-main/pandas/_typing.py from __future__ import annotations from collections.abc import Callable, Hashable, Iterator, Mapping, MutableMapping, Sequence from datetime import date, datetime, timedelta, tzinfo from os import PathLike import sys from typing import TYPE_CHECKING, Any, Literal, Optional, Protocol, Type as type_t, TypeVar, Union, overload import numpy as np if TYPE_CHECKING: import numpy.typing as npt from pandas._libs import NaTType, Period, Timedelta, Timestamp from pandas._libs.tslibs import BaseOffset from pandas.core.dtypes.dtypes import ExtensionDtype from pandas import DatetimeIndex, Interval, PeriodIndex, TimedeltaIndex from pandas.arrays import DatetimeArray, TimedeltaArray from pandas.core.arrays.base import ExtensionArray from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame from pandas.core.groupby.generic import DataFrameGroupBy, GroupBy, SeriesGroupBy from pandas.core.indexes.base import Index from pandas.core.internals import BlockManager, SingleBlockManager from pandas.core.resample import Resampler from pandas.core.series import Series from pandas.core.window.rolling import BaseWindow from pandas.io.formats.format import EngFormatter from pandas.tseries.holiday import AbstractHolidayCalendar ScalarLike_co = Union[int, float, complex, str, bytes, np.generic] NumpyValueArrayLike = Union[ScalarLike_co, npt.ArrayLike] NumpySorter = Optional[npt._ArrayLikeInt_co] from typing import ParamSpec, SupportsIndex from typing import Concatenate from typing import TypeGuard P = ParamSpec('P') if sys.version_info >= (3, 11): from typing import Self from typing import Unpack else: from typing_extensions import Self from typing_extensions import Unpack else: npt: Any = None ParamSpec: Any = None Self: Any = None TypeGuard: Any = None Concatenate: Any = None Unpack: Any = None HashableT = TypeVar('HashableT', bound=Hashable) HashableT2 = TypeVar('HashableT2', bound=Hashable) MutableMappingT = TypeVar('MutableMappingT', bound=MutableMapping) ArrayLike = Union['ExtensionArray', np.ndarray] ArrayLikeT = TypeVar('ArrayLikeT', 'ExtensionArray', np.ndarray) AnyArrayLike = Union[ArrayLike, 'Index', 'Series'] TimeArrayLike = Union['DatetimeArray', 'TimedeltaArray'] _T_co = TypeVar('_T_co', covariant=True) class SequenceNotStr(Protocol[_T_co]): @overload def __getitem__(self, index: SupportsIndex, /) -> _T_co: ... @overload def __getitem__(self, index: slice, /) -> Sequence[_T_co]: ... def __contains__(self, value: object, /) -> bool: ... def __len__(self) -> int: ... def __iter__(self) -> Iterator[_T_co]: ... def index(self, value: Any, start: int=..., stop: int=..., /) -> int: ... def count(self, value: Any, /) -> int: ... 
def __reversed__(self) -> Iterator[_T_co]: ... ListLike = Union[AnyArrayLike, SequenceNotStr, range] PythonScalar = Union[str, float, bool] DatetimeLikeScalar = Union['Period', 'Timestamp', 'Timedelta'] PandasScalar = Union['Period', 'Timestamp', 'Timedelta', 'Interval'] Scalar = Union[PythonScalar, PandasScalar, np.datetime64, np.timedelta64, date] IntStrT = TypeVar('IntStrT', bound=Union[int, str]) TimestampConvertibleTypes = Union['Timestamp', date, np.datetime64, np.int64, float, str] TimestampNonexistent = Union[Literal['shift_forward', 'shift_backward', 'NaT', 'raise'], timedelta] TimedeltaConvertibleTypes = Union['Timedelta', timedelta, np.timedelta64, np.int64, float, str] Timezone = Union[str, tzinfo] ToTimestampHow = Literal['s', 'e', 'start', 'end'] NDFrameT = TypeVar('NDFrameT', bound='NDFrame') IndexT = TypeVar('IndexT', bound='Index') FreqIndexT = TypeVar('FreqIndexT', 'DatetimeIndex', 'PeriodIndex', 'TimedeltaIndex') NumpyIndexT = TypeVar('NumpyIndexT', np.ndarray, 'Index') AxisInt = int Axis = Union[AxisInt, Literal['index', 'columns', 'rows']] IndexLabel = Union[Hashable, Sequence[Hashable]] Level = Hashable Shape = tuple[int, ...] Suffixes = Sequence[Optional[str]] Ordered = Optional[bool] JSONSerializable = Optional[Union[PythonScalar, list, dict]] Frequency = Union[str, 'BaseOffset'] Axes = ListLike RandomState = Union[int, np.ndarray, np.random.Generator, np.random.BitGenerator, np.random.RandomState] NpDtype = Union[str, np.dtype, type_t[Union[str, complex, bool, object]]] Dtype = Union['ExtensionDtype', NpDtype] AstypeArg = Union['ExtensionDtype', 'npt.DTypeLike'] DtypeArg = Union[Dtype, Mapping[Hashable, Dtype]] DtypeObj = Union[np.dtype, 'ExtensionDtype'] ConvertersArg = dict[Hashable, Callable[[Dtype], Dtype]] ParseDatesArg = Union[bool, list[Hashable], list[list[Hashable]], dict[Hashable, list[Hashable]]] Renamer = Union[Mapping[Any, Hashable], Callable[[Any], Hashable]] T = TypeVar('T') FuncType = Callable[..., Any] F = TypeVar('F', bound=FuncType) TypeT = TypeVar('TypeT', bound=type) ValueKeyFunc = Optional[Callable[['Series'], Union['Series', AnyArrayLike]]] IndexKeyFunc = Optional[Callable[['Index'], Union['Index', AnyArrayLike]]] AggFuncTypeBase = Union[Callable, str] AggFuncTypeDict = MutableMapping[Hashable, Union[AggFuncTypeBase, list[AggFuncTypeBase]]] AggFuncType = Union[AggFuncTypeBase, list[AggFuncTypeBase], AggFuncTypeDict] AggObjType = Union['Series', 'DataFrame', 'GroupBy', 'SeriesGroupBy', 'DataFrameGroupBy', 'BaseWindow', 'Resampler'] PythonFuncType = Callable[[Any], Any] AnyStr_co = TypeVar('AnyStr_co', str, bytes, covariant=True) AnyStr_contra = TypeVar('AnyStr_contra', str, bytes, contravariant=True) class BaseBuffer(Protocol): @property def mode(self) -> str: ... def seek(self, __offset: int, __whence: int=...) -> int: ... def seekable(self) -> bool: ... def tell(self) -> int: ... class ReadBuffer(BaseBuffer, Protocol[AnyStr_co]): def read(self, __n: int=...) -> AnyStr_co: ... class WriteBuffer(BaseBuffer, Protocol[AnyStr_contra]): def write(self, __b: AnyStr_contra) -> Any: ... def flush(self) -> Any: ... class ReadPickleBuffer(ReadBuffer[bytes], Protocol): def readline(self) -> bytes: ... class WriteExcelBuffer(WriteBuffer[bytes], Protocol): def truncate(self, size: int | None=..., /) -> int: ... class ReadCsvBuffer(ReadBuffer[AnyStr_co], Protocol): def __iter__(self) -> Iterator[AnyStr_co]: ... def fileno(self) -> int: ... def readline(self) -> AnyStr_co: ... @property def closed(self) -> bool: ... 
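# Illustrative sketch (not part of pandas): the buffer Protocols above are structural,
# so any object exposing the listed methods satisfies them without subclassing.
# A hypothetical minimal reader, for illustration only, could look like:
#
#     class _DemoReader:
#         mode = "rb"
#         def seek(self, offset, whence=0): return 0
#         def seekable(self): return True
#         def tell(self): return 0
#         def read(self, n=-1): return b""
#
#     reader: ReadBuffer[bytes] = _DemoReader()  # accepted structurally by type checkers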
FilePath = Union[str, 'PathLike[str]'] StorageOptions = Optional[dict[str, Any]] CompressionDict = dict[str, Any] CompressionOptions = Optional[Union[Literal['infer', 'gzip', 'bz2', 'zip', 'xz', 'zstd', 'tar'], CompressionDict]] FormattersType = Union[list[Callable], tuple[Callable, ...], Mapping[Union[str, int], Callable]] ColspaceType = Mapping[Hashable, Union[str, int]] FloatFormatType = Union[str, Callable, 'EngFormatter'] ColspaceArgType = Union[str, int, Sequence[Union[str, int]], Mapping[Hashable, Union[str, int]]] FillnaOptions = Literal['backfill', 'bfill', 'ffill', 'pad'] InterpolateOptions = Literal['linear', 'time', 'index', 'values', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', 'polynomial', 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima', 'cubicspline', 'from_derivatives'] Manager = Union['BlockManager', 'SingleBlockManager'] ScalarIndexer = Union[int, np.integer] SequenceIndexer = Union[slice, list[int], np.ndarray] PositionalIndexer = Union[ScalarIndexer, SequenceIndexer] PositionalIndexerTuple = tuple[PositionalIndexer, PositionalIndexer] PositionalIndexer2D = Union[PositionalIndexer, PositionalIndexerTuple] if TYPE_CHECKING: TakeIndexer = Union[Sequence[int], Sequence[np.integer], npt.NDArray[np.integer]] else: TakeIndexer = Any IgnoreRaise = Literal['ignore', 'raise'] WindowingRankType = Literal['average', 'min', 'max'] CSVEngine = Literal['c', 'python', 'pyarrow', 'python-fwf'] JSONEngine = Literal['ujson', 'pyarrow'] XMLParsers = Literal['lxml', 'etree'] HTMLFlavors = Literal['lxml', 'html5lib', 'bs4'] IntervalLeftRight = Literal['left', 'right'] IntervalClosedType = Union[IntervalLeftRight, Literal['both', 'neither']] DatetimeNaTType = Union[datetime, 'NaTType'] DateTimeErrorChoices = Literal['raise', 'coerce'] SortKind = Literal['quicksort', 'mergesort', 'heapsort', 'stable'] NaPosition = Literal['first', 'last'] NsmallestNlargestKeep = Literal['first', 'last', 'all'] QuantileInterpolation = Literal['linear', 'lower', 'higher', 'midpoint', 'nearest'] PlottingOrientation = Literal['horizontal', 'vertical'] AnyAll = Literal['any', 'all'] MergeHow = Literal['left', 'right', 'inner', 'outer', 'cross'] MergeValidate = Literal['one_to_one', '1:1', 'one_to_many', '1:m', 'many_to_one', 'm:1', 'many_to_many', 'm:m'] JoinHow = Literal['left', 'right', 'inner', 'outer'] JoinValidate = Literal['one_to_one', '1:1', 'one_to_many', '1:m', 'many_to_one', 'm:1', 'many_to_many', 'm:m'] ReindexMethod = Union[FillnaOptions, Literal['nearest']] MatplotlibColor = Union[str, Sequence[float]] TimeGrouperOrigin = Union['Timestamp', Literal['epoch', 'start', 'start_day', 'end', 'end_day']] TimeAmbiguous = Union[Literal['infer', 'NaT', 'raise'], 'npt.NDArray[np.bool_]'] TimeNonexistent = Union[Literal['shift_forward', 'shift_backward', 'NaT', 'raise'], timedelta] DropKeep = Literal['first', 'last', False] CorrelationMethod = Union[Literal['pearson', 'kendall', 'spearman'], Callable[[np.ndarray, np.ndarray], float]] AlignJoin = Literal['outer', 'inner', 'left', 'right'] DtypeBackend = Literal['pyarrow', 'numpy_nullable'] TimeUnit = Literal['s', 'ms', 'us', 'ns'] OpenFileErrors = Literal['strict', 'ignore', 'replace', 'surrogateescape', 'xmlcharrefreplace', 'backslashreplace', 'namereplace'] UpdateJoin = Literal['left'] NaAction = Literal['ignore'] FromDictOrient = Literal['columns', 'index', 'tight'] ToStataByteorder = Literal['>', '<', 'little', 'big'] ExcelWriterIfSheetExists = Literal['error', 'new', 'replace', 'overlay'] ExcelWriterMergeCells = Union[bool, 
Literal['columns']] OffsetCalendar = Union[np.busdaycalendar, 'AbstractHolidayCalendar'] UsecolsArgType = Union[SequenceNotStr[Hashable], range, AnyArrayLike, Callable[[HashableT], bool], None] SequenceT = TypeVar('SequenceT', bound=Sequence[Hashable]) SliceType = Optional[Hashable] # File: pandas-main/pandas/_version.py """""" from collections.abc import Callable import errno import functools import os import re import subprocess import sys def get_keywords(): git_refnames = ' (HEAD -> main)' git_full = '3e8ac12d1dacc2308b2f4c2869fa7bc2079bd323' git_date = '2024-09-15 22:00:26 +0200' keywords = {'refnames': git_refnames, 'full': git_full, 'date': git_date} return keywords class VersioneerConfig: def get_config(): cfg = VersioneerConfig() cfg.VCS = 'git' cfg.style = 'pep440' cfg.tag_prefix = 'v' cfg.parentdir_prefix = 'pandas-' cfg.versionfile_source = 'pandas/_version.py' cfg.verbose = False return cfg class NotThisMethod(Exception): LONG_VERSION_PY: dict[str, str] = {} HANDLERS: dict[str, dict[str, Callable]] = {} def register_vcs_handler(vcs, method): def decorate(f): if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): assert isinstance(commands, list) process = None popen_kwargs = {} if sys.platform == 'win32': startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW popen_kwargs['startupinfo'] = startupinfo for command in commands: dispcmd = str([command] + args) try: process = subprocess.Popen([command] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE if hide_stderr else None, **popen_kwargs) break except OSError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print(f'unable to run {dispcmd}') print(e) return (None, None) else: if verbose: print(f'unable to find command, tried {commands}') return (None, None) stdout = process.communicate()[0].strip().decode() if process.returncode != 0: if verbose: print(f'unable to run {dispcmd} (error)') print(f'stdout was {stdout}') return (None, process.returncode) return (stdout, process.returncode) def versions_from_parentdir(parentdir_prefix, root, verbose): rootdirs = [] for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {'version': dirname[len(parentdir_prefix):], 'full-revisionid': None, 'dirty': False, 'error': None, 'date': None} rootdirs.append(root) root = os.path.dirname(root) if verbose: print(f'Tried directories {rootdirs!s} but none started with prefix {parentdir_prefix}') raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler('git', 'get_keywords') def git_get_keywords(versionfile_abs): keywords = {} try: with open(versionfile_abs, encoding='utf-8') as fobj: for line in fobj: if line.strip().startswith('git_refnames ='): mo = re.search('=\\s*"(.*)"', line) if mo: keywords['refnames'] = mo.group(1) if line.strip().startswith('git_full ='): mo = re.search('=\\s*"(.*)"', line) if mo: keywords['full'] = mo.group(1) if line.strip().startswith('git_date ='): mo = re.search('=\\s*"(.*)"', line) if mo: keywords['date'] = mo.group(1) except OSError: pass return keywords @register_vcs_handler('git', 'keywords') def git_versions_from_keywords(keywords, tag_prefix, verbose): if 'refnames' not in keywords: raise NotThisMethod('Short version file found') date = keywords.get('date') if date is not None: date = date.splitlines()[-1] date = 
date.strip().replace(' ', 'T', 1).replace(' ', '', 1) refnames = keywords['refnames'].strip() if refnames.startswith('$Format'): if verbose: print('keywords are unexpanded, not using') raise NotThisMethod('unexpanded keywords, not a git-archive tarball') refs = {r.strip() for r in refnames.strip('()').split(',')} TAG = 'tag: ' tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} if not tags: tags = {r for r in refs if re.search('\\d', r)} if verbose: print(f"discarding '{','.join(refs - tags)}', no digits") if verbose: print(f"likely tags: {','.join(sorted(tags))}") for ref in sorted(tags): if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if not re.match('\\d', r): continue if verbose: print(f'picking {r}') return {'version': r, 'full-revisionid': keywords['full'].strip(), 'dirty': False, 'error': None, 'date': date} if verbose: print('no suitable tags, using unknown + full revision id') return {'version': '0+unknown', 'full-revisionid': keywords['full'].strip(), 'dirty': False, 'error': 'no suitable tags', 'date': None} @register_vcs_handler('git', 'pieces_from_vcs') def git_pieces_from_vcs(tag_prefix, root, verbose, runner=run_command): GITS = ['git'] if sys.platform == 'win32': GITS = ['git.cmd', 'git.exe'] env = os.environ.copy() env.pop('GIT_DIR', None) runner = functools.partial(runner, env=env) (_, rc) = runner(GITS, ['rev-parse', '--git-dir'], cwd=root, hide_stderr=not verbose) if rc != 0: if verbose: print(f'Directory {root} not under git control') raise NotThisMethod("'git rev-parse --git-dir' returned error") (describe_out, rc) = runner(GITS, ['describe', '--tags', '--dirty', '--always', '--long', '--match', f'{tag_prefix}[[:digit:]]*'], cwd=root) if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() (full_out, rc) = runner(GITS, ['rev-parse', 'HEAD'], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces['long'] = full_out pieces['short'] = full_out[:7] pieces['error'] = None (branch_name, rc) = runner(GITS, ['rev-parse', '--abbrev-ref', 'HEAD'], cwd=root) if rc != 0 or branch_name is None: raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") branch_name = branch_name.strip() if branch_name == 'HEAD': (branches, rc) = runner(GITS, ['branch', '--contains'], cwd=root) if rc != 0 or branches is None: raise NotThisMethod("'git branch --contains' returned error") branches = branches.split('\n') if '(' in branches[0]: branches.pop(0) branches = [branch[2:] for branch in branches] if 'master' in branches: branch_name = 'master' elif not branches: branch_name = None else: branch_name = branches[0] pieces['branch'] = branch_name git_describe = describe_out dirty = git_describe.endswith('-dirty') pieces['dirty'] = dirty if dirty: git_describe = git_describe[:git_describe.rindex('-dirty')] if '-' in git_describe: mo = re.search('^(.+)-(\\d+)-g([0-9a-f]+)$', git_describe) if not mo: pieces['error'] = f"unable to parse git-describe output: '{describe_out}'" return pieces full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces['error'] = f"tag '{full_tag}' doesn't start with prefix '{tag_prefix}'" return pieces pieces['closest-tag'] = full_tag[len(tag_prefix):] pieces['distance'] = int(mo.group(2)) pieces['short'] = mo.group(3) else: pieces['closest-tag'] = None (out, rc) = runner(GITS, ['rev-list', 'HEAD', '--left-right'], cwd=root) 
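    # `git describe` found no tag (its output is just a hash), so fall back to counting
    # all commits reachable from HEAD as the distance; the commit date is then taken
    # from `git show -s --format=%ci HEAD` below.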
pieces['distance'] = len(out.split()) date = runner(GITS, ['show', '-s', '--format=%ci', 'HEAD'], cwd=root)[0].strip() date = date.splitlines()[-1] pieces['date'] = date.strip().replace(' ', 'T', 1).replace(' ', '', 1) return pieces def plus_or_dot(pieces) -> str: if '+' in pieces.get('closest-tag', ''): return '.' return '+' def render_pep440(pieces): if pieces['closest-tag']: rendered = pieces['closest-tag'] if pieces['distance'] or pieces['dirty']: rendered += plus_or_dot(pieces) rendered += f"{pieces['distance']}.g{pieces['short']}" if pieces['dirty']: rendered += '.dirty' else: rendered = f"0+untagged.{pieces['distance']}.g{pieces['short']}" if pieces['dirty']: rendered += '.dirty' return rendered def render_pep440_branch(pieces): if pieces['closest-tag']: rendered = pieces['closest-tag'] if pieces['distance'] or pieces['dirty']: if pieces['branch'] != 'master': rendered += '.dev0' rendered += plus_or_dot(pieces) rendered += f"{pieces['distance']}.g{pieces['short']}" if pieces['dirty']: rendered += '.dirty' else: rendered = '0' if pieces['branch'] != 'master': rendered += '.dev0' rendered += f"+untagged.{pieces['distance']}.g{pieces['short']}" if pieces['dirty']: rendered += '.dirty' return rendered def pep440_split_post(ver): vc = str.split(ver, '.post') return (vc[0], int(vc[1] or 0) if len(vc) == 2 else None) def render_pep440_pre(pieces): if pieces['closest-tag']: if pieces['distance']: (tag_version, post_version) = pep440_split_post(pieces['closest-tag']) rendered = tag_version if post_version is not None: rendered += f".post{post_version + 1}.dev{pieces['distance']}" else: rendered += f".post0.dev{pieces['distance']}" else: rendered = pieces['closest-tag'] else: rendered = f"0.post0.dev{pieces['distance']}" return rendered def render_pep440_post(pieces): if pieces['closest-tag']: rendered = pieces['closest-tag'] if pieces['distance'] or pieces['dirty']: rendered += f".post{pieces['distance']}" if pieces['dirty']: rendered += '.dev0' rendered += plus_or_dot(pieces) rendered += f"g{pieces['short']}" else: rendered = f"0.post{pieces['distance']}" if pieces['dirty']: rendered += '.dev0' rendered += f"+g{pieces['short']}" return rendered def render_pep440_post_branch(pieces): if pieces['closest-tag']: rendered = pieces['closest-tag'] if pieces['distance'] or pieces['dirty']: rendered += f".post{pieces['distance']}" if pieces['branch'] != 'master': rendered += '.dev0' rendered += plus_or_dot(pieces) rendered += f"g{pieces['short']}" if pieces['dirty']: rendered += '.dirty' else: rendered = f"0.post{pieces['distance']}" if pieces['branch'] != 'master': rendered += '.dev0' rendered += f"+g{pieces['short']}" if pieces['dirty']: rendered += '.dirty' return rendered def render_pep440_old(pieces): if pieces['closest-tag']: rendered = pieces['closest-tag'] if pieces['distance'] or pieces['dirty']: rendered += f"0.post{pieces['distance']}" if pieces['dirty']: rendered += '.dev0' else: rendered = f"0.post{pieces['distance']}" if pieces['dirty']: rendered += '.dev0' return rendered def render_git_describe(pieces): if pieces['closest-tag']: rendered = pieces['closest-tag'] if pieces['distance']: rendered += f"-{pieces['distance']}-g{pieces['short']}" else: rendered = pieces['short'] if pieces['dirty']: rendered += '-dirty' return rendered def render_git_describe_long(pieces): if pieces['closest-tag']: rendered = pieces['closest-tag'] rendered += f"-{pieces['distance']}-g{pieces['short']}" else: rendered = pieces['short'] if pieces['dirty']: rendered += '-dirty' return rendered def render(pieces, 
style): if pieces['error']: return {'version': 'unknown', 'full-revisionid': pieces.get('long'), 'dirty': None, 'error': pieces['error'], 'date': None} if not style or style == 'default': style = 'pep440' if style == 'pep440': rendered = render_pep440(pieces) elif style == 'pep440-branch': rendered = render_pep440_branch(pieces) elif style == 'pep440-pre': rendered = render_pep440_pre(pieces) elif style == 'pep440-post': rendered = render_pep440_post(pieces) elif style == 'pep440-post-branch': rendered = render_pep440_post_branch(pieces) elif style == 'pep440-old': rendered = render_pep440_old(pieces) elif style == 'git-describe': rendered = render_git_describe(pieces) elif style == 'git-describe-long': rendered = render_git_describe_long(pieces) else: raise ValueError(f"unknown style '{style}'") return {'version': rendered, 'full-revisionid': pieces['long'], 'dirty': pieces['dirty'], 'error': None, 'date': pieces.get('date')} def get_versions(): cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) for _ in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to find root of source tree', 'date': None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {'version': '0+unknown', 'full-revisionid': None, 'dirty': None, 'error': 'unable to compute version', 'date': None} # File: pandas-main/pandas/api/__init__.py """""" from pandas.api import extensions, indexers, interchange, types, typing __all__ = ['interchange', 'extensions', 'indexers', 'types', 'typing'] # File: pandas-main/pandas/api/extensions/__init__.py """""" from pandas._libs.lib import no_default from pandas.core.dtypes.base import ExtensionDtype, register_extension_dtype from pandas.core.accessor import register_dataframe_accessor, register_index_accessor, register_series_accessor from pandas.core.algorithms import take from pandas.core.arrays import ExtensionArray, ExtensionScalarOpsMixin __all__ = ['no_default', 'ExtensionDtype', 'register_extension_dtype', 'register_dataframe_accessor', 'register_index_accessor', 'register_series_accessor', 'take', 'ExtensionArray', 'ExtensionScalarOpsMixin'] # File: pandas-main/pandas/api/indexers/__init__.py """""" from pandas.core.indexers import check_array_indexer from pandas.core.indexers.objects import BaseIndexer, FixedForwardWindowIndexer, VariableOffsetWindowIndexer __all__ = ['check_array_indexer', 'BaseIndexer', 'FixedForwardWindowIndexer', 'VariableOffsetWindowIndexer'] # File: pandas-main/pandas/api/internals.py import numpy as np from pandas._typing import ArrayLike from pandas import DataFrame, Index from pandas.core.internals.api import _make_block from pandas.core.internals.managers import BlockManager as _BlockManager def create_dataframe_from_blocks(blocks: list[tuple[ArrayLike, np.ndarray]], index: Index, columns: Index) -> DataFrame: block_objs = [_make_block(*block) for block in blocks] axes = [columns, index] mgr = _BlockManager(block_objs, axes) return DataFrame._from_mgr(mgr, mgr.axes) # File: pandas-main/pandas/api/types/__init__.py """""" from pandas._libs.lib import infer_dtype from pandas.core.dtypes.api import * 
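# Usage sketch (illustrative, not executed here): ``pandas.api.types`` is the public
# namespace for dtype introspection helpers re-exported via the star import above and
# the imports below, e.g.:
#
#     import pandas as pd
#     pd.api.types.infer_dtype(["a", "b", None])        # -> "string"
#     pd.api.types.is_integer_dtype(pd.Series([1, 2]))  # -> True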
from pandas.core.dtypes.concat import union_categoricals from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype, IntervalDtype, PeriodDtype __all__ = ['infer_dtype', 'union_categoricals', 'CategoricalDtype', 'DatetimeTZDtype', 'IntervalDtype', 'PeriodDtype'] # File: pandas-main/pandas/api/typing/__init__.py """""" from pandas._libs import NaTType from pandas._libs.missing import NAType from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy from pandas.core.indexes.frozen import FrozenList from pandas.core.resample import DatetimeIndexResamplerGroupby, PeriodIndexResamplerGroupby, Resampler, TimedeltaIndexResamplerGroupby, TimeGrouper from pandas.core.window import Expanding, ExpandingGroupby, ExponentialMovingWindow, ExponentialMovingWindowGroupby, Rolling, RollingGroupby, Window from pandas.io.json._json import JsonReader from pandas.io.sas.sasreader import SASReader from pandas.io.stata import StataReader __all__ = ['DataFrameGroupBy', 'DatetimeIndexResamplerGroupby', 'Expanding', 'ExpandingGroupby', 'ExponentialMovingWindow', 'ExponentialMovingWindowGroupby', 'FrozenList', 'JsonReader', 'NaTType', 'NAType', 'PeriodIndexResamplerGroupby', 'Resampler', 'Rolling', 'RollingGroupby', 'SeriesGroupBy', 'StataReader', 'SASReader', 'TimedeltaIndexResamplerGroupby', 'TimeGrouper', 'Window'] # File: pandas-main/pandas/arrays/__init__.py """""" from pandas.core.arrays import ArrowExtensionArray, ArrowStringArray, BooleanArray, Categorical, DatetimeArray, FloatingArray, IntegerArray, IntervalArray, NumpyExtensionArray, PeriodArray, SparseArray, StringArray, TimedeltaArray __all__ = ['ArrowExtensionArray', 'ArrowStringArray', 'BooleanArray', 'Categorical', 'DatetimeArray', 'FloatingArray', 'IntegerArray', 'IntervalArray', 'NumpyExtensionArray', 'PeriodArray', 'SparseArray', 'StringArray', 'TimedeltaArray'] # File: pandas-main/pandas/compat/__init__.py """""" from __future__ import annotations import os import platform import sys from typing import TYPE_CHECKING from pandas.compat._constants import IS64, ISMUSL, PY311, PY312, PYPY, WASM from pandas.compat.numpy import is_numpy_dev from pandas.compat.pyarrow import HAS_PYARROW, pa_version_under10p1, pa_version_under11p0, pa_version_under13p0, pa_version_under14p0, pa_version_under14p1, pa_version_under16p0, pa_version_under17p0, pa_version_under18p0 if TYPE_CHECKING: from pandas._typing import F def set_function_name(f: F, name: str, cls: type) -> F: f.__name__ = name f.__qualname__ = f'{cls.__name__}.{name}' f.__module__ = cls.__module__ return f def is_platform_little_endian() -> bool: return sys.byteorder == 'little' def is_platform_windows() -> bool: return sys.platform in ['win32', 'cygwin'] def is_platform_linux() -> bool: return sys.platform == 'linux' def is_platform_mac() -> bool: return sys.platform == 'darwin' def is_platform_arm() -> bool: return platform.machine() in ('arm64', 'aarch64') or platform.machine().startswith('armv') def is_platform_power() -> bool: return platform.machine() in ('ppc64', 'ppc64le') def is_platform_riscv64() -> bool: return platform.machine() == 'riscv64' def is_ci_environment() -> bool: return os.environ.get('PANDAS_CI', '0') == '1' __all__ = ['is_numpy_dev', 'pa_version_under10p1', 'pa_version_under11p0', 'pa_version_under13p0', 'pa_version_under14p0', 'pa_version_under14p1', 'pa_version_under16p0', 'pa_version_under17p0', 'pa_version_under18p0', 'HAS_PYARROW', 'IS64', 'ISMUSL', 'PY311', 'PY312', 'PYPY', 'WASM'] # File: pandas-main/pandas/compat/_constants.py """""" from __future__ 
import annotations import platform import sys import sysconfig IS64 = sys.maxsize > 2 ** 32 PY311 = sys.version_info >= (3, 11) PY312 = sys.version_info >= (3, 12) PYPY = platform.python_implementation() == 'PyPy' WASM = sys.platform == 'emscripten' or platform.machine() in ['wasm32', 'wasm64'] ISMUSL = 'musl' in (sysconfig.get_config_var('HOST_GNU_TYPE') or '') REF_COUNT = 2 if PY311 else 3 __all__ = ['IS64', 'ISMUSL', 'PY311', 'PY312', 'PYPY', 'WASM'] # File: pandas-main/pandas/compat/_optional.py from __future__ import annotations import importlib import sys from typing import TYPE_CHECKING, Literal, overload import warnings from pandas.util._exceptions import find_stack_level from pandas.util.version import Version if TYPE_CHECKING: import types VERSIONS = {'adbc-driver-postgresql': '0.10.0', 'adbc-driver-sqlite': '0.8.0', 'bs4': '4.11.2', 'blosc': '1.21.3', 'bottleneck': '1.3.6', 'fastparquet': '2023.10.0', 'fsspec': '2022.11.0', 'html5lib': '1.1', 'hypothesis': '6.84.0', 'gcsfs': '2022.11.0', 'jinja2': '3.1.2', 'lxml.etree': '4.9.2', 'matplotlib': '3.6.3', 'numba': '0.56.4', 'numexpr': '2.8.4', 'odfpy': '1.4.1', 'openpyxl': '3.1.0', 'psycopg2': '2.9.6', 'pymysql': '1.0.2', 'pyarrow': '10.0.1', 'pyreadstat': '1.2.0', 'pytest': '7.3.2', 'python-calamine': '0.1.7', 'pytz': '2023.4', 'pyxlsb': '1.0.10', 's3fs': '2022.11.0', 'scipy': '1.10.0', 'sqlalchemy': '2.0.0', 'tables': '3.8.0', 'tabulate': '0.9.0', 'xarray': '2022.12.0', 'xlrd': '2.0.1', 'xlsxwriter': '3.0.5', 'zstandard': '0.19.0', 'tzdata': '2022.7', 'qtpy': '2.3.0', 'pyqt5': '5.15.9'} INSTALL_MAPPING = {'bs4': 'beautifulsoup4', 'bottleneck': 'Bottleneck', 'jinja2': 'Jinja2', 'lxml.etree': 'lxml', 'odf': 'odfpy', 'python_calamine': 'python-calamine', 'sqlalchemy': 'SQLAlchemy', 'tables': 'pytables'} def get_version(module: types.ModuleType) -> str: version = getattr(module, '__version__', None) if version is None: raise ImportError(f"Can't determine version for {module.__name__}") if module.__name__ == 'psycopg2': version = version.split()[0] return version @overload def import_optional_dependency(name: str, extra: str=..., min_version: str | None=..., *, errors: Literal['raise']=...) -> types.ModuleType: ... @overload def import_optional_dependency(name: str, extra: str=..., min_version: str | None=..., *, errors: Literal['warn', 'ignore']) -> types.ModuleType | None: ... def import_optional_dependency(name: str, extra: str='', min_version: str | None=None, *, errors: Literal['raise', 'warn', 'ignore']='raise') -> types.ModuleType | None: assert errors in {'warn', 'raise', 'ignore'} package_name = INSTALL_MAPPING.get(name) install_name = package_name if package_name is not None else name msg = f"Missing optional dependency '{install_name}'. {extra} Use pip or conda to install {install_name}." try: module = importlib.import_module(name) except ImportError as err: if errors == 'raise': raise ImportError(msg) from err return None parent = name.split('.')[0] if parent != name: install_name = parent module_to_get = sys.modules[install_name] else: module_to_get = module minimum_version = min_version if min_version is not None else VERSIONS.get(parent) if minimum_version: version = get_version(module_to_get) if version and Version(version) < Version(minimum_version): msg = f"Pandas requires version '{minimum_version}' or newer of '{parent}' (version '{version}' currently installed)." 
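            # The installed version is older than the minimum: depending on ``errors``,
            # warn and return None, raise ImportError, or silently return None.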
if errors == 'warn': warnings.warn(msg, UserWarning, stacklevel=find_stack_level()) return None elif errors == 'raise': raise ImportError(msg) else: return None return module # File: pandas-main/pandas/compat/numpy/__init__.py """""" import warnings import numpy as np from pandas.util.version import Version _np_version = np.__version__ _nlv = Version(_np_version) np_version_gte1p24 = _nlv >= Version('1.24') np_version_gte1p24p3 = _nlv >= Version('1.24.3') np_version_gte1p25 = _nlv >= Version('1.25') np_version_gt2 = _nlv >= Version('2.0.0') is_numpy_dev = _nlv.dev is not None _min_numpy_ver = '1.23.5' if _nlv < Version(_min_numpy_ver): raise ImportError(f'this version of pandas is incompatible with numpy < {_min_numpy_ver}\nyour numpy version is {_np_version}.\nPlease upgrade numpy to >= {_min_numpy_ver} to use this pandas version') np_long: type np_ulong: type if np_version_gt2: try: with warnings.catch_warnings(): warnings.filterwarnings('ignore', '.*In the future `np\\.long` will be defined as.*', FutureWarning) np_long = np.long np_ulong = np.ulong except AttributeError: np_long = np.int_ np_ulong = np.uint else: np_long = np.int_ np_ulong = np.uint __all__ = ['np', '_np_version', 'is_numpy_dev'] # File: pandas-main/pandas/compat/numpy/function.py """""" from __future__ import annotations from typing import TYPE_CHECKING, Any, TypeVar, cast, overload import numpy as np from numpy import ndarray from pandas._libs.lib import is_bool, is_integer from pandas.errors import UnsupportedFunctionCall from pandas.util._validators import validate_args, validate_args_and_kwargs, validate_kwargs if TYPE_CHECKING: from pandas._typing import Axis, AxisInt AxisNoneT = TypeVar('AxisNoneT', Axis, None) class CompatValidator: def __init__(self, defaults, fname=None, method: str | None=None, max_fname_arg_count=None) -> None: self.fname = fname self.method = method self.defaults = defaults self.max_fname_arg_count = max_fname_arg_count def __call__(self, args, kwargs, fname=None, max_fname_arg_count=None, method: str | None=None) -> None: if not args and (not kwargs): return None fname = self.fname if fname is None else fname max_fname_arg_count = self.max_fname_arg_count if max_fname_arg_count is None else max_fname_arg_count method = self.method if method is None else method if method == 'args': validate_args(fname, args, max_fname_arg_count, self.defaults) elif method == 'kwargs': validate_kwargs(fname, kwargs, self.defaults) elif method == 'both': validate_args_and_kwargs(fname, args, kwargs, max_fname_arg_count, self.defaults) else: raise ValueError(f"invalid validation method '{method}'") ARGMINMAX_DEFAULTS = {'out': None} validate_argmin = CompatValidator(ARGMINMAX_DEFAULTS, fname='argmin', method='both', max_fname_arg_count=1) validate_argmax = CompatValidator(ARGMINMAX_DEFAULTS, fname='argmax', method='both', max_fname_arg_count=1) def process_skipna(skipna: bool | ndarray | None, args) -> tuple[bool, Any]: if isinstance(skipna, ndarray) or skipna is None: args = (skipna,) + args skipna = True return (skipna, args) def validate_argmin_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool: (skipna, args) = process_skipna(skipna, args) validate_argmin(args, kwargs) return skipna def validate_argmax_with_skipna(skipna: bool | ndarray | None, args, kwargs) -> bool: (skipna, args) = process_skipna(skipna, args) validate_argmax(args, kwargs) return skipna ARGSORT_DEFAULTS: dict[str, int | str | None] = {} ARGSORT_DEFAULTS['axis'] = -1 ARGSORT_DEFAULTS['kind'] = 'quicksort' 
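# The *_DEFAULTS mappings mirror the corresponding numpy signatures; a CompatValidator
# built from one raises when a caller forwards a numpy-only argument with a non-default
# value. Illustrative (hypothetical values): validate_argmin((), {"out": None}) passes,
# while validate_argmin((), {"out": "buffer"}) raises.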
ARGSORT_DEFAULTS['order'] = None ARGSORT_DEFAULTS['kind'] = None ARGSORT_DEFAULTS['stable'] = None validate_argsort = CompatValidator(ARGSORT_DEFAULTS, fname='argsort', max_fname_arg_count=0, method='both') ARGSORT_DEFAULTS_KIND: dict[str, int | None] = {} ARGSORT_DEFAULTS_KIND['axis'] = -1 ARGSORT_DEFAULTS_KIND['order'] = None ARGSORT_DEFAULTS_KIND['stable'] = None validate_argsort_kind = CompatValidator(ARGSORT_DEFAULTS_KIND, fname='argsort', max_fname_arg_count=0, method='both') def validate_argsort_with_ascending(ascending: bool | int | None, args, kwargs) -> bool: if is_integer(ascending) or ascending is None: args = (ascending,) + args ascending = True validate_argsort_kind(args, kwargs, max_fname_arg_count=3) ascending = cast(bool, ascending) return ascending CLIP_DEFAULTS: dict[str, Any] = {'out': None} validate_clip = CompatValidator(CLIP_DEFAULTS, fname='clip', method='both', max_fname_arg_count=3) @overload def validate_clip_with_axis(axis: ndarray, args, kwargs) -> None: ... @overload def validate_clip_with_axis(axis: AxisNoneT, args, kwargs) -> AxisNoneT: ... def validate_clip_with_axis(axis: ndarray | AxisNoneT, args, kwargs) -> AxisNoneT | None: if isinstance(axis, ndarray): args = (axis,) + args axis = None validate_clip(args, kwargs) return axis CUM_FUNC_DEFAULTS: dict[str, Any] = {} CUM_FUNC_DEFAULTS['dtype'] = None CUM_FUNC_DEFAULTS['out'] = None validate_cum_func = CompatValidator(CUM_FUNC_DEFAULTS, method='both', max_fname_arg_count=1) validate_cumsum = CompatValidator(CUM_FUNC_DEFAULTS, fname='cumsum', method='both', max_fname_arg_count=1) def validate_cum_func_with_skipna(skipna: bool, args, kwargs, name) -> bool: if not is_bool(skipna): args = (skipna,) + args skipna = True elif isinstance(skipna, np.bool_): skipna = bool(skipna) validate_cum_func(args, kwargs, fname=name) return skipna ALLANY_DEFAULTS: dict[str, bool | None] = {} ALLANY_DEFAULTS['dtype'] = None ALLANY_DEFAULTS['out'] = None ALLANY_DEFAULTS['keepdims'] = False ALLANY_DEFAULTS['axis'] = None validate_all = CompatValidator(ALLANY_DEFAULTS, fname='all', method='both', max_fname_arg_count=1) validate_any = CompatValidator(ALLANY_DEFAULTS, fname='any', method='both', max_fname_arg_count=1) LOGICAL_FUNC_DEFAULTS = {'out': None, 'keepdims': False} validate_logical_func = CompatValidator(LOGICAL_FUNC_DEFAULTS, method='kwargs') MINMAX_DEFAULTS = {'axis': None, 'dtype': None, 'out': None, 'keepdims': False} validate_min = CompatValidator(MINMAX_DEFAULTS, fname='min', method='both', max_fname_arg_count=1) validate_max = CompatValidator(MINMAX_DEFAULTS, fname='max', method='both', max_fname_arg_count=1) REPEAT_DEFAULTS: dict[str, Any] = {'axis': None} validate_repeat = CompatValidator(REPEAT_DEFAULTS, fname='repeat', method='both', max_fname_arg_count=1) ROUND_DEFAULTS: dict[str, Any] = {'out': None} validate_round = CompatValidator(ROUND_DEFAULTS, fname='round', method='both', max_fname_arg_count=1) STAT_FUNC_DEFAULTS: dict[str, Any | None] = {} STAT_FUNC_DEFAULTS['dtype'] = None STAT_FUNC_DEFAULTS['out'] = None SUM_DEFAULTS = STAT_FUNC_DEFAULTS.copy() SUM_DEFAULTS['axis'] = None SUM_DEFAULTS['keepdims'] = False SUM_DEFAULTS['initial'] = None PROD_DEFAULTS = SUM_DEFAULTS.copy() MEAN_DEFAULTS = SUM_DEFAULTS.copy() MEDIAN_DEFAULTS = STAT_FUNC_DEFAULTS.copy() MEDIAN_DEFAULTS['overwrite_input'] = False MEDIAN_DEFAULTS['keepdims'] = False STAT_FUNC_DEFAULTS['keepdims'] = False validate_stat_func = CompatValidator(STAT_FUNC_DEFAULTS, method='kwargs') validate_sum = CompatValidator(SUM_DEFAULTS, fname='sum', 
method='both', max_fname_arg_count=1) validate_prod = CompatValidator(PROD_DEFAULTS, fname='prod', method='both', max_fname_arg_count=1) validate_mean = CompatValidator(MEAN_DEFAULTS, fname='mean', method='both', max_fname_arg_count=1) validate_median = CompatValidator(MEDIAN_DEFAULTS, fname='median', method='both', max_fname_arg_count=1) STAT_DDOF_FUNC_DEFAULTS: dict[str, bool | None] = {} STAT_DDOF_FUNC_DEFAULTS['dtype'] = None STAT_DDOF_FUNC_DEFAULTS['out'] = None STAT_DDOF_FUNC_DEFAULTS['keepdims'] = False validate_stat_ddof_func = CompatValidator(STAT_DDOF_FUNC_DEFAULTS, method='kwargs') TAKE_DEFAULTS: dict[str, str | None] = {} TAKE_DEFAULTS['out'] = None TAKE_DEFAULTS['mode'] = 'raise' validate_take = CompatValidator(TAKE_DEFAULTS, fname='take', method='kwargs') TRANSPOSE_DEFAULTS = {'axes': None} validate_transpose = CompatValidator(TRANSPOSE_DEFAULTS, fname='transpose', method='both', max_fname_arg_count=0) def validate_groupby_func(name: str, args, kwargs, allowed=None) -> None: if allowed is None: allowed = [] kwargs = set(kwargs) - set(allowed) if len(args) + len(kwargs) > 0: raise UnsupportedFunctionCall(f'numpy operations are not valid with groupby. Use .groupby(...).{name}() instead') def validate_minmax_axis(axis: AxisInt | None, ndim: int=1) -> None: if axis is None: return if axis >= ndim or (axis < 0 and ndim + axis < 0): raise ValueError(f'`axis` must be fewer than the number of dimensions ({ndim})') _validation_funcs = {'median': validate_median, 'mean': validate_mean, 'min': validate_min, 'max': validate_max, 'sum': validate_sum, 'prod': validate_prod} def validate_func(fname, args, kwargs) -> None: if fname not in _validation_funcs: return validate_stat_func(args, kwargs, fname=fname) validation_func = _validation_funcs[fname] return validation_func(args, kwargs) # File: pandas-main/pandas/compat/pickle_compat.py """""" from __future__ import annotations import contextlib import io import pickle from typing import TYPE_CHECKING, Any import numpy as np from pandas._libs.arrays import NDArrayBacked from pandas._libs.tslibs import BaseOffset from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray from pandas.core.internals import BlockManager if TYPE_CHECKING: from collections.abc import Generator _class_locations_map = {('pandas.core.internals.blocks', 'new_block'): ('pandas._libs.internals', '_unpickle_block'), ('pandas._libs.tslibs.nattype', '__nat_unpickle'): ('pandas._libs.tslibs.nattype', '_nat_unpickle'), ('pandas.core.indexes.numeric', 'Int64Index'): ('pandas.core.indexes.base', 'Index'), ('pandas.core.indexes.numeric', 'UInt64Index'): ('pandas.core.indexes.base', 'Index'), ('pandas.core.indexes.numeric', 'Float64Index'): ('pandas.core.indexes.base', 'Index'), ('pandas.core.arrays.sparse.dtype', 'SparseDtype'): ('pandas.core.dtypes.dtypes', 'SparseDtype')} class Unpickler(pickle._Unpickler): def find_class(self, module: str, name: str) -> Any: key = (module, name) (module, name) = _class_locations_map.get(key, key) return super().find_class(module, name) dispatch = pickle._Unpickler.dispatch.copy() def load_reduce(self) -> None: stack = self.stack args = stack.pop() func = stack[-1] try: stack[-1] = func(*args) except TypeError: if args and isinstance(args[0], type) and issubclass(args[0], BaseOffset): cls = args[0] stack[-1] = cls.__new__(*args) return elif args and issubclass(args[0], PeriodArray): cls = args[0] stack[-1] = NDArrayBacked.__new__(*args) return raise dispatch[pickle.REDUCE[0]] = load_reduce def load_newobj(self) -> None: args = 
self.stack.pop() cls = self.stack.pop() if issubclass(cls, DatetimeArray) and (not args): arr = np.array([], dtype='M8[ns]') obj = cls.__new__(cls, arr, arr.dtype) elif issubclass(cls, TimedeltaArray) and (not args): arr = np.array([], dtype='m8[ns]') obj = cls.__new__(cls, arr, arr.dtype) elif cls is BlockManager and (not args): obj = cls.__new__(cls, (), [], False) else: obj = cls.__new__(cls, *args) self.append(obj) dispatch[pickle.NEWOBJ[0]] = load_newobj def loads(bytes_object: bytes, *, fix_imports: bool=True, encoding: str='ASCII', errors: str='strict') -> Any: fd = io.BytesIO(bytes_object) return Unpickler(fd, fix_imports=fix_imports, encoding=encoding, errors=errors).load() @contextlib.contextmanager def patch_pickle() -> Generator[None, None, None]: orig_loads = pickle.loads try: setattr(pickle, 'loads', loads) yield finally: setattr(pickle, 'loads', orig_loads) # File: pandas-main/pandas/compat/pyarrow.py """""" from __future__ import annotations from pandas.util.version import Version try: import pyarrow as pa _palv = Version(Version(pa.__version__).base_version) pa_version_under10p1 = _palv < Version('10.0.1') pa_version_under11p0 = _palv < Version('11.0.0') pa_version_under12p0 = _palv < Version('12.0.0') pa_version_under13p0 = _palv < Version('13.0.0') pa_version_under14p0 = _palv < Version('14.0.0') pa_version_under14p1 = _palv < Version('14.0.1') pa_version_under15p0 = _palv < Version('15.0.0') pa_version_under16p0 = _palv < Version('16.0.0') pa_version_under17p0 = _palv < Version('17.0.0') pa_version_under18p0 = _palv < Version('18.0.0') HAS_PYARROW = True except ImportError: pa_version_under10p1 = True pa_version_under11p0 = True pa_version_under12p0 = True pa_version_under13p0 = True pa_version_under14p0 = True pa_version_under14p1 = True pa_version_under15p0 = True pa_version_under16p0 = True pa_version_under17p0 = True pa_version_under18p0 = True HAS_PYARROW = False # File: pandas-main/pandas/core/_numba/executor.py from __future__ import annotations import functools from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from collections.abc import Callable from pandas._typing import Scalar import numpy as np from pandas.compat._optional import import_optional_dependency from pandas.core.util.numba_ import jit_user_function @functools.cache def generate_apply_looper(func, nopython=True, nogil=True, parallel=False): if TYPE_CHECKING: import numba else: numba = import_optional_dependency('numba') nb_compat_func = jit_user_function(func) @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def nb_looper(values, axis, *args): if axis == 0: first_elem = values[:, 0] dim0 = values.shape[1] else: first_elem = values[0] dim0 = values.shape[0] res0 = nb_compat_func(first_elem, *args) buf_shape = (dim0,) + np.atleast_1d(np.asarray(res0)).shape if axis == 0: buf_shape = buf_shape[::-1] buff = np.empty(buf_shape) if axis == 1: buff[0] = res0 for i in numba.prange(1, values.shape[0]): buff[i] = nb_compat_func(values[i], *args) else: buff[:, 0] = res0 for j in numba.prange(1, values.shape[1]): buff[:, j] = nb_compat_func(values[:, j], *args) return buff return nb_looper @functools.cache def make_looper(func, result_dtype, is_grouped_kernel, nopython, nogil, parallel): if TYPE_CHECKING: import numba else: numba = import_optional_dependency('numba') if is_grouped_kernel: @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def column_looper(values: np.ndarray, labels: np.ndarray, ngroups: int, min_periods: int, *args): result = np.empty((values.shape[0], ngroups), 
dtype=result_dtype) na_positions = {} for i in numba.prange(values.shape[0]): (output, na_pos) = func(values[i], result_dtype, labels, ngroups, min_periods, *args) result[i] = output if len(na_pos) > 0: na_positions[i] = np.array(na_pos) return (result, na_positions) else: @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def column_looper(values: np.ndarray, start: np.ndarray, end: np.ndarray, min_periods: int, *args): result = np.empty((values.shape[0], len(start)), dtype=result_dtype) na_positions = {} for i in numba.prange(values.shape[0]): (output, na_pos) = func(values[i], result_dtype, start, end, min_periods, *args) result[i] = output if len(na_pos) > 0: na_positions[i] = np.array(na_pos) return (result, na_positions) return column_looper default_dtype_mapping: dict[np.dtype, Any] = {np.dtype('int8'): np.int64, np.dtype('int16'): np.int64, np.dtype('int32'): np.int64, np.dtype('int64'): np.int64, np.dtype('uint8'): np.uint64, np.dtype('uint16'): np.uint64, np.dtype('uint32'): np.uint64, np.dtype('uint64'): np.uint64, np.dtype('float32'): np.float64, np.dtype('float64'): np.float64, np.dtype('complex64'): np.complex128, np.dtype('complex128'): np.complex128} float_dtype_mapping: dict[np.dtype, Any] = {np.dtype('int8'): np.float64, np.dtype('int16'): np.float64, np.dtype('int32'): np.float64, np.dtype('int64'): np.float64, np.dtype('uint8'): np.float64, np.dtype('uint16'): np.float64, np.dtype('uint32'): np.float64, np.dtype('uint64'): np.float64, np.dtype('float32'): np.float64, np.dtype('float64'): np.float64, np.dtype('complex64'): np.float64, np.dtype('complex128'): np.float64} identity_dtype_mapping: dict[np.dtype, Any] = {np.dtype('int8'): np.int8, np.dtype('int16'): np.int16, np.dtype('int32'): np.int32, np.dtype('int64'): np.int64, np.dtype('uint8'): np.uint8, np.dtype('uint16'): np.uint16, np.dtype('uint32'): np.uint32, np.dtype('uint64'): np.uint64, np.dtype('float32'): np.float32, np.dtype('float64'): np.float64, np.dtype('complex64'): np.complex64, np.dtype('complex128'): np.complex128} def generate_shared_aggregator(func: Callable[..., Scalar], dtype_mapping: dict[np.dtype, np.dtype], is_grouped_kernel: bool, nopython: bool, nogil: bool, parallel: bool): def looper_wrapper(values, start=None, end=None, labels=None, ngroups=None, min_periods: int=0, **kwargs): result_dtype = dtype_mapping[values.dtype] column_looper = make_looper(func, result_dtype, is_grouped_kernel, nopython, nogil, parallel) if is_grouped_kernel: (result, na_positions) = column_looper(values, labels, ngroups, min_periods, *kwargs.values()) else: (result, na_positions) = column_looper(values, start, end, min_periods, *kwargs.values()) if result.dtype.kind == 'i': for na_pos in na_positions.values(): if len(na_pos) > 0: result = result.astype('float64') break for (i, na_pos) in na_positions.items(): if len(na_pos) > 0: result[i, na_pos] = np.nan return result return looper_wrapper # File: pandas-main/pandas/core/_numba/extensions.py """""" from __future__ import annotations from contextlib import contextmanager import operator from typing import TYPE_CHECKING import numba from numba import types from numba.core import cgutils from numba.core.datamodel import models from numba.core.extending import NativeValue, box, lower_builtin, make_attribute_wrapper, overload, overload_attribute, overload_method, register_model, type_callable, typeof_impl, unbox from numba.core.imputils import impl_ret_borrowed import numpy as np from pandas._libs import lib from pandas.core.indexes.base import Index from 
pandas.core.indexing import _iLocIndexer from pandas.core.internals import SingleBlockManager from pandas.core.series import Series if TYPE_CHECKING: from pandas._typing import Self @contextmanager def set_numba_data(index: Index): numba_data = index._data if numba_data.dtype == object: if not lib.is_string_array(numba_data): raise ValueError('The numba engine only supports using string or numeric column names') numba_data = numba_data.astype('U') try: index._numba_data = numba_data yield index finally: del index._numba_data class IndexType(types.Type): def __init__(self, dtype, layout, pyclass: any) -> None: self.pyclass = pyclass name = f'index({dtype}, {layout})' self.dtype = dtype self.layout = layout super().__init__(name) @property def key(self): return (self.pyclass, self.dtype, self.layout) @property def as_array(self): return types.Array(self.dtype, 1, self.layout) def copy(self, dtype=None, ndim: int=1, layout=None) -> Self: assert ndim == 1 if dtype is None: dtype = self.dtype layout = layout or self.layout return type(self)(dtype, layout, self.pyclass) class SeriesType(types.Type): def __init__(self, dtype, index, namety) -> None: assert isinstance(index, IndexType) self.dtype = dtype self.index = index self.values = types.Array(self.dtype, 1, 'C') self.namety = namety name = f'series({dtype}, {index}, {namety})' super().__init__(name) @property def key(self): return (self.dtype, self.index, self.namety) @property def as_array(self): return self.values def copy(self, dtype=None, ndim: int=1, layout: str='C') -> Self: assert ndim == 1 assert layout == 'C' if dtype is None: dtype = self.dtype return type(self)(dtype, self.index, self.namety) @typeof_impl.register(Index) def typeof_index(val, c) -> IndexType: arrty = typeof_impl(val._numba_data, c) assert arrty.ndim == 1 return IndexType(arrty.dtype, arrty.layout, type(val)) @typeof_impl.register(Series) def typeof_series(val, c) -> SeriesType: index = typeof_impl(val.index, c) arrty = typeof_impl(val.values, c) namety = typeof_impl(val.name, c) assert arrty.ndim == 1 assert arrty.layout == 'C' return SeriesType(arrty.dtype, index, namety) @type_callable(Series) def type_series_constructor(context): def typer(data, index, name=None): if isinstance(index, IndexType) and isinstance(data, types.Array): assert data.ndim == 1 if name is None: name = types.intp return SeriesType(data.dtype, index, name) return typer @type_callable(Index) def type_index_constructor(context): def typer(data, hashmap=None): if isinstance(data, types.Array): assert data.layout == 'C' assert data.ndim == 1 assert hashmap is None or isinstance(hashmap, types.DictType) return IndexType(data.dtype, layout=data.layout, pyclass=Index) return typer @register_model(IndexType) class IndexModel(models.StructModel): def __init__(self, dmm, fe_type) -> None: members = [('data', fe_type.as_array), ('hashmap', types.DictType(fe_type.dtype, types.intp)), ('parent', types.pyobject)] models.StructModel.__init__(self, dmm, fe_type, members) @register_model(SeriesType) class SeriesModel(models.StructModel): def __init__(self, dmm, fe_type) -> None: members = [('index', fe_type.index), ('values', fe_type.as_array), ('name', fe_type.namety)] models.StructModel.__init__(self, dmm, fe_type, members) make_attribute_wrapper(IndexType, 'data', '_data') make_attribute_wrapper(IndexType, 'hashmap', 'hashmap') make_attribute_wrapper(SeriesType, 'index', 'index') make_attribute_wrapper(SeriesType, 'values', 'values') make_attribute_wrapper(SeriesType, 'name', 'name') 
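# NOTE (editor): illustrative sketch added for clarity; not part of the
# original pandas module. The register_model/make_attribute_wrapper calls above
# describe Index and Series to numba as plain structs and expose their fields
# as attributes, so jitted user functions (for example those passed to the
# engine="numba" code paths) can stay in nopython mode. A hypothetical kernel:
#
#     import numba
#
#     @numba.jit(nopython=True)
#     def demean(series):  # 'demean' is a made-up name for this sketch
#         # series.values and series.index resolve to the struct members
#         # registered above (the SeriesModel 'values' and 'index' fields).
#         return series.values - series.values.mean()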
@lower_builtin(Series, types.Array, IndexType) def pdseries_constructor(context, builder, sig, args): (data, index) = args series = cgutils.create_struct_proxy(sig.return_type)(context, builder) series.index = index series.values = data series.name = context.get_constant(types.intp, 0) return impl_ret_borrowed(context, builder, sig.return_type, series._getvalue()) @lower_builtin(Series, types.Array, IndexType, types.intp) @lower_builtin(Series, types.Array, IndexType, types.float64) @lower_builtin(Series, types.Array, IndexType, types.unicode_type) def pdseries_constructor_with_name(context, builder, sig, args): (data, index, name) = args series = cgutils.create_struct_proxy(sig.return_type)(context, builder) series.index = index series.values = data series.name = name return impl_ret_borrowed(context, builder, sig.return_type, series._getvalue()) @lower_builtin(Index, types.Array, types.DictType, types.pyobject) def index_constructor_2arg(context, builder, sig, args): (data, hashmap, parent) = args index = cgutils.create_struct_proxy(sig.return_type)(context, builder) index.data = data index.hashmap = hashmap index.parent = parent return impl_ret_borrowed(context, builder, sig.return_type, index._getvalue()) @lower_builtin(Index, types.Array, types.DictType) def index_constructor_2arg_parent(context, builder, sig, args): (data, hashmap) = args index = cgutils.create_struct_proxy(sig.return_type)(context, builder) index.data = data index.hashmap = hashmap return impl_ret_borrowed(context, builder, sig.return_type, index._getvalue()) @lower_builtin(Index, types.Array) def index_constructor_1arg(context, builder, sig, args): from numba.typed import Dict key_type = sig.return_type.dtype value_type = types.intp def index_impl(data): return Index(data, Dict.empty(key_type, value_type)) return context.compile_internal(builder, index_impl, sig, args) def maybe_cast_str(x): pass @overload(maybe_cast_str) def maybe_cast_str_impl(x): if isinstance(x, types.UnicodeCharSeq): return lambda x: str(x) else: return lambda x: x @unbox(IndexType) def unbox_index(typ, obj, c): data_obj = c.pyapi.object_getattr_string(obj, '_numba_data') index = cgutils.create_struct_proxy(typ)(c.context, c.builder) index.data = c.unbox(typ.as_array, data_obj).value typed_dict_obj = c.pyapi.unserialize(c.pyapi.serialize_object(numba.typed.Dict)) arr_type_obj = c.pyapi.unserialize(c.pyapi.serialize_object(typ.dtype)) intp_type_obj = c.pyapi.unserialize(c.pyapi.serialize_object(types.intp)) hashmap_obj = c.pyapi.call_method(typed_dict_obj, 'empty', (arr_type_obj, intp_type_obj)) index.hashmap = c.unbox(types.DictType(typ.dtype, types.intp), hashmap_obj).value index.parent = obj c.pyapi.decref(data_obj) c.pyapi.decref(arr_type_obj) c.pyapi.decref(intp_type_obj) c.pyapi.decref(typed_dict_obj) return NativeValue(index._getvalue()) @unbox(SeriesType) def unbox_series(typ, obj, c): index_obj = c.pyapi.object_getattr_string(obj, 'index') values_obj = c.pyapi.object_getattr_string(obj, 'values') name_obj = c.pyapi.object_getattr_string(obj, 'name') series = cgutils.create_struct_proxy(typ)(c.context, c.builder) series.index = c.unbox(typ.index, index_obj).value series.values = c.unbox(typ.values, values_obj).value series.name = c.unbox(typ.namety, name_obj).value c.pyapi.decref(index_obj) c.pyapi.decref(values_obj) c.pyapi.decref(name_obj) return NativeValue(series._getvalue()) @box(IndexType) def box_index(typ, val, c): index = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val) res = 
cgutils.alloca_once_value(c.builder, index.parent) with c.builder.if_else(cgutils.is_not_null(c.builder, index.parent)) as (has_parent, otherwise): with has_parent: c.pyapi.incref(index.parent) with otherwise: class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(Index)) array_obj = c.box(typ.as_array, index.data) if isinstance(typ.dtype, types.UnicodeCharSeq): object_str_obj = c.pyapi.unserialize(c.pyapi.serialize_object('object')) array_obj = c.pyapi.call_method(array_obj, 'astype', (object_str_obj,)) c.pyapi.decref(object_str_obj) index_obj = c.pyapi.call_method(class_obj, '_simple_new', (array_obj,)) index.parent = index_obj c.builder.store(index_obj, res) c.pyapi.decref(class_obj) c.pyapi.decref(array_obj) return c.builder.load(res) @box(SeriesType) def box_series(typ, val, c): series = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val) series_const_obj = c.pyapi.unserialize(c.pyapi.serialize_object(Series._from_mgr)) mgr_const_obj = c.pyapi.unserialize(c.pyapi.serialize_object(SingleBlockManager.from_array)) index_obj = c.box(typ.index, series.index) array_obj = c.box(typ.as_array, series.values) name_obj = c.box(typ.namety, series.name) mgr_obj = c.pyapi.call_function_objargs(mgr_const_obj, (array_obj, index_obj)) mgr_axes_obj = c.pyapi.object_getattr_string(mgr_obj, 'axes') series_obj = c.pyapi.call_function_objargs(series_const_obj, (mgr_obj, mgr_axes_obj)) c.pyapi.object_setattr_string(series_obj, '_name', name_obj) c.pyapi.decref(series_const_obj) c.pyapi.decref(mgr_axes_obj) c.pyapi.decref(mgr_obj) c.pyapi.decref(mgr_const_obj) c.pyapi.decref(index_obj) c.pyapi.decref(array_obj) c.pyapi.decref(name_obj) return series_obj def generate_series_reduction(ser_reduction, ser_method): @overload_method(SeriesType, ser_reduction) def series_reduction(series): def series_reduction_impl(series): return ser_method(series.values) return series_reduction_impl return series_reduction def generate_series_binop(binop): @overload(binop) def series_binop(series1, value): if isinstance(series1, SeriesType): if isinstance(value, SeriesType): def series_binop_impl(series1, series2): return Series(binop(series1.values, series2.values), series1.index, series1.name) return series_binop_impl else: def series_binop_impl(series1, value): return Series(binop(series1.values, value), series1.index, series1.name) return series_binop_impl return series_binop series_reductions = [('sum', np.sum), ('mean', np.mean), ('min', np.min), ('max', np.max)] for (reduction, reduction_method) in series_reductions: generate_series_reduction(reduction, reduction_method) series_binops = [operator.add, operator.sub, operator.mul, operator.truediv] for ser_binop in series_binops: generate_series_binop(ser_binop) @overload_method(IndexType, 'get_loc') def index_get_loc(index, item): def index_get_loc_impl(index, item): if len(index.hashmap) == 0: for (i, val) in enumerate(index._data): index.hashmap[val] = i return index.hashmap[item] return index_get_loc_impl @overload(operator.getitem) def series_indexing(series, item): if isinstance(series, SeriesType): def series_getitem(series, item): loc = series.index.get_loc(item) return series.iloc[loc] return series_getitem @overload(operator.getitem) def index_indexing(index, idx): if isinstance(index, IndexType): def index_getitem(index, idx): return index._data[idx] return index_getitem class IlocType(types.Type): def __init__(self, obj_type) -> None: self.obj_type = obj_type name = f'iLocIndexer({obj_type})' super().__init__(name=name) @property def key(self): 
return self.obj_type @typeof_impl.register(_iLocIndexer) def typeof_iloc(val, c) -> IlocType: objtype = typeof_impl(val.obj, c) return IlocType(objtype) @type_callable(_iLocIndexer) def type_iloc_constructor(context): def typer(obj): if isinstance(obj, SeriesType): return IlocType(obj) return typer @lower_builtin(_iLocIndexer, SeriesType) def iloc_constructor(context, builder, sig, args): (obj,) = args iloc_indexer = cgutils.create_struct_proxy(sig.return_type)(context, builder) iloc_indexer.obj = obj return impl_ret_borrowed(context, builder, sig.return_type, iloc_indexer._getvalue()) @register_model(IlocType) class ILocModel(models.StructModel): def __init__(self, dmm, fe_type) -> None: members = [('obj', fe_type.obj_type)] models.StructModel.__init__(self, dmm, fe_type, members) make_attribute_wrapper(IlocType, 'obj', 'obj') @overload_attribute(SeriesType, 'iloc') def series_iloc(series): def get(series): return _iLocIndexer(series) return get @overload(operator.getitem) def iloc_getitem(iloc_indexer, i): if isinstance(iloc_indexer, IlocType): def getitem_impl(iloc_indexer, i): return iloc_indexer.obj.values[i] return getitem_impl # File: pandas-main/pandas/core/_numba/kernels/__init__.py from pandas.core._numba.kernels.mean_ import grouped_mean, sliding_mean from pandas.core._numba.kernels.min_max_ import grouped_min_max, sliding_min_max from pandas.core._numba.kernels.sum_ import grouped_sum, sliding_sum from pandas.core._numba.kernels.var_ import grouped_var, sliding_var __all__ = ['sliding_mean', 'grouped_mean', 'sliding_sum', 'grouped_sum', 'sliding_var', 'grouped_var', 'sliding_min_max', 'grouped_min_max'] # File: pandas-main/pandas/core/_numba/kernels/mean_.py """""" from __future__ import annotations from typing import TYPE_CHECKING import numba import numpy as np from pandas.core._numba.kernels.shared import is_monotonic_increasing from pandas.core._numba.kernels.sum_ import grouped_kahan_sum if TYPE_CHECKING: from pandas._typing import npt @numba.jit(nopython=True, nogil=True, parallel=False) def add_mean(val: float, nobs: int, sum_x: float, neg_ct: int, compensation: float, num_consecutive_same_value: int, prev_value: float) -> tuple[int, float, int, float, int, float]: if not np.isnan(val): nobs += 1 y = val - compensation t = sum_x + y compensation = t - sum_x - y sum_x = t if val < 0: neg_ct += 1 if val == prev_value: num_consecutive_same_value += 1 else: num_consecutive_same_value = 1 prev_value = val return (nobs, sum_x, neg_ct, compensation, num_consecutive_same_value, prev_value) @numba.jit(nopython=True, nogil=True, parallel=False) def remove_mean(val: float, nobs: int, sum_x: float, neg_ct: int, compensation: float) -> tuple[int, float, int, float]: if not np.isnan(val): nobs -= 1 y = -val - compensation t = sum_x + y compensation = t - sum_x - y sum_x = t if val < 0: neg_ct -= 1 return (nobs, sum_x, neg_ct, compensation) @numba.jit(nopython=True, nogil=True, parallel=False) def sliding_mean(values: np.ndarray, result_dtype: np.dtype, start: np.ndarray, end: np.ndarray, min_periods: int) -> tuple[np.ndarray, list[int]]: N = len(start) nobs = 0 sum_x = 0.0 neg_ct = 0 compensation_add = 0.0 compensation_remove = 0.0 is_monotonic_increasing_bounds = is_monotonic_increasing(start) and is_monotonic_increasing(end) output = np.empty(N, dtype=result_dtype) for i in range(N): s = start[i] e = end[i] if i == 0 or not is_monotonic_increasing_bounds: prev_value = values[s] num_consecutive_same_value = 0 for j in range(s, e): val = values[j] (nobs, sum_x, neg_ct, 
compensation_add, num_consecutive_same_value, prev_value) = add_mean(val, nobs, sum_x, neg_ct, compensation_add, num_consecutive_same_value, prev_value) else: for j in range(start[i - 1], s): val = values[j] (nobs, sum_x, neg_ct, compensation_remove) = remove_mean(val, nobs, sum_x, neg_ct, compensation_remove) for j in range(end[i - 1], e): val = values[j] (nobs, sum_x, neg_ct, compensation_add, num_consecutive_same_value, prev_value) = add_mean(val, nobs, sum_x, neg_ct, compensation_add, num_consecutive_same_value, prev_value) if nobs >= min_periods and nobs > 0: result = sum_x / nobs if num_consecutive_same_value >= nobs: result = prev_value elif neg_ct == 0 and result < 0: result = 0 elif neg_ct == nobs and result > 0: result = 0 else: result = np.nan output[i] = result if not is_monotonic_increasing_bounds: nobs = 0 sum_x = 0.0 neg_ct = 0 compensation_remove = 0.0 na_pos = [0 for i in range(0)] return (output, na_pos) @numba.jit(nopython=True, nogil=True, parallel=False) def grouped_mean(values: np.ndarray, result_dtype: np.dtype, labels: npt.NDArray[np.intp], ngroups: int, min_periods: int) -> tuple[np.ndarray, list[int]]: (output, nobs_arr, comp_arr, consecutive_counts, prev_vals) = grouped_kahan_sum(values, result_dtype, labels, ngroups) for lab in range(ngroups): nobs = nobs_arr[lab] num_consecutive_same_value = consecutive_counts[lab] prev_value = prev_vals[lab] sum_x = output[lab] if nobs >= min_periods: if num_consecutive_same_value >= nobs: result = prev_value * nobs else: result = sum_x else: result = np.nan result /= nobs output[lab] = result na_pos = [0 for i in range(0)] return (output, na_pos) # File: pandas-main/pandas/core/_numba/kernels/min_max_.py """""" from __future__ import annotations from typing import TYPE_CHECKING import numba import numpy as np if TYPE_CHECKING: from pandas._typing import npt @numba.jit(nopython=True, nogil=True, parallel=False) def sliding_min_max(values: np.ndarray, result_dtype: np.dtype, start: np.ndarray, end: np.ndarray, min_periods: int, is_max: bool) -> tuple[np.ndarray, list[int]]: N = len(start) nobs = 0 output = np.empty(N, dtype=result_dtype) na_pos = [] Q: list = [] W: list = [] for i in range(N): curr_win_size = end[i] - start[i] if i == 0: st = start[i] else: st = end[i - 1] for k in range(st, end[i]): ai = values[k] if not np.isnan(ai): nobs += 1 elif is_max: ai = -np.inf else: ai = np.inf if is_max: while Q and (ai >= values[Q[-1]] or values[Q[-1]] != values[Q[-1]]): Q.pop() else: while Q and (ai <= values[Q[-1]] or values[Q[-1]] != values[Q[-1]]): Q.pop() Q.append(k) W.append(k) while Q and Q[0] <= start[i] - 1: Q.pop(0) while W and W[0] <= start[i] - 1: if not np.isnan(values[W[0]]): nobs -= 1 W.pop(0) if Q and curr_win_size > 0 and (nobs >= min_periods): output[i] = values[Q[0]] elif values.dtype.kind != 'i': output[i] = np.nan else: na_pos.append(i) return (output, na_pos) @numba.jit(nopython=True, nogil=True, parallel=False) def grouped_min_max(values: np.ndarray, result_dtype: np.dtype, labels: npt.NDArray[np.intp], ngroups: int, min_periods: int, is_max: bool) -> tuple[np.ndarray, list[int]]: N = len(labels) nobs = np.zeros(ngroups, dtype=np.int64) na_pos = [] output = np.empty(ngroups, dtype=result_dtype) for i in range(N): lab = labels[i] val = values[i] if lab < 0: continue if values.dtype.kind == 'i' or not np.isnan(val): nobs[lab] += 1 else: continue if nobs[lab] == 1: output[lab] = val continue if is_max: if val > output[lab]: output[lab] = val elif val < output[lab]: output[lab] = val for (lab, count) in 
enumerate(nobs): if count < min_periods: na_pos.append(lab) return (output, na_pos) # File: pandas-main/pandas/core/_numba/kernels/shared.py from __future__ import annotations from typing import TYPE_CHECKING import numba if TYPE_CHECKING: import numpy as np @numba.jit(numba.boolean(numba.int64[:]), nopython=True, nogil=True, parallel=False) def is_monotonic_increasing(bounds: np.ndarray) -> bool: n = len(bounds) if n < 2: return True prev = bounds[0] for i in range(1, n): cur = bounds[i] if cur < prev: return False prev = cur return True # File: pandas-main/pandas/core/_numba/kernels/sum_.py """""" from __future__ import annotations from typing import TYPE_CHECKING, Any import numba from numba.extending import register_jitable import numpy as np if TYPE_CHECKING: from pandas._typing import npt from pandas.core._numba.kernels.shared import is_monotonic_increasing @numba.jit(nopython=True, nogil=True, parallel=False) def add_sum(val: Any, nobs: int, sum_x: Any, compensation: Any, num_consecutive_same_value: int, prev_value: Any) -> tuple[int, Any, Any, int, Any]: if not np.isnan(val): nobs += 1 y = val - compensation t = sum_x + y compensation = t - sum_x - y sum_x = t if val == prev_value: num_consecutive_same_value += 1 else: num_consecutive_same_value = 1 prev_value = val return (nobs, sum_x, compensation, num_consecutive_same_value, prev_value) @numba.jit(nopython=True, nogil=True, parallel=False) def remove_sum(val: Any, nobs: int, sum_x: Any, compensation: Any) -> tuple[int, Any, Any]: if not np.isnan(val): nobs -= 1 y = -val - compensation t = sum_x + y compensation = t - sum_x - y sum_x = t return (nobs, sum_x, compensation) @numba.jit(nopython=True, nogil=True, parallel=False) def sliding_sum(values: np.ndarray, result_dtype: np.dtype, start: np.ndarray, end: np.ndarray, min_periods: int) -> tuple[np.ndarray, list[int]]: dtype = values.dtype na_val: object = np.nan if dtype.kind == 'i': na_val = 0 N = len(start) nobs = 0 sum_x = 0 compensation_add = 0 compensation_remove = 0 na_pos = [] is_monotonic_increasing_bounds = is_monotonic_increasing(start) and is_monotonic_increasing(end) output = np.empty(N, dtype=result_dtype) for i in range(N): s = start[i] e = end[i] if i == 0 or not is_monotonic_increasing_bounds: prev_value = values[s] num_consecutive_same_value = 0 for j in range(s, e): val = values[j] (nobs, sum_x, compensation_add, num_consecutive_same_value, prev_value) = add_sum(val, nobs, sum_x, compensation_add, num_consecutive_same_value, prev_value) else: for j in range(start[i - 1], s): val = values[j] (nobs, sum_x, compensation_remove) = remove_sum(val, nobs, sum_x, compensation_remove) for j in range(end[i - 1], e): val = values[j] (nobs, sum_x, compensation_add, num_consecutive_same_value, prev_value) = add_sum(val, nobs, sum_x, compensation_add, num_consecutive_same_value, prev_value) if nobs == 0 == min_periods: result: object = 0 elif nobs >= min_periods: if num_consecutive_same_value >= nobs: result = prev_value * nobs else: result = sum_x else: result = na_val if dtype.kind == 'i': na_pos.append(i) output[i] = result if not is_monotonic_increasing_bounds: nobs = 0 sum_x = 0 compensation_remove = 0 return (output, na_pos) @register_jitable def grouped_kahan_sum(values: np.ndarray, result_dtype: np.dtype, labels: npt.NDArray[np.intp], ngroups: int) -> tuple[np.ndarray, npt.NDArray[np.int64], np.ndarray, npt.NDArray[np.int64], np.ndarray]: N = len(labels) nobs_arr = np.zeros(ngroups, dtype=np.int64) comp_arr = np.zeros(ngroups, dtype=values.dtype) consecutive_counts = 
np.zeros(ngroups, dtype=np.int64) prev_vals = np.zeros(ngroups, dtype=values.dtype) output = np.zeros(ngroups, dtype=result_dtype) for i in range(N): lab = labels[i] val = values[i] if lab < 0: continue sum_x = output[lab] nobs = nobs_arr[lab] compensation_add = comp_arr[lab] num_consecutive_same_value = consecutive_counts[lab] prev_value = prev_vals[lab] (nobs, sum_x, compensation_add, num_consecutive_same_value, prev_value) = add_sum(val, nobs, sum_x, compensation_add, num_consecutive_same_value, prev_value) output[lab] = sum_x consecutive_counts[lab] = num_consecutive_same_value prev_vals[lab] = prev_value comp_arr[lab] = compensation_add nobs_arr[lab] = nobs return (output, nobs_arr, comp_arr, consecutive_counts, prev_vals) @numba.jit(nopython=True, nogil=True, parallel=False) def grouped_sum(values: np.ndarray, result_dtype: np.dtype, labels: npt.NDArray[np.intp], ngroups: int, min_periods: int) -> tuple[np.ndarray, list[int]]: na_pos = [] (output, nobs_arr, comp_arr, consecutive_counts, prev_vals) = grouped_kahan_sum(values, result_dtype, labels, ngroups) for lab in range(ngroups): nobs = nobs_arr[lab] num_consecutive_same_value = consecutive_counts[lab] prev_value = prev_vals[lab] sum_x = output[lab] if nobs >= min_periods: if num_consecutive_same_value >= nobs: result = prev_value * nobs else: result = sum_x else: result = sum_x na_pos.append(lab) output[lab] = result return (output, na_pos) # File: pandas-main/pandas/core/_numba/kernels/var_.py """""" from __future__ import annotations from typing import TYPE_CHECKING import numba import numpy as np if TYPE_CHECKING: from pandas._typing import npt from pandas.core._numba.kernels.shared import is_monotonic_increasing @numba.jit(nopython=True, nogil=True, parallel=False) def add_var(val: float, nobs: int, mean_x: float, ssqdm_x: float, compensation: float, num_consecutive_same_value: int, prev_value: float) -> tuple[int, float, float, float, int, float]: if not np.isnan(val): if val == prev_value: num_consecutive_same_value += 1 else: num_consecutive_same_value = 1 prev_value = val nobs += 1 prev_mean = mean_x - compensation y = val - compensation t = y - mean_x compensation = t + mean_x - y delta = t if nobs: mean_x += delta / nobs else: mean_x = 0 ssqdm_x += (val - prev_mean) * (val - mean_x) return (nobs, mean_x, ssqdm_x, compensation, num_consecutive_same_value, prev_value) @numba.jit(nopython=True, nogil=True, parallel=False) def remove_var(val: float, nobs: int, mean_x: float, ssqdm_x: float, compensation: float) -> tuple[int, float, float, float]: if not np.isnan(val): nobs -= 1 if nobs: prev_mean = mean_x - compensation y = val - compensation t = y - mean_x compensation = t + mean_x - y delta = t mean_x -= delta / nobs ssqdm_x -= (val - prev_mean) * (val - mean_x) else: mean_x = 0 ssqdm_x = 0 return (nobs, mean_x, ssqdm_x, compensation) @numba.jit(nopython=True, nogil=True, parallel=False) def sliding_var(values: np.ndarray, result_dtype: np.dtype, start: np.ndarray, end: np.ndarray, min_periods: int, ddof: int=1) -> tuple[np.ndarray, list[int]]: N = len(start) nobs = 0 mean_x = 0.0 ssqdm_x = 0.0 compensation_add = 0.0 compensation_remove = 0.0 min_periods = max(min_periods, 1) is_monotonic_increasing_bounds = is_monotonic_increasing(start) and is_monotonic_increasing(end) output = np.empty(N, dtype=result_dtype) for i in range(N): s = start[i] e = end[i] if i == 0 or not is_monotonic_increasing_bounds: prev_value = values[s] num_consecutive_same_value = 0 for j in range(s, e): val = values[j] (nobs, mean_x, ssqdm_x, 
compensation_add, num_consecutive_same_value, prev_value) = add_var(val, nobs, mean_x, ssqdm_x, compensation_add, num_consecutive_same_value, prev_value) else: for j in range(start[i - 1], s): val = values[j] (nobs, mean_x, ssqdm_x, compensation_remove) = remove_var(val, nobs, mean_x, ssqdm_x, compensation_remove) for j in range(end[i - 1], e): val = values[j] (nobs, mean_x, ssqdm_x, compensation_add, num_consecutive_same_value, prev_value) = add_var(val, nobs, mean_x, ssqdm_x, compensation_add, num_consecutive_same_value, prev_value) if nobs >= min_periods and nobs > ddof: if nobs == 1 or num_consecutive_same_value >= nobs: result = 0.0 else: result = ssqdm_x / (nobs - ddof) else: result = np.nan output[i] = result if not is_monotonic_increasing_bounds: nobs = 0 mean_x = 0.0 ssqdm_x = 0.0 compensation_remove = 0.0 na_pos = [0 for i in range(0)] return (output, na_pos) @numba.jit(nopython=True, nogil=True, parallel=False) def grouped_var(values: np.ndarray, result_dtype: np.dtype, labels: npt.NDArray[np.intp], ngroups: int, min_periods: int, ddof: int=1) -> tuple[np.ndarray, list[int]]: N = len(labels) nobs_arr = np.zeros(ngroups, dtype=np.int64) comp_arr = np.zeros(ngroups, dtype=values.dtype) consecutive_counts = np.zeros(ngroups, dtype=np.int64) prev_vals = np.zeros(ngroups, dtype=values.dtype) output = np.zeros(ngroups, dtype=result_dtype) means = np.zeros(ngroups, dtype=result_dtype) for i in range(N): lab = labels[i] val = values[i] if lab < 0: continue mean_x = means[lab] ssqdm_x = output[lab] nobs = nobs_arr[lab] compensation_add = comp_arr[lab] num_consecutive_same_value = consecutive_counts[lab] prev_value = prev_vals[lab] (nobs, mean_x, ssqdm_x, compensation_add, num_consecutive_same_value, prev_value) = add_var(val, nobs, mean_x, ssqdm_x, compensation_add, num_consecutive_same_value, prev_value) output[lab] = ssqdm_x means[lab] = mean_x consecutive_counts[lab] = num_consecutive_same_value prev_vals[lab] = prev_value comp_arr[lab] = compensation_add nobs_arr[lab] = nobs for lab in range(ngroups): nobs = nobs_arr[lab] num_consecutive_same_value = consecutive_counts[lab] ssqdm_x = output[lab] if nobs >= min_periods and nobs > ddof: if nobs == 1 or num_consecutive_same_value >= nobs: result = 0.0 else: result = ssqdm_x / (nobs - ddof) else: result = np.nan output[lab] = result na_pos = [0 for i in range(0)] return (output, na_pos) # File: pandas-main/pandas/core/accessor.py """""" from __future__ import annotations from typing import TYPE_CHECKING, final import warnings from pandas.util._decorators import doc from pandas.util._exceptions import find_stack_level if TYPE_CHECKING: from collections.abc import Callable from pandas._typing import TypeT from pandas import Index from pandas.core.generic import NDFrame class DirNamesMixin: _accessors: set[str] = set() _hidden_attrs: frozenset[str] = frozenset() @final def _dir_deletions(self) -> set[str]: return self._accessors | self._hidden_attrs def _dir_additions(self) -> set[str]: return {accessor for accessor in self._accessors if hasattr(self, accessor)} def __dir__(self) -> list[str]: rv = set(super().__dir__()) rv = rv - self._dir_deletions() | self._dir_additions() return sorted(rv) class PandasDelegate: def _delegate_property_get(self, name: str, *args, **kwargs): raise TypeError(f'You cannot access the property {name}') def _delegate_property_set(self, name: str, value, *args, **kwargs) -> None: raise TypeError(f'The property {name} cannot be set') def _delegate_method(self, name: str, *args, **kwargs): raise TypeError(f'You 
cannot call method {name}') @classmethod def _add_delegate_accessors(cls, delegate, accessors: list[str], typ: str, overwrite: bool=False, accessor_mapping: Callable[[str], str]=lambda x: x, raise_on_missing: bool=True) -> None: def _create_delegator_property(name: str): def _getter(self): return self._delegate_property_get(name) def _setter(self, new_values): return self._delegate_property_set(name, new_values) _getter.__name__ = name _setter.__name__ = name return property(fget=_getter, fset=_setter, doc=getattr(delegate, accessor_mapping(name)).__doc__) def _create_delegator_method(name: str): def f(self, *args, **kwargs): return self._delegate_method(name, *args, **kwargs) f.__name__ = name f.__doc__ = getattr(delegate, accessor_mapping(name)).__doc__ return f for name in accessors: if not raise_on_missing and getattr(delegate, accessor_mapping(name), None) is None: continue if typ == 'property': f = _create_delegator_property(name) else: f = _create_delegator_method(name) if overwrite or not hasattr(cls, name): setattr(cls, name, f) def delegate_names(delegate, accessors: list[str], typ: str, overwrite: bool=False, accessor_mapping: Callable[[str], str]=lambda x: x, raise_on_missing: bool=True): def add_delegate_accessors(cls): cls._add_delegate_accessors(delegate, accessors, typ, overwrite=overwrite, accessor_mapping=accessor_mapping, raise_on_missing=raise_on_missing) return cls return add_delegate_accessors class Accessor: def __init__(self, name: str, accessor) -> None: self._name = name self._accessor = accessor def __get__(self, obj, cls): if obj is None: return self._accessor return self._accessor(obj) CachedAccessor = Accessor @doc(klass='', examples='', others='') def _register_accessor(name: str, cls: type[NDFrame | Index]) -> Callable[[TypeT], TypeT]: def decorator(accessor: TypeT) -> TypeT: if hasattr(cls, name): warnings.warn(f'registration of accessor {accessor!r} under name {name!r} for type {cls!r} is overriding a preexisting attribute with the same name.', UserWarning, stacklevel=find_stack_level()) setattr(cls, name, Accessor(name, accessor)) cls._accessors.add(name) return accessor return decorator _register_df_examples = '\nAn accessor that only accepts integers could\nhave a class defined like this:\n\n>>> @pd.api.extensions.register_dataframe_accessor("int_accessor")\n... class IntAccessor:\n... def __init__(self, pandas_obj):\n... if not all(pandas_obj[col].dtype == \'int64\' for col in pandas_obj.columns):\n... raise AttributeError("All columns must contain integer values only")\n... self._obj = pandas_obj\n...\n... def sum(self):\n... return self._obj.sum()\n...\n>>> df = pd.DataFrame([[1, 2], [\'x\', \'y\']])\n>>> df.int_accessor\nTraceback (most recent call last):\n...\nAttributeError: All columns must contain integer values only.\n>>> df = pd.DataFrame([[1, 2], [3, 4]])\n>>> df.int_accessor.sum()\n0 4\n1 6\ndtype: int64' @doc(_register_accessor, klass='DataFrame', examples=_register_df_examples) def register_dataframe_accessor(name: str) -> Callable[[TypeT], TypeT]: from pandas import DataFrame return _register_accessor(name, DataFrame) _register_series_examples = '\nAn accessor that only accepts integers could\nhave a class defined like this:\n\n>>> @pd.api.extensions.register_series_accessor("int_accessor")\n... class IntAccessor:\n... def __init__(self, pandas_obj):\n... if not pandas_obj.dtype == \'int64\':\n... raise AttributeError("The series must contain integer data only")\n... self._obj = pandas_obj\n...\n... def sum(self):\n... 
return self._obj.sum()\n...\n>>> df = pd.Series([1, 2, \'x\'])\n>>> df.int_accessor\nTraceback (most recent call last):\n...\nAttributeError: The series must contain integer data only.\n>>> df = pd.Series([1, 2, 3])\n>>> df.int_accessor.sum()\n6' @doc(_register_accessor, klass='Series', examples=_register_series_examples) def register_series_accessor(name: str) -> Callable[[TypeT], TypeT]: from pandas import Series return _register_accessor(name, Series) _register_index_examples = '\nAn accessor that only accepts integers could\nhave a class defined like this:\n\n>>> @pd.api.extensions.register_index_accessor("int_accessor")\n... class IntAccessor:\n... def __init__(self, pandas_obj):\n... if not all(isinstance(x, int) for x in pandas_obj):\n... raise AttributeError("The index must only be an integer value")\n... self._obj = pandas_obj\n...\n... def even(self):\n... return [x for x in self._obj if x % 2 == 0]\n>>> df = pd.DataFrame.from_dict(\n... {"row1": {"1": 1, "2": "a"}, "row2": {"1": 2, "2": "b"}}, orient="index"\n... )\n>>> df.index.int_accessor\nTraceback (most recent call last):\n...\nAttributeError: The index must only be an integer value.\n>>> df = pd.DataFrame(\n... {"col1": [1, 2, 3, 4], "col2": ["a", "b", "c", "d"]}, index=[1, 2, 5, 8]\n... )\n>>> df.index.int_accessor.even()\n[2, 8]' @doc(_register_accessor, klass='Index', examples=_register_index_examples) def register_index_accessor(name: str) -> Callable[[TypeT], TypeT]: from pandas import Index return _register_accessor(name, Index) # File: pandas-main/pandas/core/algorithms.py """""" from __future__ import annotations import decimal import operator from textwrap import dedent from typing import TYPE_CHECKING, Literal, cast import warnings import numpy as np from pandas._libs import algos, hashtable as htable, iNaT, lib from pandas._typing import AnyArrayLike, ArrayLike, ArrayLikeT, AxisInt, DtypeObj, TakeIndexer, npt from pandas.util._decorators import doc from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike, np_find_common_type from pandas.core.dtypes.common import ensure_float64, ensure_object, ensure_platform_int, is_bool_dtype, is_complex_dtype, is_dict_like, is_extension_array_dtype, is_float, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_object_dtype, is_signed_integer_dtype, needs_i8_conversion from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import BaseMaskedDtype, CategoricalDtype, ExtensionDtype, NumpyEADtype from pandas.core.dtypes.generic import ABCDatetimeArray, ABCExtensionArray, ABCIndex, ABCMultiIndex, ABCNumpyExtensionArray, ABCSeries, ABCTimedeltaArray from pandas.core.dtypes.missing import isna, na_value_for_dtype from pandas.core.array_algos.take import take_nd from pandas.core.construction import array as pd_array, ensure_wrapped_if_datetimelike, extract_array from pandas.core.indexers import validate_indices if TYPE_CHECKING: from pandas._typing import ListLike, NumpySorter, NumpyValueArrayLike from pandas import Categorical, Index, Series from pandas.core.arrays import BaseMaskedArray, ExtensionArray def _ensure_data(values: ArrayLike) -> np.ndarray: if not isinstance(values, ABCMultiIndex): values = extract_array(values, extract_numpy=True) if is_object_dtype(values.dtype): return ensure_object(np.asarray(values)) elif isinstance(values.dtype, BaseMaskedDtype): values = cast('BaseMaskedArray', values) if not values._hasna: return _ensure_data(values._data) return 
np.asarray(values) elif isinstance(values.dtype, CategoricalDtype): values = cast('Categorical', values) return values.codes elif is_bool_dtype(values.dtype): if isinstance(values, np.ndarray): return np.asarray(values).view('uint8') else: return np.asarray(values).astype('uint8', copy=False) elif is_integer_dtype(values.dtype): return np.asarray(values) elif is_float_dtype(values.dtype): if values.dtype.itemsize in [2, 12, 16]: return ensure_float64(values) return np.asarray(values) elif is_complex_dtype(values.dtype): return cast(np.ndarray, values) elif needs_i8_conversion(values.dtype): npvalues = values.view('i8') npvalues = cast(np.ndarray, npvalues) return npvalues values = np.asarray(values, dtype=object) return ensure_object(values) def _reconstruct_data(values: ArrayLikeT, dtype: DtypeObj, original: AnyArrayLike) -> ArrayLikeT: if isinstance(values, ABCExtensionArray) and values.dtype == dtype: return values if not isinstance(dtype, np.dtype): cls = dtype.construct_array_type() values = cls._from_sequence(values, dtype=dtype) else: values = values.astype(dtype, copy=False) return values def _ensure_arraylike(values, func_name: str) -> ArrayLike: if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray, ABCNumpyExtensionArray)): if func_name != 'isin-targets': raise TypeError(f'{func_name} requires a Series, Index, ExtensionArray, np.ndarray or NumpyExtensionArray got {type(values).__name__}.') inferred = lib.infer_dtype(values, skipna=False) if inferred in ['mixed', 'string', 'mixed-integer']: if isinstance(values, tuple): values = list(values) values = construct_1d_object_array_from_listlike(values) else: values = np.asarray(values) return values _hashtables = {'complex128': htable.Complex128HashTable, 'complex64': htable.Complex64HashTable, 'float64': htable.Float64HashTable, 'float32': htable.Float32HashTable, 'uint64': htable.UInt64HashTable, 'uint32': htable.UInt32HashTable, 'uint16': htable.UInt16HashTable, 'uint8': htable.UInt8HashTable, 'int64': htable.Int64HashTable, 'int32': htable.Int32HashTable, 'int16': htable.Int16HashTable, 'int8': htable.Int8HashTable, 'string': htable.StringHashTable, 'object': htable.PyObjectHashTable} def _get_hashtable_algo(values: np.ndarray) -> tuple[type[htable.HashTable], np.ndarray]: values = _ensure_data(values) ndtype = _check_object_for_strings(values) hashtable = _hashtables[ndtype] return (hashtable, values) def _check_object_for_strings(values: np.ndarray) -> str: ndtype = values.dtype.name if ndtype == 'object': if lib.is_string_array(values, skipna=False): ndtype = 'string' return ndtype def unique(values): return unique_with_mask(values) def nunique_ints(values: ArrayLike) -> int: if len(values) == 0: return 0 values = _ensure_data(values) result = (np.bincount(values.ravel().astype('intp')) != 0).sum() return result def unique_with_mask(values, mask: npt.NDArray[np.bool_] | None=None): values = _ensure_arraylike(values, func_name='unique') if isinstance(values.dtype, ExtensionDtype): return values.unique() if isinstance(values, ABCIndex): return values.unique() original = values (hashtable, values) = _get_hashtable_algo(values) table = hashtable(len(values)) if mask is None: uniques = table.unique(values) uniques = _reconstruct_data(uniques, original.dtype, original) return uniques else: (uniques, mask) = table.unique(values, mask=mask) uniques = _reconstruct_data(uniques, original.dtype, original) assert mask is not None return (uniques, mask.astype('bool')) unique1d = unique _MINIMUM_COMP_ARR_LEN = 1000000 
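# NOTE (editor): illustrative sketch added for clarity; not part of the
# original pandas module. The helpers above normalize any array-like through
# _ensure_data and pick a typed hash table from _hashtables; this machinery
# backs the public pd.unique/pd.factorize:
#
#     >>> import pandas as pd
#     >>> pd.unique(pd.Series([2, 1, 3, 3]))
#     array([2, 1, 3])
#     >>> codes, uniques = pd.factorize(["b", "b", "a", "c", "b"])
#     >>> codes
#     array([0, 0, 1, 2, 0])
#     >>> uniques
#     array(['b', 'a', 'c'], dtype=object)
#
# _MINIMUM_COMP_ARR_LEN just above is the size threshold at which isin()
# (defined next) prefers np.isin over the hash-table membership test when the
# comparison array is large and the target set is small.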
def isin(comps: ListLike, values: ListLike) -> npt.NDArray[np.bool_]: if not is_list_like(comps): raise TypeError(f'only list-like objects are allowed to be passed to isin(), you passed a `{type(comps).__name__}`') if not is_list_like(values): raise TypeError(f'only list-like objects are allowed to be passed to isin(), you passed a `{type(values).__name__}`') if not isinstance(values, (ABCIndex, ABCSeries, ABCExtensionArray, np.ndarray)): orig_values = list(values) values = _ensure_arraylike(orig_values, func_name='isin-targets') if len(values) > 0 and values.dtype.kind in 'iufcb' and (not is_signed_integer_dtype(comps)): values = construct_1d_object_array_from_listlike(orig_values) elif isinstance(values, ABCMultiIndex): values = np.array(values) else: values = extract_array(values, extract_numpy=True, extract_range=True) comps_array = _ensure_arraylike(comps, func_name='isin') comps_array = extract_array(comps_array, extract_numpy=True) if not isinstance(comps_array, np.ndarray): return comps_array.isin(values) elif needs_i8_conversion(comps_array.dtype): return pd_array(comps_array).isin(values) elif needs_i8_conversion(values.dtype) and (not is_object_dtype(comps_array.dtype)): return np.zeros(comps_array.shape, dtype=bool) elif needs_i8_conversion(values.dtype): return isin(comps_array, values.astype(object)) elif isinstance(values.dtype, ExtensionDtype): return isin(np.asarray(comps_array), np.asarray(values)) if len(comps_array) > _MINIMUM_COMP_ARR_LEN and len(values) <= 26 and (comps_array.dtype != object): if isna(values).any(): def f(c, v): return np.logical_or(np.isin(c, v).ravel(), np.isnan(c)) else: f = lambda a, b: np.isin(a, b).ravel() else: common = np_find_common_type(values.dtype, comps_array.dtype) values = values.astype(common, copy=False) comps_array = comps_array.astype(common, copy=False) f = htable.ismember return f(comps_array, values) def factorize_array(values: np.ndarray, use_na_sentinel: bool=True, size_hint: int | None=None, na_value: object=None, mask: npt.NDArray[np.bool_] | None=None) -> tuple[npt.NDArray[np.intp], np.ndarray]: original = values if values.dtype.kind in 'mM': na_value = iNaT (hash_klass, values) = _get_hashtable_algo(values) table = hash_klass(size_hint or len(values)) (uniques, codes) = table.factorize(values, na_sentinel=-1, na_value=na_value, mask=mask, ignore_na=use_na_sentinel) uniques = _reconstruct_data(uniques, original.dtype, original) codes = ensure_platform_int(codes) return (codes, uniques) @doc(values=dedent(" values : sequence\n A 1-D sequence. 
Sequences that aren't pandas objects are\n coerced to ndarrays before factorization.\n "), sort=dedent(' sort : bool, default False\n Sort `uniques` and shuffle `codes` to maintain the\n relationship.\n '), size_hint=dedent(' size_hint : int, optional\n Hint to the hashtable sizer.\n ')) def factorize(values, sort: bool=False, use_na_sentinel: bool=True, size_hint: int | None=None) -> tuple[np.ndarray, np.ndarray | Index]: if isinstance(values, (ABCIndex, ABCSeries)): return values.factorize(sort=sort, use_na_sentinel=use_na_sentinel) values = _ensure_arraylike(values, func_name='factorize') original = values if isinstance(values, (ABCDatetimeArray, ABCTimedeltaArray)) and values.freq is not None: (codes, uniques) = values.factorize(sort=sort) return (codes, uniques) elif not isinstance(values, np.ndarray): (codes, uniques) = values.factorize(use_na_sentinel=use_na_sentinel) else: values = np.asarray(values) if not use_na_sentinel and values.dtype == object: null_mask = isna(values) if null_mask.any(): na_value = na_value_for_dtype(values.dtype, compat=False) values = np.where(null_mask, na_value, values) (codes, uniques) = factorize_array(values, use_na_sentinel=use_na_sentinel, size_hint=size_hint) if sort and len(uniques) > 0: (uniques, codes) = safe_sort(uniques, codes, use_na_sentinel=use_na_sentinel, assume_unique=True, verify=False) uniques = _reconstruct_data(uniques, original.dtype, original) return (codes, uniques) def value_counts_internal(values, sort: bool=True, ascending: bool=False, normalize: bool=False, bins=None, dropna: bool=True) -> Series: from pandas import Index, Series index_name = getattr(values, 'name', None) name = 'proportion' if normalize else 'count' if bins is not None: from pandas.core.reshape.tile import cut if isinstance(values, Series): values = values._values try: ii = cut(values, bins, include_lowest=True) except TypeError as err: raise TypeError('bins argument only works with numeric data.') from err result = ii.value_counts(dropna=dropna) result.name = name result = result[result.index.notna()] result.index = result.index.astype('interval') result = result.sort_index() if dropna and (result._values == 0).all(): result = result.iloc[0:0] counts = np.array([len(ii)]) elif is_extension_array_dtype(values): result = Series(values, copy=False)._values.value_counts(dropna=dropna) result.name = name result.index.name = index_name counts = result._values if not isinstance(counts, np.ndarray): counts = np.asarray(counts) elif isinstance(values, ABCMultiIndex): levels = list(range(values.nlevels)) result = Series(index=values, name=name).groupby(level=levels, dropna=dropna).size() result.index.names = values.names counts = result._values else: values = _ensure_arraylike(values, func_name='value_counts') (keys, counts, _) = value_counts_arraylike(values, dropna) if keys.dtype == np.float16: keys = keys.astype(np.float32) idx = Index(keys, dtype=keys.dtype, name=index_name) result = Series(counts, index=idx, name=name, copy=False) if sort: result = result.sort_values(ascending=ascending) if normalize: result = result / counts.sum() return result def value_counts_arraylike(values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None=None) -> tuple[ArrayLike, npt.NDArray[np.int64], int]: original = values values = _ensure_data(values) (keys, counts, na_counter) = htable.value_count(values, dropna, mask=mask) if needs_i8_conversion(original.dtype): if dropna: mask = keys != iNaT (keys, counts) = (keys[mask], counts[mask]) res_keys = _reconstruct_data(keys, 
original.dtype, original) return (res_keys, counts, na_counter) def duplicated(values: ArrayLike, keep: Literal['first', 'last', False]='first', mask: npt.NDArray[np.bool_] | None=None) -> npt.NDArray[np.bool_]: values = _ensure_data(values) return htable.duplicated(values, keep=keep, mask=mask) def mode(values: ArrayLike, dropna: bool=True, mask: npt.NDArray[np.bool_] | None=None) -> ArrayLike: values = _ensure_arraylike(values, func_name='mode') original = values if needs_i8_conversion(values.dtype): values = ensure_wrapped_if_datetimelike(values) values = cast('ExtensionArray', values) return values._mode(dropna=dropna) values = _ensure_data(values) (npresult, res_mask) = htable.mode(values, dropna=dropna, mask=mask) if res_mask is not None: return (npresult, res_mask) try: npresult = np.sort(npresult) except TypeError as err: warnings.warn(f'Unable to sort modes: {err}', stacklevel=find_stack_level()) result = _reconstruct_data(npresult, original.dtype, original) return result def rank(values: ArrayLike, axis: AxisInt=0, method: str='average', na_option: str='keep', ascending: bool=True, pct: bool=False) -> npt.NDArray[np.float64]: is_datetimelike = needs_i8_conversion(values.dtype) values = _ensure_data(values) if values.ndim == 1: ranks = algos.rank_1d(values, is_datetimelike=is_datetimelike, ties_method=method, ascending=ascending, na_option=na_option, pct=pct) elif values.ndim == 2: ranks = algos.rank_2d(values, axis=axis, is_datetimelike=is_datetimelike, ties_method=method, ascending=ascending, na_option=na_option, pct=pct) else: raise TypeError('Array with ndim > 2 are not supported.') return ranks def take(arr, indices: TakeIndexer, axis: AxisInt=0, allow_fill: bool=False, fill_value=None): if not isinstance(arr, (np.ndarray, ABCExtensionArray, ABCIndex, ABCSeries, ABCNumpyExtensionArray)): raise TypeError(f'pd.api.extensions.take requires a numpy.ndarray, ExtensionArray, Index, Series, or NumpyExtensionArray got {type(arr).__name__}.') indices = ensure_platform_int(indices) if allow_fill: validate_indices(indices, arr.shape[axis]) result = take_nd(arr, indices, axis=axis, allow_fill=True, fill_value=fill_value) else: result = arr.take(indices, axis=axis) return result def searchsorted(arr: ArrayLike, value: NumpyValueArrayLike | ExtensionArray, side: Literal['left', 'right']='left', sorter: NumpySorter | None=None) -> npt.NDArray[np.intp] | np.intp: if sorter is not None: sorter = ensure_platform_int(sorter) if isinstance(arr, np.ndarray) and arr.dtype.kind in 'iu' and (is_integer(value) or is_integer_dtype(value)): iinfo = np.iinfo(arr.dtype.type) value_arr = np.array([value]) if is_integer(value) else np.array(value) if (value_arr >= iinfo.min).all() and (value_arr <= iinfo.max).all(): dtype = arr.dtype else: dtype = value_arr.dtype if is_integer(value): value = cast(int, dtype.type(value)) else: value = pd_array(cast(ArrayLike, value), dtype=dtype) else: arr = ensure_wrapped_if_datetimelike(arr) return arr.searchsorted(value, side=side, sorter=sorter) _diff_special = {'float64', 'float32', 'int64', 'int32', 'int16', 'int8'} def diff(arr, n: int, axis: AxisInt=0): if not lib.is_integer(n): if not (is_float(n) and n.is_integer()): raise ValueError('periods must be an integer') n = int(n) na = np.nan dtype = arr.dtype is_bool = is_bool_dtype(dtype) if is_bool: op = operator.xor else: op = operator.sub if isinstance(dtype, NumpyEADtype): arr = arr.to_numpy() dtype = arr.dtype if not isinstance(arr, np.ndarray): if hasattr(arr, f'__{op.__name__}__'): if axis != 0: raise 
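# ---------------------------------------------------------------------------
# Illustrative usage (not part of the pandas source above): a small sketch of
# the public ``pd.api.extensions.take`` wrapper defined above. Outputs in the
# comments assume a recent pandas/NumPy install.
import numpy as np
import pandas as pd

arr = np.array([10, 20, 30])
pd.api.extensions.take(arr, [0, 2])
# array([10, 30])
pd.api.extensions.take(arr, [0, -1], allow_fill=True)
# array([10., nan])   -- the integer input is promoted so the NA fill value fits
# ---------------------------------------------------------------------------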
ValueError(f'cannot diff {type(arr).__name__} on axis={axis}') return op(arr, arr.shift(n)) else: raise TypeError(f"{type(arr).__name__} has no 'diff' method. Convert to a suitable dtype prior to calling 'diff'.") is_timedelta = False if arr.dtype.kind in 'mM': dtype = np.int64 arr = arr.view('i8') na = iNaT is_timedelta = True elif is_bool: dtype = np.object_ elif dtype.kind in 'iu': if arr.dtype.name in ['int8', 'int16']: dtype = np.float32 else: dtype = np.float64 orig_ndim = arr.ndim if orig_ndim == 1: arr = arr.reshape(-1, 1) dtype = np.dtype(dtype) out_arr = np.empty(arr.shape, dtype=dtype) na_indexer = [slice(None)] * 2 na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None) out_arr[tuple(na_indexer)] = na if arr.dtype.name in _diff_special: algos.diff_2d(arr, out_arr, n, axis, datetimelike=is_timedelta) else: _res_indexer = [slice(None)] * 2 _res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n) res_indexer = tuple(_res_indexer) _lag_indexer = [slice(None)] * 2 _lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None) lag_indexer = tuple(_lag_indexer) out_arr[res_indexer] = op(arr[res_indexer], arr[lag_indexer]) if is_timedelta: out_arr = out_arr.view('timedelta64[ns]') if orig_ndim == 1: out_arr = out_arr[:, 0] return out_arr def safe_sort(values: Index | ArrayLike, codes: npt.NDArray[np.intp] | None=None, use_na_sentinel: bool=True, assume_unique: bool=False, verify: bool=True) -> AnyArrayLike | tuple[AnyArrayLike, np.ndarray]: if not isinstance(values, (np.ndarray, ABCExtensionArray, ABCIndex)): raise TypeError('Only np.ndarray, ExtensionArray, and Index objects are allowed to be passed to safe_sort as values') sorter = None ordered: AnyArrayLike if not isinstance(values.dtype, ExtensionDtype) and lib.infer_dtype(values, skipna=False) == 'mixed-integer': ordered = _sort_mixed(values) else: try: sorter = values.argsort() ordered = values.take(sorter) except (TypeError, decimal.InvalidOperation): if values.size and isinstance(values[0], tuple): ordered = _sort_tuples(values) else: ordered = _sort_mixed(values) if codes is None: return ordered if not is_list_like(codes): raise TypeError('Only list-like objects or None are allowed to be passed to safe_sort as codes') codes = ensure_platform_int(np.asarray(codes)) if not assume_unique and (not len(unique(values)) == len(values)): raise ValueError('values should be unique if codes is not None') if sorter is None: (hash_klass, values) = _get_hashtable_algo(values) t = hash_klass(len(values)) t.map_locations(values) sorter = ensure_platform_int(t.lookup(ordered)) if use_na_sentinel: order2 = sorter.argsort() if verify: mask = (codes < -len(values)) | (codes >= len(values)) codes[mask] = -1 new_codes = take_nd(order2, codes, fill_value=-1) else: reverse_indexer = np.empty(len(sorter), dtype=int) reverse_indexer.put(sorter, np.arange(len(sorter))) new_codes = reverse_indexer.take(codes, mode='wrap') return (ordered, ensure_platform_int(new_codes)) def _sort_mixed(values) -> AnyArrayLike: str_pos = np.array([isinstance(x, str) for x in values], dtype=bool) null_pos = np.array([isna(x) for x in values], dtype=bool) num_pos = ~str_pos & ~null_pos str_argsort = np.argsort(values[str_pos]) num_argsort = np.argsort(values[num_pos]) str_locs = str_pos.nonzero()[0].take(str_argsort) num_locs = num_pos.nonzero()[0].take(num_argsort) null_locs = null_pos.nonzero()[0] locs = np.concatenate([num_locs, str_locs, null_locs]) return values.take(locs) def _sort_tuples(values: np.ndarray) -> np.ndarray: from 
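# ---------------------------------------------------------------------------
# Illustrative usage (not part of the pandas source above): ``Series.diff`` is a
# thin wrapper over the ``diff`` helper above; this sketch shows the dtype
# promotion and the boolean branch. Outputs assume a recent pandas release.
import pandas as pd

pd.Series([1, 2, 4, 7], dtype="int64").diff()
# 0    NaN
# 1    1.0
# 2    2.0
# 3    3.0
# dtype: float64   (integer input is upcast so the leading NaN can be represented)

pd.Series([True, False, True]).diff()
# element-wise XOR rather than subtraction, per the ``operator.xor`` branch above
# ---------------------------------------------------------------------------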
pandas.core.internals.construction import to_arrays from pandas.core.sorting import lexsort_indexer (arrays, _) = to_arrays(values, None) indexer = lexsort_indexer(arrays, orders=True) return values[indexer] def union_with_duplicates(lvals: ArrayLike | Index, rvals: ArrayLike | Index) -> ArrayLike | Index: from pandas import Series l_count = value_counts_internal(lvals, dropna=False) r_count = value_counts_internal(rvals, dropna=False) (l_count, r_count) = l_count.align(r_count, fill_value=0) final_count = np.maximum(l_count.values, r_count.values) final_count = Series(final_count, index=l_count.index, dtype='int', copy=False) if isinstance(lvals, ABCMultiIndex) and isinstance(rvals, ABCMultiIndex): unique_vals = lvals.append(rvals).unique() else: if isinstance(lvals, ABCIndex): lvals = lvals._values if isinstance(rvals, ABCIndex): rvals = rvals._values combined = concat_compat([lvals, rvals]) unique_vals = unique(combined) unique_vals = ensure_wrapped_if_datetimelike(unique_vals) repeats = final_count.reindex(unique_vals).values return np.repeat(unique_vals, repeats) def map_array(arr: ArrayLike, mapper, na_action: Literal['ignore'] | None=None) -> np.ndarray | ExtensionArray | Index: if na_action not in (None, 'ignore'): msg = f"na_action must either be 'ignore' or None, {na_action} was passed" raise ValueError(msg) if is_dict_like(mapper): if isinstance(mapper, dict) and hasattr(mapper, '__missing__'): dict_with_default = mapper mapper = lambda x: dict_with_default[np.nan if isinstance(x, float) and np.isnan(x) else x] else: from pandas import Series if len(mapper) == 0: mapper = Series(mapper, dtype=np.float64) else: mapper = Series(mapper) if isinstance(mapper, ABCSeries): if na_action == 'ignore': mapper = mapper[mapper.index.notna()] indexer = mapper.index.get_indexer(arr) new_values = take_nd(mapper._values, indexer) return new_values if not len(arr): return arr.copy() values = arr.astype(object, copy=False) if na_action is None: return lib.map_infer(values, mapper) else: return lib.map_infer_mask(values, mapper, mask=isna(values).view(np.uint8)) # File: pandas-main/pandas/core/api.py from pandas._libs import NaT, Period, Timedelta, Timestamp from pandas._libs.missing import NA from pandas.core.dtypes.dtypes import ArrowDtype, CategoricalDtype, DatetimeTZDtype, IntervalDtype, PeriodDtype from pandas.core.dtypes.missing import isna, isnull, notna, notnull from pandas.core.algorithms import factorize, unique from pandas.core.arrays import Categorical from pandas.core.arrays.boolean import BooleanDtype from pandas.core.arrays.floating import Float32Dtype, Float64Dtype from pandas.core.arrays.integer import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype, UInt8Dtype, UInt16Dtype, UInt32Dtype, UInt64Dtype from pandas.core.arrays.string_ import StringDtype from pandas.core.construction import array from pandas.core.flags import Flags from pandas.core.groupby import Grouper, NamedAgg from pandas.core.indexes.api import CategoricalIndex, DatetimeIndex, Index, IntervalIndex, MultiIndex, PeriodIndex, RangeIndex, TimedeltaIndex from pandas.core.indexes.datetimes import bdate_range, date_range from pandas.core.indexes.interval import Interval, interval_range from pandas.core.indexes.period import period_range from pandas.core.indexes.timedeltas import timedelta_range from pandas.core.indexing import IndexSlice from pandas.core.series import Series from pandas.core.tools.datetimes import to_datetime from pandas.core.tools.numeric import to_numeric from pandas.core.tools.timedeltas import 
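# ---------------------------------------------------------------------------
# Illustrative usage (not part of the pandas source above): ``Series.map`` routes
# through ``map_array`` above. Outputs in the comments assume a recent pandas release.
import numpy as np
import pandas as pd

s = pd.Series(["cat", "dog", np.nan])

s.map({"cat": "kitten", "dog": "puppy"})
# 0    kitten
# 1     puppy
# 2       NaN     -- keys missing from a dict mapper become NaN (the take_nd path above)

s.map(len, na_action="ignore")
# 0    3.0
# 1    3.0
# 2    NaN        -- the missing value is never passed to ``len``
# ---------------------------------------------------------------------------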
to_timedelta from pandas.io.formats.format import set_eng_float_format from pandas.tseries.offsets import DateOffset from pandas.core.frame import DataFrame __all__ = ['array', 'ArrowDtype', 'bdate_range', 'BooleanDtype', 'Categorical', 'CategoricalDtype', 'CategoricalIndex', 'DataFrame', 'DateOffset', 'date_range', 'DatetimeIndex', 'DatetimeTZDtype', 'factorize', 'Flags', 'Float32Dtype', 'Float64Dtype', 'Grouper', 'Index', 'IndexSlice', 'Int16Dtype', 'Int32Dtype', 'Int64Dtype', 'Int8Dtype', 'Interval', 'IntervalDtype', 'IntervalIndex', 'interval_range', 'isna', 'isnull', 'MultiIndex', 'NA', 'NamedAgg', 'NaT', 'notna', 'notnull', 'Period', 'PeriodDtype', 'PeriodIndex', 'period_range', 'RangeIndex', 'Series', 'set_eng_float_format', 'StringDtype', 'Timedelta', 'TimedeltaIndex', 'timedelta_range', 'Timestamp', 'to_datetime', 'to_numeric', 'to_timedelta', 'UInt16Dtype', 'UInt32Dtype', 'UInt64Dtype', 'UInt8Dtype', 'unique'] # File: pandas-main/pandas/core/apply.py from __future__ import annotations import abc from collections import defaultdict from collections.abc import Callable import functools from functools import partial import inspect from typing import TYPE_CHECKING, Any, Literal, cast import numpy as np from pandas._libs.internals import BlockValuesRefs from pandas._typing import AggFuncType, AggFuncTypeBase, AggFuncTypeDict, AggObjType, Axis, AxisInt, NDFrameT, npt from pandas.compat._optional import import_optional_dependency from pandas.errors import SpecificationError from pandas.util._decorators import cache_readonly from pandas.core.dtypes.cast import is_nested_object from pandas.core.dtypes.common import is_dict_like, is_extension_array_dtype, is_list_like, is_numeric_dtype, is_sequence from pandas.core.dtypes.dtypes import CategoricalDtype, ExtensionDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCNDFrame, ABCSeries from pandas.core._numba.executor import generate_apply_looper import pandas.core.common as com from pandas.core.construction import ensure_wrapped_if_datetimelike from pandas.core.util.numba_ import get_jit_arguments, prepare_function_arguments if TYPE_CHECKING: from collections.abc import Generator, Hashable, Iterable, MutableMapping, Sequence from pandas import DataFrame, Index, Series from pandas.core.groupby import GroupBy from pandas.core.resample import Resampler from pandas.core.window.rolling import BaseWindow ResType = dict[int, Any] def frame_apply(obj: DataFrame, func: AggFuncType, axis: Axis=0, raw: bool=False, result_type: str | None=None, by_row: Literal[False, 'compat']='compat', engine: str='python', engine_kwargs: dict[str, bool] | None=None, args=None, kwargs=None) -> FrameApply: (_, func, columns, _) = reconstruct_func(func, **kwargs) axis = obj._get_axis_number(axis) klass: type[FrameApply] if axis == 0: klass = FrameRowApply elif axis == 1: if columns: raise NotImplementedError(f'Named aggregation is not supported when axis={axis!r}.') klass = FrameColumnApply return klass(obj, func, raw=raw, result_type=result_type, by_row=by_row, engine=engine, engine_kwargs=engine_kwargs, args=args, kwargs=kwargs) class Apply(metaclass=abc.ABCMeta): axis: AxisInt def __init__(self, obj: AggObjType, func: AggFuncType, raw: bool, result_type: str | None, *, by_row: Literal[False, 'compat', '_compat']='compat', engine: str='python', engine_kwargs: dict[str, bool] | None=None, args, kwargs) -> None: self.obj = obj self.raw = raw assert by_row is False or by_row in ['compat', '_compat'] self.by_row = by_row self.args = args or () self.kwargs = kwargs 
or {} self.engine = engine self.engine_kwargs = {} if engine_kwargs is None else engine_kwargs if result_type not in [None, 'reduce', 'broadcast', 'expand']: raise ValueError("invalid value for result_type, must be one of {None, 'reduce', 'broadcast', 'expand'}") self.result_type = result_type self.func = func @abc.abstractmethod def apply(self) -> DataFrame | Series: pass @abc.abstractmethod def agg_or_apply_list_like(self, op_name: Literal['agg', 'apply']) -> DataFrame | Series: pass @abc.abstractmethod def agg_or_apply_dict_like(self, op_name: Literal['agg', 'apply']) -> DataFrame | Series: pass def agg(self) -> DataFrame | Series | None: func = self.func if isinstance(func, str): return self.apply_str() if is_dict_like(func): return self.agg_dict_like() elif is_list_like(func): return self.agg_list_like() return None def transform(self) -> DataFrame | Series: obj = self.obj func = self.func axis = self.axis args = self.args kwargs = self.kwargs is_series = obj.ndim == 1 if obj._get_axis_number(axis) == 1: assert not is_series return obj.T.transform(func, 0, *args, **kwargs).T if is_list_like(func) and (not is_dict_like(func)): func = cast(list[AggFuncTypeBase], func) if is_series: func = {com.get_callable_name(v) or v: v for v in func} else: func = {col: func for col in obj} if is_dict_like(func): func = cast(AggFuncTypeDict, func) return self.transform_dict_like(func) func = cast(AggFuncTypeBase, func) try: result = self.transform_str_or_callable(func) except TypeError: raise except Exception as err: raise ValueError('Transform function failed') from err if isinstance(result, (ABCSeries, ABCDataFrame)) and result.empty and (not obj.empty): raise ValueError('Transform function failed') if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals(obj.index): raise ValueError('Function did not transform') return result def transform_dict_like(self, func) -> DataFrame: from pandas.core.reshape.concat import concat obj = self.obj args = self.args kwargs = self.kwargs assert isinstance(obj, ABCNDFrame) if len(func) == 0: raise ValueError('No transform functions were provided') func = self.normalize_dictlike_arg('transform', obj, func) results: dict[Hashable, DataFrame | Series] = {} for (name, how) in func.items(): colg = obj._gotitem(name, ndim=1) results[name] = colg.transform(how, 0, *args, **kwargs) return concat(results, axis=1) def transform_str_or_callable(self, func) -> DataFrame | Series: obj = self.obj args = self.args kwargs = self.kwargs if isinstance(func, str): return self._apply_str(obj, func, *args, **kwargs) try: return obj.apply(func, args=args, **kwargs) except Exception: return func(obj, *args, **kwargs) def agg_list_like(self) -> DataFrame | Series: return self.agg_or_apply_list_like(op_name='agg') def compute_list_like(self, op_name: Literal['agg', 'apply'], selected_obj: Series | DataFrame, kwargs: dict[str, Any]) -> tuple[list[Hashable] | Index, list[Any]]: func = cast(list[AggFuncTypeBase], self.func) obj = self.obj results = [] keys = [] if selected_obj.ndim == 1: for a in func: colg = obj._gotitem(selected_obj.name, ndim=1, subset=selected_obj) args = [self.axis, *self.args] if include_axis(op_name, colg) else self.args new_res = getattr(colg, op_name)(a, *args, **kwargs) results.append(new_res) name = com.get_callable_name(a) or a keys.append(name) else: indices = [] for (index, col) in enumerate(selected_obj): colg = obj._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index]) args = [self.axis, *self.args] if include_axis(op_name, colg) else 
self.args new_res = getattr(colg, op_name)(func, *args, **kwargs) results.append(new_res) indices.append(index) keys = selected_obj.columns.take(indices) return (keys, results) def wrap_results_list_like(self, keys: Iterable[Hashable], results: list[Series | DataFrame]): from pandas.core.reshape.concat import concat obj = self.obj try: return concat(results, keys=keys, axis=1, sort=False) except TypeError as err: from pandas import Series result = Series(results, index=keys, name=obj.name) if is_nested_object(result): raise ValueError('cannot combine transform and aggregation operations') from err return result def agg_dict_like(self) -> DataFrame | Series: return self.agg_or_apply_dict_like(op_name='agg') def compute_dict_like(self, op_name: Literal['agg', 'apply'], selected_obj: Series | DataFrame, selection: Hashable | Sequence[Hashable], kwargs: dict[str, Any]) -> tuple[list[Hashable], list[Any]]: from pandas.core.groupby.generic import DataFrameGroupBy, SeriesGroupBy obj = self.obj is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy)) func = cast(AggFuncTypeDict, self.func) func = self.normalize_dictlike_arg(op_name, selected_obj, func) is_non_unique_col = selected_obj.ndim == 2 and selected_obj.columns.nunique() < len(selected_obj.columns) if selected_obj.ndim == 1: colg = obj._gotitem(selection, ndim=1) results = [getattr(colg, op_name)(how, **kwargs) for (_, how) in func.items()] keys = list(func.keys()) elif not is_groupby and is_non_unique_col: results = [] keys = [] for (key, how) in func.items(): indices = selected_obj.columns.get_indexer_for([key]) labels = selected_obj.columns.take(indices) label_to_indices = defaultdict(list) for (index, label) in zip(indices, labels): label_to_indices[label].append(index) key_data = [getattr(selected_obj._ixs(indice, axis=1), op_name)(how, **kwargs) for (label, indices) in label_to_indices.items() for indice in indices] keys += [key] * len(key_data) results += key_data elif is_groupby: df = selected_obj (results, keys) = ([], []) for (key, how) in func.items(): cols = df[key] if cols.ndim == 1: series = obj._gotitem(key, ndim=1, subset=cols) results.append(getattr(series, op_name)(how, **kwargs)) keys.append(key) else: for (_, col) in cols.items(): series = obj._gotitem(key, ndim=1, subset=col) results.append(getattr(series, op_name)(how, **kwargs)) keys.append(key) else: results = [getattr(obj._gotitem(key, ndim=1), op_name)(how, **kwargs) for (key, how) in func.items()] keys = list(func.keys()) return (keys, results) def wrap_results_dict_like(self, selected_obj: Series | DataFrame, result_index: list[Hashable], result_data: list): from pandas import Index from pandas.core.reshape.concat import concat obj = self.obj is_ndframe = [isinstance(r, ABCNDFrame) for r in result_data] if all(is_ndframe): results = [result for result in result_data if not result.empty] keys_to_use: Iterable[Hashable] keys_to_use = [k for (k, v) in zip(result_index, result_data) if not v.empty] if keys_to_use == []: keys_to_use = result_index results = result_data if selected_obj.ndim == 2: ktu = Index(keys_to_use) ktu._set_names(selected_obj.columns.names) keys_to_use = ktu axis: AxisInt = 0 if isinstance(obj, ABCSeries) else 1 result = concat(results, axis=axis, keys=keys_to_use) elif any(is_ndframe): raise ValueError('cannot perform both aggregation and transformation operations simultaneously') else: from pandas import Series if obj.ndim == 1: obj = cast('Series', obj) name = obj.name else: name = None result = Series(result_data, index=result_index, 
name=name) return result def apply_str(self) -> DataFrame | Series: func = cast(str, self.func) obj = self.obj from pandas.core.groupby.generic import DataFrameGroupBy, SeriesGroupBy method = getattr(obj, func, None) if callable(method): sig = inspect.getfullargspec(method) arg_names = (*sig.args, *sig.kwonlyargs) if self.axis != 0 and ('axis' not in arg_names or func in ('corrwith', 'skew')): raise ValueError(f'Operation {func} does not support axis=1') if 'axis' in arg_names and (not isinstance(obj, (SeriesGroupBy, DataFrameGroupBy))): self.kwargs['axis'] = self.axis return self._apply_str(obj, func, *self.args, **self.kwargs) def apply_list_or_dict_like(self) -> DataFrame | Series: if self.engine == 'numba': raise NotImplementedError("The 'numba' engine doesn't support list-like/dict likes of callables yet.") if self.axis == 1 and isinstance(self.obj, ABCDataFrame): return self.obj.T.apply(self.func, 0, args=self.args, **self.kwargs).T func = self.func kwargs = self.kwargs if is_dict_like(func): result = self.agg_or_apply_dict_like(op_name='apply') else: result = self.agg_or_apply_list_like(op_name='apply') result = reconstruct_and_relabel_result(result, func, **kwargs) return result def normalize_dictlike_arg(self, how: str, obj: DataFrame | Series, func: AggFuncTypeDict) -> AggFuncTypeDict: assert how in ('apply', 'agg', 'transform') if how == 'agg' and isinstance(obj, ABCSeries) and any((is_list_like(v) for (_, v) in func.items())) or any((is_dict_like(v) for (_, v) in func.items())): raise SpecificationError('nested renamer is not supported') if obj.ndim != 1: from pandas import Index cols = Index(list(func.keys())).difference(obj.columns, sort=True) if len(cols) > 0: raise KeyError(f'Label(s) {list(cols)} do not exist') aggregator_types = (list, tuple, dict) if any((isinstance(x, aggregator_types) for (_, x) in func.items())): new_func: AggFuncTypeDict = {} for (k, v) in func.items(): if not isinstance(v, aggregator_types): new_func[k] = [v] else: new_func[k] = v func = new_func return func def _apply_str(self, obj, func: str, *args, **kwargs): assert isinstance(func, str) if hasattr(obj, func): f = getattr(obj, func) if callable(f): return f(*args, **kwargs) assert len(args) == 0 assert not any((kwarg == 'axis' for kwarg in kwargs)) return f elif hasattr(np, func) and hasattr(obj, '__array__'): f = getattr(np, func) return f(obj, *args, **kwargs) else: msg = f"'{func}' is not a valid function for '{type(obj).__name__}' object" raise AttributeError(msg) class NDFrameApply(Apply): obj: DataFrame | Series @property def index(self) -> Index: return self.obj.index @property def agg_axis(self) -> Index: return self.obj._get_agg_axis(self.axis) def agg_or_apply_list_like(self, op_name: Literal['agg', 'apply']) -> DataFrame | Series: obj = self.obj kwargs = self.kwargs if op_name == 'apply': if isinstance(self, FrameApply): by_row = self.by_row elif isinstance(self, SeriesApply): by_row = '_compat' if self.by_row else False else: by_row = False kwargs = {**kwargs, 'by_row': by_row} if getattr(obj, 'axis', 0) == 1: raise NotImplementedError('axis other than 0 is not supported') (keys, results) = self.compute_list_like(op_name, obj, kwargs) result = self.wrap_results_list_like(keys, results) return result def agg_or_apply_dict_like(self, op_name: Literal['agg', 'apply']) -> DataFrame | Series: assert op_name in ['agg', 'apply'] obj = self.obj kwargs = {} if op_name == 'apply': by_row = '_compat' if self.by_row else False kwargs.update({'by_row': by_row}) if getattr(obj, 'axis', 0) == 1: 
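# ---------------------------------------------------------------------------
# Illustrative usage (not part of the pandas source above): a sketch of how the
# dict-like aggregation handled by ``normalize_dictlike_arg``/``compute_dict_like``
# above looks from the public API. Outputs are approximate and assume a recent pandas.
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

df.agg({"a": ["min", "max"], "b": "sum"})
#        a     b
# min  1.0   NaN
# max  3.0   NaN
# sum  NaN  15.0

# A nested renamer is rejected by normalize_dictlike_arg:
# df.agg({"a": {"low": "min"}})   ->  SpecificationError: nested renamer is not supported
# ---------------------------------------------------------------------------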
raise NotImplementedError('axis other than 0 is not supported') selection = None (result_index, result_data) = self.compute_dict_like(op_name, obj, selection, kwargs) result = self.wrap_results_dict_like(obj, result_index, result_data) return result class FrameApply(NDFrameApply): obj: DataFrame def __init__(self, obj: AggObjType, func: AggFuncType, raw: bool, result_type: str | None, *, by_row: Literal[False, 'compat']=False, engine: str='python', engine_kwargs: dict[str, bool] | None=None, args, kwargs) -> None: if by_row is not False and by_row != 'compat': raise ValueError(f'by_row={by_row} not allowed') super().__init__(obj, func, raw, result_type, by_row=by_row, engine=engine, engine_kwargs=engine_kwargs, args=args, kwargs=kwargs) @property @abc.abstractmethod def result_index(self) -> Index: pass @property @abc.abstractmethod def result_columns(self) -> Index: pass @property @abc.abstractmethod def series_generator(self) -> Generator[Series, None, None]: pass @staticmethod @functools.cache @abc.abstractmethod def generate_numba_apply_func(func, nogil=True, nopython=True, parallel=False) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]: pass @abc.abstractmethod def apply_with_numba(self): pass def validate_values_for_numba(self) -> None: for (colname, dtype) in self.obj.dtypes.items(): if not is_numeric_dtype(dtype): raise ValueError(f"Column {colname} must have a numeric dtype. Found '{dtype}' instead") if is_extension_array_dtype(dtype): raise ValueError(f'Column {colname} is backed by an extension array, which is not supported by the numba engine.') @abc.abstractmethod def wrap_results_for_axis(self, results: ResType, res_index: Index) -> DataFrame | Series: pass @property def res_columns(self) -> Index: return self.result_columns @property def columns(self) -> Index: return self.obj.columns @cache_readonly def values(self): return self.obj.values def apply(self) -> DataFrame | Series: if is_list_like(self.func): if self.engine == 'numba': raise NotImplementedError("the 'numba' engine doesn't support lists of callables yet") return self.apply_list_or_dict_like() if len(self.columns) == 0 and len(self.index) == 0: return self.apply_empty_result() if isinstance(self.func, str): if self.engine == 'numba': raise NotImplementedError("the 'numba' engine doesn't support using a string as the callable function") return self.apply_str() elif isinstance(self.func, np.ufunc): if self.engine == 'numba': raise NotImplementedError("the 'numba' engine doesn't support using a numpy ufunc as the callable function") with np.errstate(all='ignore'): results = self.obj._mgr.apply('apply', func=self.func) return self.obj._constructor_from_mgr(results, axes=results.axes) if self.result_type == 'broadcast': if self.engine == 'numba': raise NotImplementedError("the 'numba' engine doesn't support result_type='broadcast'") return self.apply_broadcast(self.obj) elif not all(self.obj.shape): return self.apply_empty_result() elif self.raw: return self.apply_raw(engine=self.engine, engine_kwargs=self.engine_kwargs) return self.apply_standard() def agg(self): obj = self.obj axis = self.axis self.obj = self.obj if self.axis == 0 else self.obj.T self.axis = 0 result = None try: result = super().agg() finally: self.obj = obj self.axis = axis if axis == 1: result = result.T if result is not None else result if result is None: result = self.obj.apply(self.func, axis, args=self.args, **self.kwargs) return result def apply_empty_result(self): assert callable(self.func) if self.result_type not in ['reduce', 
None]: return self.obj.copy() should_reduce = self.result_type == 'reduce' from pandas import Series if not should_reduce: try: if self.axis == 0: r = self.func(Series([], dtype=np.float64), *self.args, **self.kwargs) else: r = self.func(Series(index=self.columns, dtype=np.float64), *self.args, **self.kwargs) except Exception: pass else: should_reduce = not isinstance(r, Series) if should_reduce: if len(self.agg_axis): r = self.func(Series([], dtype=np.float64), *self.args, **self.kwargs) else: r = np.nan return self.obj._constructor_sliced(r, index=self.agg_axis) else: return self.obj.copy() def apply_raw(self, engine='python', engine_kwargs=None): def wrap_function(func): def wrapper(*args, **kwargs): result = func(*args, **kwargs) if isinstance(result, str): result = np.array(result, dtype=object) return result return wrapper if engine == 'numba': (args, kwargs) = prepare_function_arguments(self.func, self.args, self.kwargs) nb_looper = generate_apply_looper(self.func, **get_jit_arguments(engine_kwargs, kwargs)) result = nb_looper(self.values, self.axis, *args) result = np.squeeze(result) else: result = np.apply_along_axis(wrap_function(self.func), self.axis, self.values, *self.args, **self.kwargs) if result.ndim == 2: return self.obj._constructor(result, index=self.index, columns=self.columns) else: return self.obj._constructor_sliced(result, index=self.agg_axis) def apply_broadcast(self, target: DataFrame) -> DataFrame: assert callable(self.func) result_values = np.empty_like(target.values) result_compare = target.shape[0] for (i, col) in enumerate(target.columns): res = self.func(target[col], *self.args, **self.kwargs) ares = np.asarray(res).ndim if ares > 1: raise ValueError('too many dims to broadcast') if ares == 1: if result_compare != len(res): raise ValueError('cannot broadcast result') result_values[:, i] = res result = self.obj._constructor(result_values, index=target.index, columns=target.columns) return result def apply_standard(self): if self.engine == 'python': (results, res_index) = self.apply_series_generator() else: (results, res_index) = self.apply_series_numba() return self.wrap_results(results, res_index) def apply_series_generator(self) -> tuple[ResType, Index]: assert callable(self.func) series_gen = self.series_generator res_index = self.result_index results = {} for (i, v) in enumerate(series_gen): results[i] = self.func(v, *self.args, **self.kwargs) if isinstance(results[i], ABCSeries): results[i] = results[i].copy(deep=False) return (results, res_index) def apply_series_numba(self): if self.engine_kwargs.get('parallel', False): raise NotImplementedError("Parallel apply is not supported when raw=False and engine='numba'") if not self.obj.index.is_unique or not self.columns.is_unique: raise NotImplementedError("The index/columns must be unique when raw=False and engine='numba'") self.validate_values_for_numba() results = self.apply_with_numba() return (results, self.result_index) def wrap_results(self, results: ResType, res_index: Index) -> DataFrame | Series: from pandas import Series if len(results) > 0 and 0 in results and is_sequence(results[0]): return self.wrap_results_for_axis(results, res_index) constructor_sliced = self.obj._constructor_sliced if len(results) == 0 and constructor_sliced is Series: result = constructor_sliced(results, dtype=np.float64) else: result = constructor_sliced(results) result.index = res_index return result def apply_str(self) -> DataFrame | Series: if self.func == 'size': obj = self.obj value = obj.shape[self.axis] return 
obj._constructor_sliced(value, index=self.agg_axis) return super().apply_str() class FrameRowApply(FrameApply): axis: AxisInt = 0 @property def series_generator(self) -> Generator[Series, None, None]: return (self.obj._ixs(i, axis=1) for i in range(len(self.columns))) @staticmethod @functools.cache def generate_numba_apply_func(func, nogil=True, nopython=True, parallel=False) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]: numba = import_optional_dependency('numba') from pandas import Series from pandas.core._numba.extensions import maybe_cast_str jitted_udf = numba.extending.register_jitable(func) @numba.jit(nogil=nogil, nopython=nopython, parallel=parallel) def numba_func(values, col_names, df_index, *args): results = {} for j in range(values.shape[1]): ser = Series(values[:, j], index=df_index, name=maybe_cast_str(col_names[j])) results[j] = jitted_udf(ser, *args) return results return numba_func def apply_with_numba(self) -> dict[int, Any]: func = cast(Callable, self.func) (args, kwargs) = prepare_function_arguments(func, self.args, self.kwargs) nb_func = self.generate_numba_apply_func(func, **get_jit_arguments(self.engine_kwargs, kwargs)) from pandas.core._numba.extensions import set_numba_data index = self.obj.index if index.dtype == 'string': index = index.astype(object) columns = self.obj.columns if columns.dtype == 'string': columns = columns.astype(object) with set_numba_data(index) as index, set_numba_data(columns) as columns: res = dict(nb_func(self.values, columns, index, *args)) return res @property def result_index(self) -> Index: return self.columns @property def result_columns(self) -> Index: return self.index def wrap_results_for_axis(self, results: ResType, res_index: Index) -> DataFrame | Series: if self.result_type == 'reduce': res = self.obj._constructor_sliced(results) res.index = res_index return res elif self.result_type is None and all((isinstance(x, dict) for x in results.values())): res = self.obj._constructor_sliced(results) res.index = res_index return res try: result = self.obj._constructor(data=results) except ValueError as err: if 'All arrays must be of the same length' in str(err): res = self.obj._constructor_sliced(results) res.index = res_index return res else: raise if not isinstance(results[0], ABCSeries): if len(result.index) == len(self.res_columns): result.index = self.res_columns if len(result.columns) == len(res_index): result.columns = res_index return result class FrameColumnApply(FrameApply): axis: AxisInt = 1 def apply_broadcast(self, target: DataFrame) -> DataFrame: result = super().apply_broadcast(target.T) return result.T @property def series_generator(self) -> Generator[Series, None, None]: values = self.values values = ensure_wrapped_if_datetimelike(values) assert len(values) > 0 ser = self.obj._ixs(0, axis=0) mgr = ser._mgr is_view = mgr.blocks[0].refs.has_reference() if isinstance(ser.dtype, ExtensionDtype): obj = self.obj for i in range(len(obj)): yield obj._ixs(i, axis=0) else: for (arr, name) in zip(values, self.index): ser._mgr = mgr mgr.set_values(arr) object.__setattr__(ser, '_name', name) if not is_view: mgr.blocks[0].refs = BlockValuesRefs(mgr.blocks[0]) yield ser @staticmethod @functools.cache def generate_numba_apply_func(func, nogil=True, nopython=True, parallel=False) -> Callable[[npt.NDArray, Index, Index], dict[int, Any]]: numba = import_optional_dependency('numba') from pandas import Series from pandas.core._numba.extensions import maybe_cast_str jitted_udf = numba.extending.register_jitable(func) 
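# ---------------------------------------------------------------------------
# Illustrative usage (not part of the pandas source above): ``DataFrame.apply``
# dispatches to ``FrameRowApply`` (axis=0) or ``FrameColumnApply`` (axis=1) above.
# The commented numba call is only a sketch; it assumes the optional numba
# dependency plus the numeric-dtype and unique-label requirements checked by
# ``validate_values_for_numba``/``apply_series_numba``.
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0]})

df.apply(np.sum)           # column-wise: a -> 3.0, b -> 7.0
df.apply(np.sum, axis=1)   # row-wise:    0 -> 4.0, 1 -> 6.0

# df.apply(lambda s: s.sum(), axis=1, engine="numba")   # routed through the numba paths above
# ---------------------------------------------------------------------------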
@numba.jit(nogil=nogil, nopython=nopython, parallel=parallel) def numba_func(values, col_names_index, index, *args): results = {} for i in range(values.shape[0]): ser = Series(values[i].copy(), index=col_names_index, name=maybe_cast_str(index[i])) results[i] = jitted_udf(ser, *args) return results return numba_func def apply_with_numba(self) -> dict[int, Any]: func = cast(Callable, self.func) (args, kwargs) = prepare_function_arguments(func, self.args, self.kwargs) nb_func = self.generate_numba_apply_func(func, **get_jit_arguments(self.engine_kwargs, kwargs)) from pandas.core._numba.extensions import set_numba_data with set_numba_data(self.obj.index) as index, set_numba_data(self.columns) as columns: res = dict(nb_func(self.values, columns, index, *args)) return res @property def result_index(self) -> Index: return self.index @property def result_columns(self) -> Index: return self.columns def wrap_results_for_axis(self, results: ResType, res_index: Index) -> DataFrame | Series: result: DataFrame | Series if self.result_type == 'expand': result = self.infer_to_same_shape(results, res_index) elif not isinstance(results[0], ABCSeries): result = self.obj._constructor_sliced(results) result.index = res_index else: result = self.infer_to_same_shape(results, res_index) return result def infer_to_same_shape(self, results: ResType, res_index: Index) -> DataFrame: result = self.obj._constructor(data=results) result = result.T result.index = res_index result = result.infer_objects() return result class SeriesApply(NDFrameApply): obj: Series axis: AxisInt = 0 by_row: Literal[False, 'compat', '_compat'] def __init__(self, obj: Series, func: AggFuncType, *, by_row: Literal[False, 'compat', '_compat']='compat', args, kwargs) -> None: super().__init__(obj, func, raw=False, result_type=None, by_row=by_row, args=args, kwargs=kwargs) def apply(self) -> DataFrame | Series: obj = self.obj if len(obj) == 0: return self.apply_empty_result() if is_list_like(self.func): return self.apply_list_or_dict_like() if isinstance(self.func, str): return self.apply_str() if self.by_row == '_compat': return self.apply_compat() return self.apply_standard() def agg(self): result = super().agg() if result is None: obj = self.obj func = self.func assert callable(func) result = func(obj, *self.args, **self.kwargs) return result def apply_empty_result(self) -> Series: obj = self.obj return obj._constructor(dtype=obj.dtype, index=obj.index).__finalize__(obj, method='apply') def apply_compat(self): obj = self.obj func = self.func if callable(func): f = com.get_cython_func(func) if f and (not self.args) and (not self.kwargs): return obj.apply(func, by_row=False) try: result = obj.apply(func, by_row='compat') except (ValueError, AttributeError, TypeError): result = obj.apply(func, by_row=False) return result def apply_standard(self) -> DataFrame | Series: func = cast(Callable, self.func) obj = self.obj if isinstance(func, np.ufunc): with np.errstate(all='ignore'): return func(obj, *self.args, **self.kwargs) elif not self.by_row: return func(obj, *self.args, **self.kwargs) if self.args or self.kwargs: def curried(x): return func(x, *self.args, **self.kwargs) else: curried = func action = 'ignore' if isinstance(obj.dtype, CategoricalDtype) else None mapped = obj._map_values(mapper=curried, na_action=action) if len(mapped) and isinstance(mapped[0], ABCSeries): return obj._constructor_expanddim(list(mapped), index=obj.index) else: return obj._constructor(mapped, index=obj.index).__finalize__(obj, method='apply') class 
GroupByApply(Apply): obj: GroupBy | Resampler | BaseWindow def __init__(self, obj: GroupBy[NDFrameT], func: AggFuncType, *, args, kwargs) -> None: kwargs = kwargs.copy() self.axis = obj.obj._get_axis_number(kwargs.get('axis', 0)) super().__init__(obj, func, raw=False, result_type=None, args=args, kwargs=kwargs) def apply(self): raise NotImplementedError def transform(self): raise NotImplementedError def agg_or_apply_list_like(self, op_name: Literal['agg', 'apply']) -> DataFrame | Series: obj = self.obj kwargs = self.kwargs if op_name == 'apply': kwargs = {**kwargs, 'by_row': False} if getattr(obj, 'axis', 0) == 1: raise NotImplementedError('axis other than 0 is not supported') if obj._selected_obj.ndim == 1: selected_obj = obj._selected_obj else: selected_obj = obj._obj_with_exclusions with com.temp_setattr(obj, 'as_index', True, condition=hasattr(obj, 'as_index')): (keys, results) = self.compute_list_like(op_name, selected_obj, kwargs) result = self.wrap_results_list_like(keys, results) return result def agg_or_apply_dict_like(self, op_name: Literal['agg', 'apply']) -> DataFrame | Series: from pandas.core.groupby.generic import DataFrameGroupBy, SeriesGroupBy assert op_name in ['agg', 'apply'] obj = self.obj kwargs = {} if op_name == 'apply': by_row = '_compat' if self.by_row else False kwargs.update({'by_row': by_row}) if getattr(obj, 'axis', 0) == 1: raise NotImplementedError('axis other than 0 is not supported') selected_obj = obj._selected_obj selection = obj._selection is_groupby = isinstance(obj, (DataFrameGroupBy, SeriesGroupBy)) if is_groupby: engine = self.kwargs.get('engine', None) engine_kwargs = self.kwargs.get('engine_kwargs', None) kwargs.update({'engine': engine, 'engine_kwargs': engine_kwargs}) with com.temp_setattr(obj, 'as_index', True, condition=hasattr(obj, 'as_index')): (result_index, result_data) = self.compute_dict_like(op_name, selected_obj, selection, kwargs) result = self.wrap_results_dict_like(selected_obj, result_index, result_data) return result class ResamplerWindowApply(GroupByApply): axis: AxisInt = 0 obj: Resampler | BaseWindow def __init__(self, obj: Resampler | BaseWindow, func: AggFuncType, *, args, kwargs) -> None: super(GroupByApply, self).__init__(obj, func, raw=False, result_type=None, args=args, kwargs=kwargs) def apply(self): raise NotImplementedError def transform(self): raise NotImplementedError def reconstruct_func(func: AggFuncType | None, **kwargs) -> tuple[bool, AggFuncType, tuple[str, ...] | None, npt.NDArray[np.intp] | None]: relabeling = func is None and is_multi_agg_with_relabel(**kwargs) columns: tuple[str, ...] 
| None = None order: npt.NDArray[np.intp] | None = None if not relabeling: if isinstance(func, list) and len(func) > len(set(func)): raise SpecificationError('Function names must be unique if there is no new column names assigned') if func is None: raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).") if relabeling: (func, columns, order) = normalize_keyword_aggregation(kwargs) assert func is not None return (relabeling, func, columns, order) def is_multi_agg_with_relabel(**kwargs) -> bool: return all((isinstance(v, tuple) and len(v) == 2 for v in kwargs.values())) and len(kwargs) > 0 def normalize_keyword_aggregation(kwargs: dict) -> tuple[MutableMapping[Hashable, list[AggFuncTypeBase]], tuple[str, ...], npt.NDArray[np.intp]]: from pandas.core.indexes.base import Index aggspec = defaultdict(list) order = [] columns = tuple(kwargs.keys()) for (column, aggfunc) in kwargs.values(): aggspec[column].append(aggfunc) order.append((column, com.get_callable_name(aggfunc) or aggfunc)) uniquified_order = _make_unique_kwarg_list(order) aggspec_order = [(column, com.get_callable_name(aggfunc) or aggfunc) for (column, aggfuncs) in aggspec.items() for aggfunc in aggfuncs] uniquified_aggspec = _make_unique_kwarg_list(aggspec_order) col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order) return (aggspec, columns, col_idx_order) def _make_unique_kwarg_list(seq: Sequence[tuple[Any, Any]]) -> Sequence[tuple[Any, Any]]: return [(pair[0], f'{pair[1]}_{seq[:i].count(pair)}') if seq.count(pair) > 1 else pair for (i, pair) in enumerate(seq)] def relabel_result(result: DataFrame | Series, func: dict[str, list[Callable | str]], columns: Iterable[Hashable], order: Iterable[int]) -> dict[Hashable, Series]: from pandas.core.indexes.base import Index reordered_indexes = [pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1])] reordered_result_in_dict: dict[Hashable, Series] = {} idx = 0 reorder_mask = not isinstance(result, ABCSeries) and len(result.columns) > 1 for (col, fun) in func.items(): s = result[col].dropna() if reorder_mask: fun = [com.get_callable_name(f) if not isinstance(f, str) else f for f in fun] col_idx_order = Index(s.index).get_indexer(fun) valid_idx = col_idx_order != -1 if valid_idx.any(): s = s.iloc[col_idx_order[valid_idx]] if not s.empty: s.index = reordered_indexes[idx:idx + len(fun)] reordered_result_in_dict[col] = s.reindex(columns) idx = idx + len(fun) return reordered_result_in_dict def reconstruct_and_relabel_result(result, func, **kwargs) -> DataFrame | Series: from pandas import DataFrame (relabeling, func, columns, order) = reconstruct_func(func, **kwargs) if relabeling: assert columns is not None assert order is not None result_in_dict = relabel_result(result, func, columns, order) result = DataFrame(result_in_dict, index=columns) return result def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]: if len(aggfuncs) <= 1: return aggfuncs i = 0 mangled_aggfuncs = [] for aggfunc in aggfuncs: if com.get_callable_name(aggfunc) == '<lambda>': aggfunc = partial(aggfunc) aggfunc.__name__ = f'<lambda_{i}>' i += 1 mangled_aggfuncs.append(aggfunc) return mangled_aggfuncs def maybe_mangle_lambdas(agg_spec: Any) -> Any: is_dict = is_dict_like(agg_spec) if not (is_dict or is_list_like(agg_spec)): return agg_spec mangled_aggspec = type(agg_spec)() if is_dict: for (key, aggfuncs) in agg_spec.items(): if is_list_like(aggfuncs) and (not is_dict_like(aggfuncs)): mangled_aggfuncs = _managle_lambda_list(aggfuncs) else: mangled_aggfuncs = aggfuncs mangled_aggspec[key] 
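# ---------------------------------------------------------------------------
# Illustrative usage (not part of the pandas source above): named aggregation is
# the public face of ``reconstruct_func``/``normalize_keyword_aggregation`` above,
# which unpack the keyword form into a per-column spec plus an output-column order.
# Output assumes a recent pandas release.
import pandas as pd

df = pd.DataFrame({"g": ["x", "x", "y"], "v": [1, 2, 3]})

df.groupby("g").agg(total=("v", "sum"), biggest=("v", "max"))
#    total  biggest
# g
# x      3        2
# y      3        3
# ---------------------------------------------------------------------------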
= mangled_aggfuncs else: mangled_aggspec = _managle_lambda_list(agg_spec) return mangled_aggspec def validate_func_kwargs(kwargs: dict) -> tuple[list[str], list[str | Callable[..., Any]]]: tuple_given_message = 'func is expected but received {} in **kwargs.' columns = list(kwargs) func = [] for col_func in kwargs.values(): if not (isinstance(col_func, str) or callable(col_func)): raise TypeError(tuple_given_message.format(type(col_func).__name__)) func.append(col_func) if not columns: no_arg_message = "Must provide 'func' or named aggregation **kwargs." raise TypeError(no_arg_message) return (columns, func) def include_axis(op_name: Literal['agg', 'apply'], colg: Series | DataFrame) -> bool: return isinstance(colg, ABCDataFrame) or (isinstance(colg, ABCSeries) and op_name == 'agg') # File: pandas-main/pandas/core/array_algos/datetimelike_accumulations.py """""" from __future__ import annotations from typing import TYPE_CHECKING import numpy as np from pandas._libs import iNaT from pandas.core.dtypes.missing import isna if TYPE_CHECKING: from collections.abc import Callable def _cum_func(func: Callable, values: np.ndarray, *, skipna: bool=True) -> np.ndarray: try: fill_value = {np.maximum.accumulate: np.iinfo(np.int64).min, np.cumsum: 0, np.minimum.accumulate: np.iinfo(np.int64).max}[func] except KeyError as err: raise ValueError(f'No accumulation for {func} implemented on BaseMaskedArray') from err mask = isna(values) y = values.view('i8') y[mask] = fill_value if not skipna: mask = np.maximum.accumulate(mask) result = func(y, axis=0) result[mask] = iNaT if values.dtype.kind in 'mM': return result.view(values.dtype.base) return result def cumsum(values: np.ndarray, *, skipna: bool=True) -> np.ndarray: return _cum_func(np.cumsum, values, skipna=skipna) def cummin(values: np.ndarray, *, skipna: bool=True) -> np.ndarray: return _cum_func(np.minimum.accumulate, values, skipna=skipna) def cummax(values: np.ndarray, *, skipna: bool=True) -> np.ndarray: return _cum_func(np.maximum.accumulate, values, skipna=skipna) # File: pandas-main/pandas/core/array_algos/masked_accumulations.py """""" from __future__ import annotations from typing import TYPE_CHECKING import numpy as np if TYPE_CHECKING: from collections.abc import Callable from pandas._typing import npt def _cum_func(func: Callable, values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool=True) -> tuple[np.ndarray, npt.NDArray[np.bool_]]: dtype_info: np.iinfo | np.finfo if values.dtype.kind == 'f': dtype_info = np.finfo(values.dtype.type) elif values.dtype.kind in 'iu': dtype_info = np.iinfo(values.dtype.type) elif values.dtype.kind == 'b': dtype_info = np.iinfo(np.uint8) else: raise NotImplementedError(f'No masked accumulation defined for dtype {values.dtype.type}') try: fill_value = {np.cumprod: 1, np.maximum.accumulate: dtype_info.min, np.cumsum: 0, np.minimum.accumulate: dtype_info.max}[func] except KeyError as err: raise NotImplementedError(f'No accumulation for {func} implemented on BaseMaskedArray') from err values[mask] = fill_value if not skipna: mask = np.maximum.accumulate(mask) values = func(values) return (values, mask) def cumsum(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool=True) -> tuple[np.ndarray, npt.NDArray[np.bool_]]: return _cum_func(np.cumsum, values, mask, skipna=skipna) def cumprod(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool=True) -> tuple[np.ndarray, npt.NDArray[np.bool_]]: return _cum_func(np.cumprod, values, mask, skipna=skipna) def cummin(values: np.ndarray, mask: 
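# ---------------------------------------------------------------------------
# Illustrative usage (not part of the pandas source above): accumulations on
# nullable dtypes route through the masked ``_cum_func`` above; the mask is either
# left as-is (skipna=True) or propagated via ``np.maximum.accumulate`` (skipna=False).
# Outputs assume a recent pandas release.
import pandas as pd

s = pd.Series([1, None, 3], dtype="Int64")

s.cumsum()               # 1, <NA>, 4     -- missing positions stay missing, later values keep accumulating
s.cumsum(skipna=False)   # 1, <NA>, <NA>  -- everything after the first missing value is masked
# ---------------------------------------------------------------------------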
npt.NDArray[np.bool_], *, skipna: bool=True) -> tuple[np.ndarray, npt.NDArray[np.bool_]]: return _cum_func(np.minimum.accumulate, values, mask, skipna=skipna) def cummax(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool=True) -> tuple[np.ndarray, npt.NDArray[np.bool_]]: return _cum_func(np.maximum.accumulate, values, mask, skipna=skipna) # File: pandas-main/pandas/core/array_algos/masked_reductions.py """""" from __future__ import annotations from typing import TYPE_CHECKING import warnings import numpy as np from pandas._libs import missing as libmissing from pandas.core.nanops import check_below_min_count if TYPE_CHECKING: from collections.abc import Callable from pandas._typing import AxisInt, npt def _reductions(func: Callable, values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool=True, min_count: int=0, axis: AxisInt | None=None, **kwargs): if not skipna: if mask.any() or check_below_min_count(values.shape, None, min_count): return libmissing.NA else: return func(values, axis=axis, **kwargs) else: if check_below_min_count(values.shape, mask, min_count) and (axis is None or values.ndim == 1): return libmissing.NA return func(values, where=~mask, axis=axis, **kwargs) def sum(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool=True, min_count: int=0, axis: AxisInt | None=None): return _reductions(np.sum, values=values, mask=mask, skipna=skipna, min_count=min_count, axis=axis) def prod(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool=True, min_count: int=0, axis: AxisInt | None=None): return _reductions(np.prod, values=values, mask=mask, skipna=skipna, min_count=min_count, axis=axis) def _minmax(func: Callable, values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool=True, axis: AxisInt | None=None): if not skipna: if mask.any() or not values.size: return libmissing.NA else: return func(values, axis=axis) else: subset = values[~mask] if subset.size: return func(subset, axis=axis) else: return libmissing.NA def min(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool=True, axis: AxisInt | None=None): return _minmax(np.min, values=values, mask=mask, skipna=skipna, axis=axis) def max(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool=True, axis: AxisInt | None=None): return _minmax(np.max, values=values, mask=mask, skipna=skipna, axis=axis) def mean(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool=True, axis: AxisInt | None=None): if not values.size or mask.all(): return libmissing.NA return _reductions(np.mean, values=values, mask=mask, skipna=skipna, axis=axis) def var(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool=True, axis: AxisInt | None=None, ddof: int=1): if not values.size or mask.all(): return libmissing.NA with warnings.catch_warnings(): warnings.simplefilter('ignore', RuntimeWarning) return _reductions(np.var, values=values, mask=mask, skipna=skipna, axis=axis, ddof=ddof) def std(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool=True, axis: AxisInt | None=None, ddof: int=1): if not values.size or mask.all(): return libmissing.NA with warnings.catch_warnings(): warnings.simplefilter('ignore', RuntimeWarning) return _reductions(np.std, values=values, mask=mask, skipna=skipna, axis=axis, ddof=ddof) # File: pandas-main/pandas/core/array_algos/putmask.py """""" from __future__ import annotations from typing import TYPE_CHECKING, Any import numpy as np from pandas._libs import lib from pandas.core.dtypes.cast import infer_dtype_from from 
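# ---------------------------------------------------------------------------
# Illustrative usage (not part of the pandas source above): reductions on nullable
# dtypes go through ``_reductions`` above, where ``check_below_min_count`` enforces
# the ``min_count`` contract. Outputs assume a recent pandas release.
import pandas as pd

s = pd.Series([1, None, 3], dtype="Int64")

s.sum()                  # 4
s.sum(skipna=False)      # <NA>
s.sum(min_count=3)       # <NA>  -- only two non-missing values are present
# ---------------------------------------------------------------------------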
pandas.core.dtypes.common import is_list_like from pandas.core.arrays import ExtensionArray if TYPE_CHECKING: from pandas._typing import ArrayLike, npt from pandas import MultiIndex def putmask_inplace(values: ArrayLike, mask: npt.NDArray[np.bool_], value: Any) -> None: if not isinstance(values, np.ndarray) or (values.dtype == object and (not lib.is_scalar(value))) or (isinstance(value, np.ndarray) and (not np.can_cast(value.dtype, values.dtype))): if is_list_like(value) and len(value) == len(values): values[mask] = value[mask] else: values[mask] = value else: np.putmask(values, mask, value) def putmask_without_repeat(values: np.ndarray, mask: npt.NDArray[np.bool_], new: Any) -> None: if getattr(new, 'ndim', 0) >= 1: new = new.astype(values.dtype, copy=False) nlocs = mask.sum() if nlocs > 0 and is_list_like(new) and (getattr(new, 'ndim', 1) == 1): shape = np.shape(new) if nlocs == shape[-1]: np.place(values, mask, new) elif mask.shape[-1] == shape[-1] or shape[-1] == 1: np.putmask(values, mask, new) else: raise ValueError('cannot assign mismatch length to masked array') else: np.putmask(values, mask, new) def validate_putmask(values: ArrayLike | MultiIndex, mask: np.ndarray) -> tuple[npt.NDArray[np.bool_], bool]: mask = extract_bool_array(mask) if mask.shape != values.shape: raise ValueError('putmask: mask and data must be the same size') noop = not mask.any() return (mask, noop) def extract_bool_array(mask: ArrayLike) -> npt.NDArray[np.bool_]: if isinstance(mask, ExtensionArray): mask = mask.to_numpy(dtype=bool, na_value=False) mask = np.asarray(mask, dtype=bool) return mask def setitem_datetimelike_compat(values: np.ndarray, num_set: int, other): if values.dtype == object: (dtype, _) = infer_dtype_from(other) if lib.is_np_dtype(dtype, 'mM'): if not is_list_like(other): other = [other] * num_set else: other = list(other) return other # File: pandas-main/pandas/core/array_algos/quantile.py from __future__ import annotations from typing import TYPE_CHECKING import numpy as np from pandas.core.dtypes.missing import isna, na_value_for_dtype if TYPE_CHECKING: from pandas._typing import ArrayLike, Scalar, npt def quantile_compat(values: ArrayLike, qs: npt.NDArray[np.float64], interpolation: str) -> ArrayLike: if isinstance(values, np.ndarray): fill_value = na_value_for_dtype(values.dtype, compat=False) mask = isna(values) return quantile_with_mask(values, mask, fill_value, qs, interpolation) else: return values._quantile(qs, interpolation) def quantile_with_mask(values: np.ndarray, mask: npt.NDArray[np.bool_], fill_value, qs: npt.NDArray[np.float64], interpolation: str) -> np.ndarray: assert values.shape == mask.shape if values.ndim == 1: values = np.atleast_2d(values) mask = np.atleast_2d(mask) res_values = quantile_with_mask(values, mask, fill_value, qs, interpolation) return res_values[0] assert values.ndim == 2 is_empty = values.shape[1] == 0 if is_empty: flat = np.full(len(qs), fill_value) result = np.repeat(flat, len(values)).reshape(len(values), len(qs)) else: result = _nanquantile(values, qs, na_value=fill_value, mask=mask, interpolation=interpolation) result = np.asarray(result) result = result.T return result def _nanquantile_1d(values: np.ndarray, mask: npt.NDArray[np.bool_], qs: npt.NDArray[np.float64], na_value: Scalar, interpolation: str) -> Scalar | np.ndarray: values = values[~mask] if len(values) == 0: return np.full(len(qs), na_value) return np.quantile(values, qs, method=interpolation) def _nanquantile(values: np.ndarray, qs: npt.NDArray[np.float64], *, na_value, mask: 
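# ---------------------------------------------------------------------------
# Illustrative usage (not part of the pandas source above): ``Series.quantile``
# eventually reaches ``quantile_with_mask`` above, which drops masked (missing)
# entries before calling ``np.quantile``. Outputs assume a recent pandas release.
import numpy as np
import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, np.nan])

s.quantile(0.5)                                   # 2.0 -- the NaN is masked out
s.quantile([0.25, 0.75], interpolation="lower")
# 0.25    1.0
# 0.75    2.0
# ---------------------------------------------------------------------------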
npt.NDArray[np.bool_], interpolation: str): if values.dtype.kind in 'mM': result = _nanquantile(values.view('i8'), qs=qs, na_value=na_value.view('i8'), mask=mask, interpolation=interpolation) return result.astype(values.dtype) if mask.any(): assert mask.shape == values.shape result = [_nanquantile_1d(val, m, qs, na_value, interpolation=interpolation) for (val, m) in zip(list(values), list(mask))] if values.dtype.kind == 'f': result = np.asarray(result, dtype=values.dtype).T else: result = np.asarray(result).T if result.dtype != values.dtype and (not mask.all()) and (result == result.astype(values.dtype, copy=False)).all(): result = result.astype(values.dtype, copy=False) return result else: return np.quantile(values, qs, axis=1, method=interpolation) # File: pandas-main/pandas/core/array_algos/replace.py """""" from __future__ import annotations import operator import re from re import Pattern from typing import TYPE_CHECKING, Any import numpy as np from pandas.core.dtypes.common import is_bool, is_re, is_re_compilable from pandas.core.dtypes.missing import isna if TYPE_CHECKING: from pandas._typing import ArrayLike, Scalar, npt def should_use_regex(regex: bool, to_replace: Any) -> bool: if is_re(to_replace): regex = True regex = regex and is_re_compilable(to_replace) regex = regex and re.compile(to_replace).pattern != '' return regex def compare_or_regex_search(a: ArrayLike, b: Scalar | Pattern, regex: bool, mask: npt.NDArray[np.bool_]) -> ArrayLike: if isna(b): return ~mask def _check_comparison_types(result: ArrayLike | bool, a: ArrayLike, b: Scalar | Pattern) -> None: if is_bool(result) and isinstance(a, np.ndarray): type_names = [type(a).__name__, type(b).__name__] type_names[0] = f'ndarray(dtype={a.dtype})' raise TypeError(f'Cannot compare types {type_names[0]!r} and {type_names[1]!r}') if not regex or not should_use_regex(regex, b): op = lambda x: operator.eq(x, b) else: op = np.vectorize(lambda x: bool(re.search(b, x)) if isinstance(x, str) and isinstance(b, (str, Pattern)) else False) if isinstance(a, np.ndarray) and mask is not None: a = a[mask] result = op(a) if isinstance(result, np.ndarray): tmp = np.zeros(mask.shape, dtype=np.bool_) np.place(tmp, mask, result) result = tmp else: result = op(a) _check_comparison_types(result, a, b) return result def replace_regex(values: ArrayLike, rx: re.Pattern, value, mask: npt.NDArray[np.bool_] | None) -> None: if isna(value) or not isinstance(value, str): def re_replacer(s): if is_re(rx) and isinstance(s, str): return value if rx.search(s) is not None else s else: return s else: def re_replacer(s): if is_re(rx) and isinstance(s, str): return rx.sub(value, s) else: return s f = np.vectorize(re_replacer, otypes=[np.object_]) if mask is None: values[:] = f(values) else: values[mask] = f(values[mask]) # File: pandas-main/pandas/core/array_algos/take.py from __future__ import annotations import functools from typing import TYPE_CHECKING, cast, overload import numpy as np from pandas._libs import algos as libalgos, lib from pandas.core.dtypes.cast import maybe_promote from pandas.core.dtypes.common import ensure_platform_int, is_1d_only_ea_dtype from pandas.core.dtypes.missing import na_value_for_dtype from pandas.core.construction import ensure_wrapped_if_datetimelike if TYPE_CHECKING: from pandas._typing import ArrayLike, AxisInt, npt from pandas.core.arrays._mixins import NDArrayBackedExtensionArray from pandas.core.arrays.base import ExtensionArray @overload def take_nd(arr: np.ndarray, indexer, axis: AxisInt=..., fill_value=..., 
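# ---------------------------------------------------------------------------
# Illustrative usage (not part of the pandas source above): ``Series.replace`` with
# ``regex=True`` on object data is driven by ``should_use_regex``/``replace_regex``
# above. Output assumes a recent pandas release.
import pandas as pd

s = pd.Series(["foo", "fuz", "bar"])
s.replace(r"^f.z$", "new", regex=True)
# 0    foo
# 1    new
# 2    bar
# ---------------------------------------------------------------------------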
allow_fill: bool=...) -> np.ndarray: ... @overload def take_nd(arr: ExtensionArray, indexer, axis: AxisInt=..., fill_value=..., allow_fill: bool=...) -> ArrayLike: ... def take_nd(arr: ArrayLike, indexer, axis: AxisInt=0, fill_value=lib.no_default, allow_fill: bool=True) -> ArrayLike: if fill_value is lib.no_default: fill_value = na_value_for_dtype(arr.dtype, compat=False) elif lib.is_np_dtype(arr.dtype, 'mM'): (dtype, fill_value) = maybe_promote(arr.dtype, fill_value) if arr.dtype != dtype: arr = arr.astype(dtype) if not isinstance(arr, np.ndarray): if not is_1d_only_ea_dtype(arr.dtype): arr = cast('NDArrayBackedExtensionArray', arr) return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill, axis=axis) return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill) arr = np.asarray(arr) return _take_nd_ndarray(arr, indexer, axis, fill_value, allow_fill) def _take_nd_ndarray(arr: np.ndarray, indexer: npt.NDArray[np.intp] | None, axis: AxisInt, fill_value, allow_fill: bool) -> np.ndarray: if indexer is None: indexer = np.arange(arr.shape[axis], dtype=np.intp) (dtype, fill_value) = (arr.dtype, arr.dtype.type()) else: indexer = ensure_platform_int(indexer) (dtype, fill_value, mask_info) = _take_preprocess_indexer_and_fill_value(arr, indexer, fill_value, allow_fill) flip_order = False if arr.ndim == 2 and arr.flags.f_contiguous: flip_order = True if flip_order: arr = arr.T axis = arr.ndim - axis - 1 out_shape_ = list(arr.shape) out_shape_[axis] = len(indexer) out_shape = tuple(out_shape_) if arr.flags.f_contiguous and axis == arr.ndim - 1: out = np.empty(out_shape, dtype=dtype, order='F') else: out = np.empty(out_shape, dtype=dtype) func = _get_take_nd_function(arr.ndim, arr.dtype, out.dtype, axis=axis, mask_info=mask_info) func(arr, indexer, out, fill_value) if flip_order: out = out.T return out def take_2d_multi(arr: np.ndarray, indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], fill_value=np.nan) -> np.ndarray: assert indexer is not None assert indexer[0] is not None assert indexer[1] is not None (row_idx, col_idx) = indexer row_idx = ensure_platform_int(row_idx) col_idx = ensure_platform_int(col_idx) indexer = (row_idx, col_idx) mask_info = None (dtype, fill_value) = maybe_promote(arr.dtype, fill_value) if dtype != arr.dtype: row_mask = row_idx == -1 col_mask = col_idx == -1 row_needs = row_mask.any() col_needs = col_mask.any() mask_info = ((row_mask, col_mask), (row_needs, col_needs)) if not (row_needs or col_needs): (dtype, fill_value) = (arr.dtype, arr.dtype.type()) out_shape = (len(row_idx), len(col_idx)) out = np.empty(out_shape, dtype=dtype) func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None) if func is None and arr.dtype != out.dtype: func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None) if func is not None: func = _convert_wrapper(func, out.dtype) if func is not None: func(arr, indexer, out=out, fill_value=fill_value) else: _take_2d_multi_object(arr, indexer, out, fill_value=fill_value, mask_info=mask_info) return out @functools.lru_cache def _get_take_nd_function_cached(ndim: int, arr_dtype: np.dtype, out_dtype: np.dtype, axis: AxisInt): tup = (arr_dtype.name, out_dtype.name) if ndim == 1: func = _take_1d_dict.get(tup, None) elif ndim == 2: if axis == 0: func = _take_2d_axis0_dict.get(tup, None) else: func = _take_2d_axis1_dict.get(tup, None) if func is not None: return func tup = (out_dtype.name, out_dtype.name) if ndim == 1: func = _take_1d_dict.get(tup, None) elif ndim == 2: if axis == 0: func = 
_take_2d_axis0_dict.get(tup, None) else: func = _take_2d_axis1_dict.get(tup, None) if func is not None: func = _convert_wrapper(func, out_dtype) return func return None def _get_take_nd_function(ndim: int, arr_dtype: np.dtype, out_dtype: np.dtype, axis: AxisInt=0, mask_info=None): func = None if ndim <= 2: func = _get_take_nd_function_cached(ndim, arr_dtype, out_dtype, axis) if func is None: def func(arr, indexer, out, fill_value=np.nan) -> None: indexer = ensure_platform_int(indexer) _take_nd_object(arr, indexer, out, axis=axis, fill_value=fill_value, mask_info=mask_info) return func def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None): def wrapper(arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan) -> None: if arr_dtype is not None: arr = arr.view(arr_dtype) if out_dtype is not None: out = out.view(out_dtype) if fill_wrap is not None: if fill_value.dtype.kind == 'm': fill_value = fill_value.astype('m8[ns]') else: fill_value = fill_value.astype('M8[ns]') fill_value = fill_wrap(fill_value) f(arr, indexer, out, fill_value=fill_value) return wrapper def _convert_wrapper(f, conv_dtype): def wrapper(arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan) -> None: if conv_dtype == object: arr = ensure_wrapped_if_datetimelike(arr) arr = arr.astype(conv_dtype) f(arr, indexer, out, fill_value=fill_value) return wrapper _take_1d_dict = {('int8', 'int8'): libalgos.take_1d_int8_int8, ('int8', 'int32'): libalgos.take_1d_int8_int32, ('int8', 'int64'): libalgos.take_1d_int8_int64, ('int8', 'float64'): libalgos.take_1d_int8_float64, ('int16', 'int16'): libalgos.take_1d_int16_int16, ('int16', 'int32'): libalgos.take_1d_int16_int32, ('int16', 'int64'): libalgos.take_1d_int16_int64, ('int16', 'float64'): libalgos.take_1d_int16_float64, ('int32', 'int32'): libalgos.take_1d_int32_int32, ('int32', 'int64'): libalgos.take_1d_int32_int64, ('int32', 'float64'): libalgos.take_1d_int32_float64, ('int64', 'int64'): libalgos.take_1d_int64_int64, ('uint8', 'uint8'): libalgos.take_1d_bool_bool, ('uint16', 'int64'): libalgos.take_1d_uint16_uint16, ('uint32', 'int64'): libalgos.take_1d_uint32_uint32, ('uint64', 'int64'): libalgos.take_1d_uint64_uint64, ('int64', 'float64'): libalgos.take_1d_int64_float64, ('float32', 'float32'): libalgos.take_1d_float32_float32, ('float32', 'float64'): libalgos.take_1d_float32_float64, ('float64', 'float64'): libalgos.take_1d_float64_float64, ('object', 'object'): libalgos.take_1d_object_object, ('bool', 'bool'): _view_wrapper(libalgos.take_1d_bool_bool, np.uint8, np.uint8), ('bool', 'object'): _view_wrapper(libalgos.take_1d_bool_object, np.uint8, None), ('datetime64[ns]', 'datetime64[ns]'): _view_wrapper(libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64), ('timedelta64[ns]', 'timedelta64[ns]'): _view_wrapper(libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64)} _take_2d_axis0_dict = {('int8', 'int8'): libalgos.take_2d_axis0_int8_int8, ('int8', 'int32'): libalgos.take_2d_axis0_int8_int32, ('int8', 'int64'): libalgos.take_2d_axis0_int8_int64, ('int8', 'float64'): libalgos.take_2d_axis0_int8_float64, ('int16', 'int16'): libalgos.take_2d_axis0_int16_int16, ('int16', 'int32'): libalgos.take_2d_axis0_int16_int32, ('int16', 'int64'): libalgos.take_2d_axis0_int16_int64, ('int16', 'float64'): libalgos.take_2d_axis0_int16_float64, ('int32', 'int32'): libalgos.take_2d_axis0_int32_int32, ('int32', 'int64'): libalgos.take_2d_axis0_int32_int64, ('int32', 'float64'): libalgos.take_2d_axis0_int32_float64, ('int64', 'int64'): 
libalgos.take_2d_axis0_int64_int64, ('int64', 'float64'): libalgos.take_2d_axis0_int64_float64, ('uint8', 'uint8'): libalgos.take_2d_axis0_bool_bool, ('uint16', 'uint16'): libalgos.take_2d_axis0_uint16_uint16, ('uint32', 'uint32'): libalgos.take_2d_axis0_uint32_uint32, ('uint64', 'uint64'): libalgos.take_2d_axis0_uint64_uint64, ('float32', 'float32'): libalgos.take_2d_axis0_float32_float32, ('float32', 'float64'): libalgos.take_2d_axis0_float32_float64, ('float64', 'float64'): libalgos.take_2d_axis0_float64_float64, ('object', 'object'): libalgos.take_2d_axis0_object_object, ('bool', 'bool'): _view_wrapper(libalgos.take_2d_axis0_bool_bool, np.uint8, np.uint8), ('bool', 'object'): _view_wrapper(libalgos.take_2d_axis0_bool_object, np.uint8, None), ('datetime64[ns]', 'datetime64[ns]'): _view_wrapper(libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64), ('timedelta64[ns]', 'timedelta64[ns]'): _view_wrapper(libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64)} _take_2d_axis1_dict = {('int8', 'int8'): libalgos.take_2d_axis1_int8_int8, ('int8', 'int32'): libalgos.take_2d_axis1_int8_int32, ('int8', 'int64'): libalgos.take_2d_axis1_int8_int64, ('int8', 'float64'): libalgos.take_2d_axis1_int8_float64, ('int16', 'int16'): libalgos.take_2d_axis1_int16_int16, ('int16', 'int32'): libalgos.take_2d_axis1_int16_int32, ('int16', 'int64'): libalgos.take_2d_axis1_int16_int64, ('int16', 'float64'): libalgos.take_2d_axis1_int16_float64, ('int32', 'int32'): libalgos.take_2d_axis1_int32_int32, ('int32', 'int64'): libalgos.take_2d_axis1_int32_int64, ('int32', 'float64'): libalgos.take_2d_axis1_int32_float64, ('int64', 'int64'): libalgos.take_2d_axis1_int64_int64, ('int64', 'float64'): libalgos.take_2d_axis1_int64_float64, ('uint8', 'uint8'): libalgos.take_2d_axis1_bool_bool, ('uint16', 'uint16'): libalgos.take_2d_axis1_uint16_uint16, ('uint32', 'uint32'): libalgos.take_2d_axis1_uint32_uint32, ('uint64', 'uint64'): libalgos.take_2d_axis1_uint64_uint64, ('float32', 'float32'): libalgos.take_2d_axis1_float32_float32, ('float32', 'float64'): libalgos.take_2d_axis1_float32_float64, ('float64', 'float64'): libalgos.take_2d_axis1_float64_float64, ('object', 'object'): libalgos.take_2d_axis1_object_object, ('bool', 'bool'): _view_wrapper(libalgos.take_2d_axis1_bool_bool, np.uint8, np.uint8), ('bool', 'object'): _view_wrapper(libalgos.take_2d_axis1_bool_object, np.uint8, None), ('datetime64[ns]', 'datetime64[ns]'): _view_wrapper(libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64), ('timedelta64[ns]', 'timedelta64[ns]'): _view_wrapper(libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64)} _take_2d_multi_dict = {('int8', 'int8'): libalgos.take_2d_multi_int8_int8, ('int8', 'int32'): libalgos.take_2d_multi_int8_int32, ('int8', 'int64'): libalgos.take_2d_multi_int8_int64, ('int8', 'float64'): libalgos.take_2d_multi_int8_float64, ('int16', 'int16'): libalgos.take_2d_multi_int16_int16, ('int16', 'int32'): libalgos.take_2d_multi_int16_int32, ('int16', 'int64'): libalgos.take_2d_multi_int16_int64, ('int16', 'float64'): libalgos.take_2d_multi_int16_float64, ('int32', 'int32'): libalgos.take_2d_multi_int32_int32, ('int32', 'int64'): libalgos.take_2d_multi_int32_int64, ('int32', 'float64'): libalgos.take_2d_multi_int32_float64, ('int64', 'int64'): libalgos.take_2d_multi_int64_int64, ('int64', 'float64'): libalgos.take_2d_multi_int64_float64, ('float32', 'float32'): libalgos.take_2d_multi_float32_float32, ('float32', 'float64'): 
libalgos.take_2d_multi_float32_float64, ('float64', 'float64'): libalgos.take_2d_multi_float64_float64, ('object', 'object'): libalgos.take_2d_multi_object_object, ('bool', 'bool'): _view_wrapper(libalgos.take_2d_multi_bool_bool, np.uint8, np.uint8), ('bool', 'object'): _view_wrapper(libalgos.take_2d_multi_bool_object, np.uint8, None), ('datetime64[ns]', 'datetime64[ns]'): _view_wrapper(libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64), ('timedelta64[ns]', 'timedelta64[ns]'): _view_wrapper(libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64)} def _take_nd_object(arr: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, axis: AxisInt, fill_value, mask_info) -> None: if mask_info is not None: (mask, needs_masking) = mask_info else: mask = indexer == -1 needs_masking = mask.any() if arr.dtype != out.dtype: arr = arr.astype(out.dtype) if arr.shape[axis] > 0: arr.take(indexer, axis=axis, out=out) if needs_masking: outindexer = [slice(None)] * arr.ndim outindexer[axis] = mask out[tuple(outindexer)] = fill_value def _take_2d_multi_object(arr: np.ndarray, indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]], out: np.ndarray, fill_value, mask_info) -> None: (row_idx, col_idx) = indexer if mask_info is not None: ((row_mask, col_mask), (row_needs, col_needs)) = mask_info else: row_mask = row_idx == -1 col_mask = col_idx == -1 row_needs = row_mask.any() col_needs = col_mask.any() if fill_value is not None: if row_needs: out[row_mask, :] = fill_value if col_needs: out[:, col_mask] = fill_value for (i, u_) in enumerate(row_idx): if u_ != -1: for (j, v) in enumerate(col_idx): if v != -1: out[i, j] = arr[u_, v] def _take_preprocess_indexer_and_fill_value(arr: np.ndarray, indexer: npt.NDArray[np.intp], fill_value, allow_fill: bool, mask: npt.NDArray[np.bool_] | None=None): mask_info: tuple[np.ndarray | None, bool] | None = None if not allow_fill: (dtype, fill_value) = (arr.dtype, arr.dtype.type()) mask_info = (None, False) else: (dtype, fill_value) = maybe_promote(arr.dtype, fill_value) if dtype != arr.dtype: if mask is not None: needs_masking = True else: mask = indexer == -1 needs_masking = bool(mask.any()) mask_info = (mask, needs_masking) if not needs_masking: (dtype, fill_value) = (arr.dtype, arr.dtype.type()) return (dtype, fill_value, mask_info) # File: pandas-main/pandas/core/array_algos/transforms.py """""" from __future__ import annotations from typing import TYPE_CHECKING import numpy as np if TYPE_CHECKING: from pandas._typing import AxisInt, Scalar def shift(values: np.ndarray, periods: int, axis: AxisInt, fill_value: Scalar) -> np.ndarray: new_values = values if periods == 0 or values.size == 0: return new_values.copy() f_ordered = values.flags.f_contiguous if f_ordered: new_values = new_values.T axis = new_values.ndim - axis - 1 if new_values.size: new_values = np.roll(new_values, np.intp(periods), axis=axis) axis_indexer = [slice(None)] * values.ndim if periods > 0: axis_indexer[axis] = slice(None, periods) else: axis_indexer[axis] = slice(periods, None) new_values[tuple(axis_indexer)] = fill_value if f_ordered: new_values = new_values.T return new_values # File: pandas-main/pandas/core/arraylike.py """""" from __future__ import annotations import operator from typing import Any import numpy as np from pandas._libs import lib from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op from pandas.core.dtypes.generic import ABCNDFrame from pandas.core import roperator from pandas.core.construction import extract_array 
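# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the pandas source
# tree): a minimal exercise of the internal array_algos helpers defined above
# (take_nd, shift, quantile_with_mask). These are private APIs; the calls below
# follow the signatures shown in this section and may change between pandas
# versions.
import numpy as np

from pandas.core.array_algos.quantile import quantile_with_mask
from pandas.core.array_algos.take import take_nd
from pandas.core.array_algos.transforms import shift

arr = np.array([1, 2, 3], dtype=np.int64)

# take_nd promotes the dtype when a fill value is required: the -1 slot in the
# indexer is filled with NaN, so the int64 input comes back as float64.
take_nd(arr, np.array([0, 2, -1], dtype=np.intp), allow_fill=True)
# -> array([ 1.,  3., nan])

# shift rolls values along an axis and fills the vacated positions.
shift(np.arange(5.0), periods=2, axis=0, fill_value=np.nan)
# -> array([nan, nan,  0.,  1.,  2.])

# quantile_with_mask takes the values, an NA mask, a fill value for all-NA
# rows, and the requested quantiles.
values = np.array([1.0, np.nan, 3.0])
quantile_with_mask(values, np.isnan(values), np.nan, np.array([0.5]), "linear")
# -> array([2.])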
from pandas.core.ops.common import unpack_zerodim_and_defer REDUCTION_ALIASES = {'maximum': 'max', 'minimum': 'min', 'add': 'sum', 'multiply': 'prod'} class OpsMixin: def _cmp_method(self, other, op): return NotImplemented @unpack_zerodim_and_defer('__eq__') def __eq__(self, other): return self._cmp_method(other, operator.eq) @unpack_zerodim_and_defer('__ne__') def __ne__(self, other): return self._cmp_method(other, operator.ne) @unpack_zerodim_and_defer('__lt__') def __lt__(self, other): return self._cmp_method(other, operator.lt) @unpack_zerodim_and_defer('__le__') def __le__(self, other): return self._cmp_method(other, operator.le) @unpack_zerodim_and_defer('__gt__') def __gt__(self, other): return self._cmp_method(other, operator.gt) @unpack_zerodim_and_defer('__ge__') def __ge__(self, other): return self._cmp_method(other, operator.ge) def _logical_method(self, other, op): return NotImplemented @unpack_zerodim_and_defer('__and__') def __and__(self, other): return self._logical_method(other, operator.and_) @unpack_zerodim_and_defer('__rand__') def __rand__(self, other): return self._logical_method(other, roperator.rand_) @unpack_zerodim_and_defer('__or__') def __or__(self, other): return self._logical_method(other, operator.or_) @unpack_zerodim_and_defer('__ror__') def __ror__(self, other): return self._logical_method(other, roperator.ror_) @unpack_zerodim_and_defer('__xor__') def __xor__(self, other): return self._logical_method(other, operator.xor) @unpack_zerodim_and_defer('__rxor__') def __rxor__(self, other): return self._logical_method(other, roperator.rxor) def _arith_method(self, other, op): return NotImplemented @unpack_zerodim_and_defer('__add__') def __add__(self, other): return self._arith_method(other, operator.add) @unpack_zerodim_and_defer('__radd__') def __radd__(self, other): return self._arith_method(other, roperator.radd) @unpack_zerodim_and_defer('__sub__') def __sub__(self, other): return self._arith_method(other, operator.sub) @unpack_zerodim_and_defer('__rsub__') def __rsub__(self, other): return self._arith_method(other, roperator.rsub) @unpack_zerodim_and_defer('__mul__') def __mul__(self, other): return self._arith_method(other, operator.mul) @unpack_zerodim_and_defer('__rmul__') def __rmul__(self, other): return self._arith_method(other, roperator.rmul) @unpack_zerodim_and_defer('__truediv__') def __truediv__(self, other): return self._arith_method(other, operator.truediv) @unpack_zerodim_and_defer('__rtruediv__') def __rtruediv__(self, other): return self._arith_method(other, roperator.rtruediv) @unpack_zerodim_and_defer('__floordiv__') def __floordiv__(self, other): return self._arith_method(other, operator.floordiv) @unpack_zerodim_and_defer('__rfloordiv') def __rfloordiv__(self, other): return self._arith_method(other, roperator.rfloordiv) @unpack_zerodim_and_defer('__mod__') def __mod__(self, other): return self._arith_method(other, operator.mod) @unpack_zerodim_and_defer('__rmod__') def __rmod__(self, other): return self._arith_method(other, roperator.rmod) @unpack_zerodim_and_defer('__divmod__') def __divmod__(self, other): return self._arith_method(other, divmod) @unpack_zerodim_and_defer('__rdivmod__') def __rdivmod__(self, other): return self._arith_method(other, roperator.rdivmod) @unpack_zerodim_and_defer('__pow__') def __pow__(self, other): return self._arith_method(other, operator.pow) @unpack_zerodim_and_defer('__rpow__') def __rpow__(self, other): return self._arith_method(other, roperator.rpow) def array_ufunc(self, ufunc: np.ufunc, method: 
str, *inputs: Any, **kwargs: Any): from pandas.core.frame import DataFrame, Series from pandas.core.generic import NDFrame from pandas.core.internals import BlockManager cls = type(self) kwargs = _standardize_out_kwarg(**kwargs) result = maybe_dispatch_ufunc_to_dunder_op(self, ufunc, method, *inputs, **kwargs) if result is not NotImplemented: return result no_defer = (np.ndarray.__array_ufunc__, cls.__array_ufunc__) for item in inputs: higher_priority = hasattr(item, '__array_priority__') and item.__array_priority__ > self.__array_priority__ has_array_ufunc = hasattr(item, '__array_ufunc__') and type(item).__array_ufunc__ not in no_defer and (not isinstance(item, self._HANDLED_TYPES)) if higher_priority or has_array_ufunc: return NotImplemented types = tuple((type(x) for x in inputs)) alignable = [x for (x, t) in zip(inputs, types) if issubclass(t, NDFrame)] if len(alignable) > 1: set_types = set(types) if len(set_types) > 1 and {DataFrame, Series}.issubset(set_types): raise NotImplementedError(f'Cannot apply ufunc {ufunc} to mixed DataFrame and Series inputs.') axes = self.axes for obj in alignable[1:]: for (i, (ax1, ax2)) in enumerate(zip(axes, obj.axes)): axes[i] = ax1.union(ax2) reconstruct_axes = dict(zip(self._AXIS_ORDERS, axes)) inputs = tuple((x.reindex(**reconstruct_axes) if issubclass(t, NDFrame) else x for (x, t) in zip(inputs, types))) else: reconstruct_axes = dict(zip(self._AXIS_ORDERS, self.axes)) if self.ndim == 1: names = {getattr(x, 'name') for x in inputs if hasattr(x, 'name')} name = names.pop() if len(names) == 1 else None reconstruct_kwargs = {'name': name} else: reconstruct_kwargs = {} def reconstruct(result): if ufunc.nout > 1: return tuple((_reconstruct(x) for x in result)) return _reconstruct(result) def _reconstruct(result): if lib.is_scalar(result): return result if result.ndim != self.ndim: if method == 'outer': raise NotImplementedError return result if isinstance(result, BlockManager): result = self._constructor_from_mgr(result, axes=result.axes) else: result = self._constructor(result, **reconstruct_axes, **reconstruct_kwargs, copy=False) if len(alignable) == 1: result = result.__finalize__(self) return result if 'out' in kwargs: result = dispatch_ufunc_with_out(self, ufunc, method, *inputs, **kwargs) return reconstruct(result) if method == 'reduce': result = dispatch_reduction_ufunc(self, ufunc, method, *inputs, **kwargs) if result is not NotImplemented: return result if self.ndim > 1 and (len(inputs) > 1 or ufunc.nout > 1): inputs = tuple((np.asarray(x) for x in inputs)) result = getattr(ufunc, method)(*inputs, **kwargs) elif self.ndim == 1: inputs = tuple((extract_array(x, extract_numpy=True) for x in inputs)) result = getattr(ufunc, method)(*inputs, **kwargs) elif method == '__call__' and (not kwargs): mgr = inputs[0]._mgr result = mgr.apply(getattr(ufunc, method)) else: result = default_array_ufunc(inputs[0], ufunc, method, *inputs, **kwargs) result = reconstruct(result) return result def _standardize_out_kwarg(**kwargs) -> dict: if 'out' not in kwargs and 'out1' in kwargs and ('out2' in kwargs): out1 = kwargs.pop('out1') out2 = kwargs.pop('out2') out = (out1, out2) kwargs['out'] = out return kwargs def dispatch_ufunc_with_out(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): out = kwargs.pop('out') where = kwargs.pop('where', None) result = getattr(ufunc, method)(*inputs, **kwargs) if result is NotImplemented: return NotImplemented if isinstance(result, tuple): if not isinstance(out, tuple) or len(out) != len(result): raise NotImplementedError for 
(arr, res) in zip(out, result): _assign_where(arr, res, where) return out if isinstance(out, tuple): if len(out) == 1: out = out[0] else: raise NotImplementedError _assign_where(out, result, where) return out def _assign_where(out, result, where) -> None: if where is None: out[:] = result else: np.putmask(out, where, result) def default_array_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): if not any((x is self for x in inputs)): raise NotImplementedError new_inputs = [x if x is not self else np.asarray(x) for x in inputs] return getattr(ufunc, method)(*new_inputs, **kwargs) def dispatch_reduction_ufunc(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): assert method == 'reduce' if len(inputs) != 1 or inputs[0] is not self: return NotImplemented if ufunc.__name__ not in REDUCTION_ALIASES: return NotImplemented method_name = REDUCTION_ALIASES[ufunc.__name__] if not hasattr(self, method_name): return NotImplemented if self.ndim > 1: if isinstance(self, ABCNDFrame): kwargs['numeric_only'] = False if 'axis' not in kwargs: kwargs['axis'] = 0 return getattr(self, method_name)(skipna=False, **kwargs) # File: pandas-main/pandas/core/arrays/__init__.py from pandas.core.arrays.arrow import ArrowExtensionArray from pandas.core.arrays.base import ExtensionArray, ExtensionOpsMixin, ExtensionScalarOpsMixin from pandas.core.arrays.boolean import BooleanArray from pandas.core.arrays.categorical import Categorical from pandas.core.arrays.datetimes import DatetimeArray from pandas.core.arrays.floating import FloatingArray from pandas.core.arrays.integer import IntegerArray from pandas.core.arrays.interval import IntervalArray from pandas.core.arrays.masked import BaseMaskedArray from pandas.core.arrays.numpy_ import NumpyExtensionArray from pandas.core.arrays.period import PeriodArray, period_array from pandas.core.arrays.sparse import SparseArray from pandas.core.arrays.string_ import StringArray from pandas.core.arrays.string_arrow import ArrowStringArray from pandas.core.arrays.timedeltas import TimedeltaArray __all__ = ['ArrowExtensionArray', 'ExtensionArray', 'ExtensionOpsMixin', 'ExtensionScalarOpsMixin', 'ArrowStringArray', 'BaseMaskedArray', 'BooleanArray', 'Categorical', 'DatetimeArray', 'FloatingArray', 'IntegerArray', 'IntervalArray', 'NumpyExtensionArray', 'PeriodArray', 'period_array', 'SparseArray', 'StringArray', 'TimedeltaArray'] # File: pandas-main/pandas/core/arrays/_arrow_string_mixins.py from __future__ import annotations from functools import partial import re from typing import TYPE_CHECKING, Any, Literal import numpy as np from pandas.compat import pa_version_under10p1, pa_version_under11p0, pa_version_under13p0, pa_version_under17p0 from pandas.core.dtypes.missing import isna if not pa_version_under10p1: import pyarrow as pa import pyarrow.compute as pc if TYPE_CHECKING: from collections.abc import Callable from pandas._typing import Scalar, Self class ArrowStringArrayMixin: _pa_array: pa.ChunkedArray def __init__(self, *args, **kwargs) -> None: raise NotImplementedError def _convert_bool_result(self, result): raise NotImplementedError def _convert_int_result(self, result): raise NotImplementedError def _apply_elementwise(self, func: Callable) -> list[list[Any]]: raise NotImplementedError def _str_len(self): result = pc.utf8_length(self._pa_array) return self._convert_int_result(result) def _str_lower(self) -> Self: return type(self)(pc.utf8_lower(self._pa_array)) def _str_upper(self) -> Self: return type(self)(pc.utf8_upper(self._pa_array)) def _str_strip(self, 
to_strip=None) -> Self: if to_strip is None: result = pc.utf8_trim_whitespace(self._pa_array) else: result = pc.utf8_trim(self._pa_array, characters=to_strip) return type(self)(result) def _str_lstrip(self, to_strip=None) -> Self: if to_strip is None: result = pc.utf8_ltrim_whitespace(self._pa_array) else: result = pc.utf8_ltrim(self._pa_array, characters=to_strip) return type(self)(result) def _str_rstrip(self, to_strip=None) -> Self: if to_strip is None: result = pc.utf8_rtrim_whitespace(self._pa_array) else: result = pc.utf8_rtrim(self._pa_array, characters=to_strip) return type(self)(result) def _str_pad(self, width: int, side: Literal['left', 'right', 'both']='left', fillchar: str=' ') -> Self: if side == 'left': pa_pad = pc.utf8_lpad elif side == 'right': pa_pad = pc.utf8_rpad elif side == 'both': if pa_version_under17p0: from pandas import array obj_arr = self.astype(object, copy=False) obj = array(obj_arr, dtype=object) result = obj._str_pad(width, side, fillchar) return type(self)._from_sequence(result, dtype=self.dtype) else: lean_left = width % 2 == 0 pa_pad = partial(pc.utf8_center, lean_left_on_odd_padding=lean_left) else: raise ValueError(f"Invalid side: {side}. Side must be one of 'left', 'right', 'both'") return type(self)(pa_pad(self._pa_array, width=width, padding=fillchar)) def _str_get(self, i: int) -> Self: lengths = pc.utf8_length(self._pa_array) if i >= 0: out_of_bounds = pc.greater_equal(i, lengths) start = i stop = i + 1 step = 1 else: out_of_bounds = pc.greater(-i, lengths) start = i stop = i - 1 step = -1 not_out_of_bounds = pc.invert(out_of_bounds.fill_null(True)) selected = pc.utf8_slice_codeunits(self._pa_array, start=start, stop=stop, step=step) null_value = pa.scalar(None, type=self._pa_array.type) result = pc.if_else(not_out_of_bounds, selected, null_value) return type(self)(result) def _str_slice(self, start: int | None=None, stop: int | None=None, step: int | None=None) -> Self: if pa_version_under11p0: result = self._apply_elementwise(lambda val: val[start:stop:step]) return type(self)(pa.chunked_array(result, type=self._pa_array.type)) if start is None: if step is not None and step < 0: start = -1 else: start = 0 if step is None: step = 1 return type(self)(pc.utf8_slice_codeunits(self._pa_array, start=start, stop=stop, step=step)) def _str_slice_replace(self, start: int | None=None, stop: int | None=None, repl: str | None=None) -> Self: if repl is None: repl = '' if start is None: start = 0 if stop is None: stop = np.iinfo(np.int64).max return type(self)(pc.utf8_replace_slice(self._pa_array, start, stop, repl)) def _str_replace(self, pat: str | re.Pattern, repl: str | Callable, n: int=-1, case: bool=True, flags: int=0, regex: bool=True) -> Self: if isinstance(pat, re.Pattern) or callable(repl) or (not case) or flags: raise NotImplementedError('replace is not supported with a re.Pattern, callable repl, case=False, or flags!=0') func = pc.replace_substring_regex if regex else pc.replace_substring pa_max_replacements = None if n < 0 else n result = func(self._pa_array, pattern=pat, replacement=repl, max_replacements=pa_max_replacements) return type(self)(result) def _str_capitalize(self) -> Self: return type(self)(pc.utf8_capitalize(self._pa_array)) def _str_title(self) -> Self: return type(self)(pc.utf8_title(self._pa_array)) def _str_swapcase(self) -> Self: return type(self)(pc.utf8_swapcase(self._pa_array)) def _str_removeprefix(self, prefix: str): if not pa_version_under13p0: starts_with = pc.starts_with(self._pa_array, pattern=prefix) removed = 
pc.utf8_slice_codeunits(self._pa_array, len(prefix)) result = pc.if_else(starts_with, removed, self._pa_array) return type(self)(result) predicate = lambda val: val.removeprefix(prefix) result = self._apply_elementwise(predicate) return type(self)(pa.chunked_array(result)) def _str_removesuffix(self, suffix: str): ends_with = pc.ends_with(self._pa_array, pattern=suffix) removed = pc.utf8_slice_codeunits(self._pa_array, 0, stop=-len(suffix)) result = pc.if_else(ends_with, removed, self._pa_array) return type(self)(result) def _str_startswith(self, pat: str | tuple[str, ...], na: Scalar | None=None): if isinstance(pat, str): result = pc.starts_with(self._pa_array, pattern=pat) elif len(pat) == 0: result = pc.if_else(pc.is_null(self._pa_array), None, False) else: result = pc.starts_with(self._pa_array, pattern=pat[0]) for p in pat[1:]: result = pc.or_(result, pc.starts_with(self._pa_array, pattern=p)) if not isna(na): result = result.fill_null(na) return self._convert_bool_result(result) def _str_endswith(self, pat: str | tuple[str, ...], na: Scalar | None=None): if isinstance(pat, str): result = pc.ends_with(self._pa_array, pattern=pat) elif len(pat) == 0: result = pc.if_else(pc.is_null(self._pa_array), None, False) else: result = pc.ends_with(self._pa_array, pattern=pat[0]) for p in pat[1:]: result = pc.or_(result, pc.ends_with(self._pa_array, pattern=p)) if not isna(na): result = result.fill_null(na) return self._convert_bool_result(result) def _str_isalnum(self): result = pc.utf8_is_alnum(self._pa_array) return self._convert_bool_result(result) def _str_isalpha(self): result = pc.utf8_is_alpha(self._pa_array) return self._convert_bool_result(result) def _str_isdecimal(self): result = pc.utf8_is_decimal(self._pa_array) return self._convert_bool_result(result) def _str_isdigit(self): result = pc.utf8_is_digit(self._pa_array) return self._convert_bool_result(result) def _str_islower(self): result = pc.utf8_is_lower(self._pa_array) return self._convert_bool_result(result) def _str_isnumeric(self): result = pc.utf8_is_numeric(self._pa_array) return self._convert_bool_result(result) def _str_isspace(self): result = pc.utf8_is_space(self._pa_array) return self._convert_bool_result(result) def _str_istitle(self): result = pc.utf8_is_title(self._pa_array) return self._convert_bool_result(result) def _str_isupper(self): result = pc.utf8_is_upper(self._pa_array) return self._convert_bool_result(result) def _str_contains(self, pat, case: bool=True, flags: int=0, na=None, regex: bool=True): if flags: raise NotImplementedError(f'contains not implemented with flags={flags!r}') if regex: pa_contains = pc.match_substring_regex else: pa_contains = pc.match_substring result = pa_contains(self._pa_array, pat, ignore_case=not case) if not isna(na): result = result.fill_null(na) return self._convert_bool_result(result) def _str_match(self, pat: str, case: bool=True, flags: int=0, na: Scalar | None=None): if not pat.startswith('^'): pat = f'^{pat}' return self._str_contains(pat, case, flags, na, regex=True) def _str_fullmatch(self, pat, case: bool=True, flags: int=0, na: Scalar | None=None): if not pat.endswith('$') or pat.endswith('\\$'): pat = f'{pat}$' return self._str_match(pat, case, flags, na) def _str_find(self, sub: str, start: int=0, end: int | None=None): if pa_version_under13p0 and (not (start != 0 and end is not None)) and (not (start == 0 and end is None)): res_list = self._apply_elementwise(lambda val: val.find(sub, start, end)) return self._convert_int_result(pa.chunked_array(res_list)) if (start 
== 0 or start is None) and end is None: result = pc.find_substring(self._pa_array, sub) else: if sub == '': res_list = self._apply_elementwise(lambda val: val.find(sub, start, end)) return self._convert_int_result(pa.chunked_array(res_list)) if start is None: start_offset = 0 start = 0 elif start < 0: start_offset = pc.add(start, pc.utf8_length(self._pa_array)) start_offset = pc.if_else(pc.less(start_offset, 0), 0, start_offset) else: start_offset = start slices = pc.utf8_slice_codeunits(self._pa_array, start, stop=end) result = pc.find_substring(slices, sub) found = pc.not_equal(result, pa.scalar(-1, type=result.type)) offset_result = pc.add(result, start_offset) result = pc.if_else(found, offset_result, -1) return self._convert_int_result(result) # File: pandas-main/pandas/core/arrays/_mixins.py from __future__ import annotations from functools import wraps from typing import TYPE_CHECKING, Any, Literal, cast, overload import numpy as np from pandas._libs import lib from pandas._libs.arrays import NDArrayBacked from pandas._libs.tslibs import is_supported_dtype from pandas._typing import ArrayLike, AxisInt, Dtype, F, FillnaOptions, PositionalIndexer2D, PositionalIndexerTuple, ScalarIndexer, Self, SequenceIndexer, Shape, TakeIndexer, npt from pandas.errors import AbstractMethodError from pandas.util._decorators import doc from pandas.util._validators import validate_bool_kwarg, validate_insert_loc from pandas.core.dtypes.common import pandas_dtype from pandas.core.dtypes.dtypes import DatetimeTZDtype, ExtensionDtype, PeriodDtype from pandas.core.dtypes.missing import array_equivalent from pandas.core import missing from pandas.core.algorithms import take, unique, value_counts_internal as value_counts from pandas.core.array_algos.quantile import quantile_with_mask from pandas.core.array_algos.transforms import shift from pandas.core.arrays.base import ExtensionArray from pandas.core.construction import extract_array from pandas.core.indexers import check_array_indexer from pandas.core.sorting import nargminmax if TYPE_CHECKING: from collections.abc import Sequence from pandas._typing import NumpySorter, NumpyValueArrayLike from pandas import Series def ravel_compat(meth: F) -> F: @wraps(meth) def method(self, *args, **kwargs): if self.ndim == 1: return meth(self, *args, **kwargs) flags = self._ndarray.flags flat = self.ravel('K') result = meth(flat, *args, **kwargs) order = 'F' if flags.f_contiguous else 'C' return result.reshape(self.shape, order=order) return cast(F, method) class NDArrayBackedExtensionArray(NDArrayBacked, ExtensionArray): _ndarray: np.ndarray _internal_fill_value: Any def _box_func(self, x): return x def _validate_scalar(self, value): raise AbstractMethodError(self) def view(self, dtype: Dtype | None=None) -> ArrayLike: if dtype is None or dtype is self.dtype: return self._from_backing_data(self._ndarray) if isinstance(dtype, type): return self._ndarray.view(dtype) dtype = pandas_dtype(dtype) arr = self._ndarray if isinstance(dtype, PeriodDtype): cls = dtype.construct_array_type() return cls(arr.view('i8'), dtype=dtype) elif isinstance(dtype, DatetimeTZDtype): dt_cls = dtype.construct_array_type() dt64_values = arr.view(f'M8[{dtype.unit}]') return dt_cls._simple_new(dt64_values, dtype=dtype) elif lib.is_np_dtype(dtype, 'M') and is_supported_dtype(dtype): from pandas.core.arrays import DatetimeArray dt64_values = arr.view(dtype) return DatetimeArray._simple_new(dt64_values, dtype=dtype) elif lib.is_np_dtype(dtype, 'm') and is_supported_dtype(dtype): from 
pandas.core.arrays import TimedeltaArray td64_values = arr.view(dtype) return TimedeltaArray._simple_new(td64_values, dtype=dtype) return arr.view(dtype=dtype) def take(self, indices: TakeIndexer, *, allow_fill: bool=False, fill_value: Any=None, axis: AxisInt=0) -> Self: if allow_fill: fill_value = self._validate_scalar(fill_value) new_data = take(self._ndarray, indices, allow_fill=allow_fill, fill_value=fill_value, axis=axis) return self._from_backing_data(new_data) def equals(self, other) -> bool: if type(self) is not type(other): return False if self.dtype != other.dtype: return False return bool(array_equivalent(self._ndarray, other._ndarray, dtype_equal=True)) @classmethod def _from_factorized(cls, values, original): assert values.dtype == original._ndarray.dtype return original._from_backing_data(values) def _values_for_argsort(self) -> np.ndarray: return self._ndarray def _values_for_factorize(self): return (self._ndarray, self._internal_fill_value) def _hash_pandas_object(self, *, encoding: str, hash_key: str, categorize: bool) -> npt.NDArray[np.uint64]: from pandas.core.util.hashing import hash_array values = self._ndarray return hash_array(values, encoding=encoding, hash_key=hash_key, categorize=categorize) def argmin(self, axis: AxisInt=0, skipna: bool=True): validate_bool_kwarg(skipna, 'skipna') if not skipna and self._hasna: raise ValueError('Encountered an NA value with skipna=False') return nargminmax(self, 'argmin', axis=axis) def argmax(self, axis: AxisInt=0, skipna: bool=True): validate_bool_kwarg(skipna, 'skipna') if not skipna and self._hasna: raise ValueError('Encountered an NA value with skipna=False') return nargminmax(self, 'argmax', axis=axis) def unique(self) -> Self: new_data = unique(self._ndarray) return self._from_backing_data(new_data) @classmethod @doc(ExtensionArray._concat_same_type) def _concat_same_type(cls, to_concat: Sequence[Self], axis: AxisInt=0) -> Self: if not lib.dtypes_all_equal([x.dtype for x in to_concat]): dtypes = {str(x.dtype) for x in to_concat} raise ValueError('to_concat must have the same dtype', dtypes) return super()._concat_same_type(to_concat, axis=axis) @doc(ExtensionArray.searchsorted) def searchsorted(self, value: NumpyValueArrayLike | ExtensionArray, side: Literal['left', 'right']='left', sorter: NumpySorter | None=None) -> npt.NDArray[np.intp] | np.intp: npvalue = self._validate_setitem_value(value) return self._ndarray.searchsorted(npvalue, side=side, sorter=sorter) @doc(ExtensionArray.shift) def shift(self, periods: int=1, fill_value=None) -> Self: axis = 0 fill_value = self._validate_scalar(fill_value) new_values = shift(self._ndarray, periods, axis, fill_value) return self._from_backing_data(new_values) def __setitem__(self, key, value) -> None: key = check_array_indexer(self, key) value = self._validate_setitem_value(value) self._ndarray[key] = value def _validate_setitem_value(self, value): return value @overload def __getitem__(self, key: ScalarIndexer) -> Any: ... @overload def __getitem__(self, key: SequenceIndexer | PositionalIndexerTuple) -> Self: ... 
def __getitem__(self, key: PositionalIndexer2D) -> Self | Any: if lib.is_integer(key): result = self._ndarray[key] if self.ndim == 1: return self._box_func(result) return self._from_backing_data(result) key = extract_array(key, extract_numpy=True) key = check_array_indexer(self, key) result = self._ndarray[key] if lib.is_scalar(result): return self._box_func(result) result = self._from_backing_data(result) return result def _pad_or_backfill(self, *, method: FillnaOptions, limit: int | None=None, limit_area: Literal['inside', 'outside'] | None=None, copy: bool=True) -> Self: mask = self.isna() if mask.any(): func = missing.get_fill_func(method, ndim=self.ndim) npvalues = self._ndarray.T if copy: npvalues = npvalues.copy() func(npvalues, limit=limit, limit_area=limit_area, mask=mask.T) npvalues = npvalues.T if copy: new_values = self._from_backing_data(npvalues) else: new_values = self elif copy: new_values = self.copy() else: new_values = self return new_values @doc(ExtensionArray.fillna) def fillna(self, value, limit: int | None=None, copy: bool=True) -> Self: mask = self.isna() if limit is not None and limit < len(self): modify = mask.cumsum() > limit if modify.any(): mask = mask.copy() mask[modify] = False value = missing.check_value_size(value, mask, len(self)) if mask.any(): if copy: new_values = self.copy() else: new_values = self[:] new_values[mask] = value else: self._validate_setitem_value(value) if not copy: new_values = self[:] else: new_values = self.copy() return new_values def _wrap_reduction_result(self, axis: AxisInt | None, result) -> Any: if axis is None or self.ndim == 1: return self._box_func(result) return self._from_backing_data(result) def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: value = self._validate_setitem_value(value) np.putmask(self._ndarray, mask, value) def _where(self: Self, mask: npt.NDArray[np.bool_], value) -> Self: value = self._validate_setitem_value(value) res_values = np.where(mask, self._ndarray, value) if res_values.dtype != self._ndarray.dtype: raise AssertionError('Something has gone wrong, please report a bug at github.com/pandas-dev/pandas/') return self._from_backing_data(res_values) def insert(self, loc: int, item) -> Self: loc = validate_insert_loc(loc, len(self)) code = self._validate_scalar(item) new_vals = np.concatenate((self._ndarray[:loc], np.asarray([code], dtype=self._ndarray.dtype), self._ndarray[loc:])) return self._from_backing_data(new_vals) def value_counts(self, dropna: bool=True) -> Series: if self.ndim != 1: raise NotImplementedError from pandas import Index, Series if dropna: values = self[~self.isna()]._ndarray else: values = self._ndarray result = value_counts(values, sort=False, dropna=dropna) index_arr = self._from_backing_data(np.asarray(result.index._data)) index = Index(index_arr, name=result.index.name) return Series(result._values, index=index, name=result.name, copy=False) def _quantile(self, qs: npt.NDArray[np.float64], interpolation: str) -> Self: mask = np.asarray(self.isna()) arr = self._ndarray fill_value = self._internal_fill_value res_values = quantile_with_mask(arr, mask, fill_value, qs, interpolation) if res_values.dtype == self._ndarray.dtype: return self._from_backing_data(res_values) else: return type(self)(res_values) @classmethod def _empty(cls, shape: Shape, dtype: ExtensionDtype) -> Self: arr = cls._from_sequence([], dtype=dtype) backing = np.empty(shape, dtype=arr._ndarray.dtype) return arr._from_backing_data(backing) # File: pandas-main/pandas/core/arrays/_ranges.py """""" from 
__future__ import annotations from typing import TYPE_CHECKING import numpy as np from pandas._libs.lib import i8max from pandas._libs.tslibs import BaseOffset, OutOfBoundsDatetime, Timedelta, Timestamp, iNaT from pandas.core.construction import range_to_ndarray if TYPE_CHECKING: from pandas._typing import npt def generate_regular_range(start: Timestamp | Timedelta | None, end: Timestamp | Timedelta | None, periods: int | None, freq: BaseOffset, unit: str='ns') -> npt.NDArray[np.intp]: istart = start._value if start is not None else None iend = end._value if end is not None else None freq.nanos td = Timedelta(freq) b: int e: int try: td = td.as_unit(unit, round_ok=False) except ValueError as err: raise ValueError(f'freq={freq} is incompatible with unit={unit}. Use a lower freq or a higher unit instead.') from err stride = int(td._value) if periods is None and istart is not None and (iend is not None): b = istart e = b + (iend - b) // stride * stride + stride // 2 + 1 elif istart is not None and periods is not None: b = istart e = _generate_range_overflow_safe(b, periods, stride, side='start') elif iend is not None and periods is not None: e = iend + stride b = _generate_range_overflow_safe(e, periods, stride, side='end') else: raise ValueError("at least 'start' or 'end' should be specified if a 'period' is given.") return range_to_ndarray(range(b, e, stride)) def _generate_range_overflow_safe(endpoint: int, periods: int, stride: int, side: str='start') -> int: assert side in ['start', 'end'] i64max = np.uint64(i8max) msg = f'Cannot generate range with {side}={endpoint} and periods={periods}' with np.errstate(over='raise'): try: addend = np.uint64(periods) * np.uint64(np.abs(stride)) except FloatingPointError as err: raise OutOfBoundsDatetime(msg) from err if np.abs(addend) <= i64max: return _generate_range_overflow_safe_signed(endpoint, periods, stride, side) elif endpoint > 0 and side == 'start' and (stride > 0) or (endpoint < 0 < stride and side == 'end'): raise OutOfBoundsDatetime(msg) elif side == 'end' and endpoint - stride <= i64max < endpoint: return _generate_range_overflow_safe(endpoint - stride, periods - 1, stride, side) mid_periods = periods // 2 remaining = periods - mid_periods assert 0 < remaining < periods, (remaining, periods, endpoint, stride) midpoint = int(_generate_range_overflow_safe(endpoint, mid_periods, stride, side)) return _generate_range_overflow_safe(midpoint, remaining, stride, side) def _generate_range_overflow_safe_signed(endpoint: int, periods: int, stride: int, side: str) -> int: assert side in ['start', 'end'] if side == 'end': stride *= -1 with np.errstate(over='raise'): addend = np.int64(periods) * np.int64(stride) try: result = np.int64(endpoint) + addend if result == iNaT: raise OverflowError return int(result) except (FloatingPointError, OverflowError): pass assert stride > 0 and endpoint >= 0 or (stride < 0 and endpoint <= 0) if stride > 0: uresult = np.uint64(endpoint) + np.uint64(addend) i64max = np.uint64(i8max) assert uresult > i64max if uresult <= i64max + np.uint64(stride): return int(uresult) raise OutOfBoundsDatetime(f'Cannot generate range with {side}={endpoint} and periods={periods}') # File: pandas-main/pandas/core/arrays/_utils.py from __future__ import annotations from typing import TYPE_CHECKING, Any import numpy as np from pandas._libs import lib from pandas.errors import LossySetitemError from pandas.core.dtypes.cast import np_can_hold_element from pandas.core.dtypes.common import is_numeric_dtype if TYPE_CHECKING: from 
pandas._typing import ArrayLike, npt def to_numpy_dtype_inference(arr: ArrayLike, dtype: npt.DTypeLike | None, na_value, hasna: bool) -> tuple[npt.DTypeLike, Any]: if dtype is None and is_numeric_dtype(arr.dtype): dtype_given = False if hasna: if arr.dtype.kind == 'b': dtype = np.dtype(np.object_) else: if arr.dtype.kind in 'iu': dtype = np.dtype(np.float64) else: dtype = arr.dtype.numpy_dtype if na_value is lib.no_default: na_value = np.nan else: dtype = arr.dtype.numpy_dtype elif dtype is not None: dtype = np.dtype(dtype) dtype_given = True else: dtype_given = True if na_value is lib.no_default: if dtype is None or not hasna: na_value = arr.dtype.na_value elif dtype.kind == 'f': na_value = np.nan elif dtype.kind == 'M': na_value = np.datetime64('nat') elif dtype.kind == 'm': na_value = np.timedelta64('nat') else: na_value = arr.dtype.na_value if not dtype_given and hasna: try: np_can_hold_element(dtype, na_value) except LossySetitemError: dtype = np.dtype(np.object_) return (dtype, na_value) # File: pandas-main/pandas/core/arrays/arrow/_arrow_utils.py from __future__ import annotations import numpy as np import pyarrow def pyarrow_array_to_numpy_and_mask(arr, dtype: np.dtype) -> tuple[np.ndarray, np.ndarray]: dtype = np.dtype(dtype) if pyarrow.types.is_null(arr.type): data = np.empty(len(arr), dtype=dtype) mask = np.zeros(len(arr), dtype=bool) return (data, mask) buflist = arr.buffers() offset = arr.offset * dtype.itemsize length = len(arr) * dtype.itemsize data_buf = buflist[1][offset:offset + length] data = np.frombuffer(data_buf, dtype=dtype) bitmask = buflist[0] if bitmask is not None: mask = pyarrow.BooleanArray.from_buffers(pyarrow.bool_(), len(arr), [None, bitmask], offset=arr.offset) mask = np.asarray(mask) else: mask = np.ones(len(arr), dtype=bool) return (data, mask) # File: pandas-main/pandas/core/arrays/arrow/accessors.py """""" from __future__ import annotations from abc import ABCMeta, abstractmethod from typing import TYPE_CHECKING, cast from pandas.compat import pa_version_under10p1, pa_version_under11p0 from pandas.core.dtypes.common import is_list_like if not pa_version_under10p1: import pyarrow as pa import pyarrow.compute as pc from pandas.core.dtypes.dtypes import ArrowDtype if TYPE_CHECKING: from collections.abc import Iterator from pandas import DataFrame, Series class ArrowAccessor(metaclass=ABCMeta): @abstractmethod def __init__(self, data, validation_msg: str) -> None: self._data = data self._validation_msg = validation_msg self._validate(data) @abstractmethod def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool: pass def _validate(self, data) -> None: dtype = data.dtype if not isinstance(dtype, ArrowDtype): raise AttributeError(self._validation_msg.format(dtype=dtype)) if not self._is_valid_pyarrow_dtype(dtype.pyarrow_dtype): raise AttributeError(self._validation_msg.format(dtype=dtype)) @property def _pa_array(self): return self._data.array._pa_array class ListAccessor(ArrowAccessor): def __init__(self, data=None) -> None: super().__init__(data, validation_msg="Can only use the '.list' accessor with 'list[pyarrow]' dtype, not {dtype}.") def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool: return pa.types.is_list(pyarrow_dtype) or pa.types.is_fixed_size_list(pyarrow_dtype) or pa.types.is_large_list(pyarrow_dtype) def len(self) -> Series: from pandas import Series value_lengths = pc.list_value_length(self._pa_array) return Series(value_lengths, dtype=ArrowDtype(value_lengths.type), index=self._data.index) def __getitem__(self, key: int | slice) -> 
Series: from pandas import Series if isinstance(key, int): element = pc.list_element(self._pa_array, key) return Series(element, dtype=ArrowDtype(element.type), index=self._data.index) elif isinstance(key, slice): if pa_version_under11p0: raise NotImplementedError(f'List slice not supported by pyarrow {pa.__version__}.') (start, stop, step) = (key.start, key.stop, key.step) if start is None: start = 0 if step is None: step = 1 sliced = pc.list_slice(self._pa_array, start, stop, step) return Series(sliced, dtype=ArrowDtype(sliced.type), index=self._data.index) else: raise ValueError(f'key must be an int or slice, got {type(key).__name__}') def __iter__(self) -> Iterator: raise TypeError(f"'{type(self).__name__}' object is not iterable") def flatten(self) -> Series: from pandas import Series counts = pa.compute.list_value_length(self._pa_array) flattened = pa.compute.list_flatten(self._pa_array) index = self._data.index.repeat(counts.fill_null(pa.scalar(0, counts.type))) return Series(flattened, dtype=ArrowDtype(flattened.type), index=index) class StructAccessor(ArrowAccessor): def __init__(self, data=None) -> None: super().__init__(data, validation_msg="Can only use the '.struct' accessor with 'struct[pyarrow]' dtype, not {dtype}.") def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool: return pa.types.is_struct(pyarrow_dtype) @property def dtypes(self) -> Series: from pandas import Index, Series pa_type = self._data.dtype.pyarrow_dtype types = [ArrowDtype(struct.type) for struct in pa_type] names = [struct.name for struct in pa_type] return Series(types, index=Index(names)) def field(self, name_or_index: list[str] | list[bytes] | list[int] | pc.Expression | bytes | str | int) -> Series: from pandas import Series def get_name(level_name_or_index: list[str] | list[bytes] | list[int] | pc.Expression | bytes | str | int, data: pa.ChunkedArray): if isinstance(level_name_or_index, int): name = data.type.field(level_name_or_index).name elif isinstance(level_name_or_index, (str, bytes)): name = level_name_or_index elif isinstance(level_name_or_index, pc.Expression): name = str(level_name_or_index) elif is_list_like(level_name_or_index): level_name_or_index = list(reversed(level_name_or_index)) selected = data while level_name_or_index: level_name_or_index = cast(list, level_name_or_index) name_or_index = level_name_or_index.pop() name = get_name(name_or_index, selected) selected = selected.type.field(selected.type.get_field_index(name)) name = selected.name else: raise ValueError('name_or_index must be an int, str, bytes, pyarrow.compute.Expression, or list of those') return name pa_arr = self._data.array._pa_array name = get_name(name_or_index, pa_arr) field_arr = pc.struct_field(pa_arr, name_or_index) return Series(field_arr, dtype=ArrowDtype(field_arr.type), index=self._data.index, name=name) def explode(self) -> DataFrame: from pandas import concat pa_type = self._pa_array.type return concat([self.field(i) for i in range(pa_type.num_fields)], axis='columns') # File: pandas-main/pandas/core/arrays/arrow/array.py from __future__ import annotations import functools import operator import re import textwrap from typing import TYPE_CHECKING, Any, Literal, cast, overload import unicodedata import numpy as np from pandas._libs import lib from pandas._libs.tslibs import Timedelta, Timestamp, timezones from pandas.compat import pa_version_under10p1, pa_version_under11p0, pa_version_under13p0 from pandas.util._decorators import doc from pandas.core.dtypes.cast import can_hold_element, 
infer_dtype_from_scalar from pandas.core.dtypes.common import CategoricalDtype, is_array_like, is_bool_dtype, is_float_dtype, is_integer, is_list_like, is_numeric_dtype, is_scalar, pandas_dtype from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas.core import algorithms as algos, missing, ops, roperator from pandas.core.algorithms import map_array from pandas.core.arraylike import OpsMixin from pandas.core.arrays._arrow_string_mixins import ArrowStringArrayMixin from pandas.core.arrays._utils import to_numpy_dtype_inference from pandas.core.arrays.base import ExtensionArray, ExtensionArraySupportsAnyAll from pandas.core.arrays.masked import BaseMaskedArray from pandas.core.arrays.string_ import StringDtype import pandas.core.common as com from pandas.core.indexers import check_array_indexer, unpack_tuple_and_ellipses, validate_indices from pandas.core.strings.base import BaseStringArrayMethods from pandas.io._util import _arrow_dtype_mapping from pandas.tseries.frequencies import to_offset if not pa_version_under10p1: import pyarrow as pa import pyarrow.compute as pc from pandas.core.dtypes.dtypes import ArrowDtype ARROW_CMP_FUNCS = {'eq': pc.equal, 'ne': pc.not_equal, 'lt': pc.less, 'gt': pc.greater, 'le': pc.less_equal, 'ge': pc.greater_equal} ARROW_LOGICAL_FUNCS = {'and_': pc.and_kleene, 'rand_': lambda x, y: pc.and_kleene(y, x), 'or_': pc.or_kleene, 'ror_': lambda x, y: pc.or_kleene(y, x), 'xor': pc.xor, 'rxor': lambda x, y: pc.xor(y, x)} ARROW_BIT_WISE_FUNCS = {'and_': pc.bit_wise_and, 'rand_': lambda x, y: pc.bit_wise_and(y, x), 'or_': pc.bit_wise_or, 'ror_': lambda x, y: pc.bit_wise_or(y, x), 'xor': pc.bit_wise_xor, 'rxor': lambda x, y: pc.bit_wise_xor(y, x)} def cast_for_truediv(arrow_array: pa.ChunkedArray, pa_object: pa.Array | pa.Scalar) -> tuple[pa.ChunkedArray, pa.Array | pa.Scalar]: if pa.types.is_integer(arrow_array.type) and pa.types.is_integer(pa_object.type): return (pc.cast(arrow_array, pa.float64(), safe=False), pc.cast(pa_object, pa.float64(), safe=False)) return (arrow_array, pa_object) def floordiv_compat(left: pa.ChunkedArray | pa.Array | pa.Scalar, right: pa.ChunkedArray | pa.Array | pa.Scalar) -> pa.ChunkedArray: if pa.types.is_integer(left.type) and pa.types.is_integer(right.type): divided = pc.divide_checked(left, right) if pa.types.is_signed_integer(divided.type): has_remainder = pc.not_equal(pc.multiply(divided, right), left) has_one_negative_operand = pc.less(pc.bit_wise_xor(left, right), pa.scalar(0, type=divided.type)) result = pc.if_else(pc.and_(has_remainder, has_one_negative_operand), pc.subtract(divided, pa.scalar(1, type=divided.type)), divided) else: result = divided result = result.cast(left.type) else: divided = pc.divide(left, right) result = pc.floor(divided) return result ARROW_ARITHMETIC_FUNCS = {'add': pc.add_checked, 'radd': lambda x, y: pc.add_checked(y, x), 'sub': pc.subtract_checked, 'rsub': lambda x, y: pc.subtract_checked(y, x), 'mul': pc.multiply_checked, 'rmul': lambda x, y: pc.multiply_checked(y, x), 'truediv': lambda x, y: pc.divide(*cast_for_truediv(x, y)), 'rtruediv': lambda x, y: pc.divide(*cast_for_truediv(y, x)), 'floordiv': lambda x, y: floordiv_compat(x, y), 'rfloordiv': lambda x, y: floordiv_compat(y, x), 'mod': NotImplemented, 'rmod': NotImplemented, 'divmod': NotImplemented, 'rdivmod': NotImplemented, 'pow': pc.power_checked, 'rpow': lambda x, y: pc.power_checked(y, x)} if TYPE_CHECKING: from collections.abc import Callable, Sequence from pandas._libs.missing import 
NAType from pandas._typing import ArrayLike, AxisInt, Dtype, FillnaOptions, InterpolateOptions, Iterator, NpDtype, NumpySorter, NumpyValueArrayLike, PositionalIndexer, Scalar, Self, SortKind, TakeIndexer, TimeAmbiguous, TimeNonexistent, npt from pandas.core.dtypes.dtypes import ExtensionDtype from pandas import Series from pandas.core.arrays.datetimes import DatetimeArray from pandas.core.arrays.timedeltas import TimedeltaArray def get_unit_from_pa_dtype(pa_dtype) -> str: if pa_version_under11p0: unit = str(pa_dtype).split('[', 1)[-1][:-1] if unit not in ['s', 'ms', 'us', 'ns']: raise ValueError(pa_dtype) return unit return pa_dtype.unit def to_pyarrow_type(dtype: ArrowDtype | pa.DataType | Dtype | None) -> pa.DataType | None: if isinstance(dtype, ArrowDtype): return dtype.pyarrow_dtype elif isinstance(dtype, pa.DataType): return dtype elif isinstance(dtype, DatetimeTZDtype): return pa.timestamp(dtype.unit, dtype.tz) elif dtype: try: return pa.from_numpy_dtype(dtype) except pa.ArrowNotImplementedError: pass return None class ArrowExtensionArray(OpsMixin, ExtensionArraySupportsAnyAll, ArrowStringArrayMixin, BaseStringArrayMethods): _pa_array: pa.ChunkedArray _dtype: ArrowDtype def __init__(self, values: pa.Array | pa.ChunkedArray) -> None: if pa_version_under10p1: msg = 'pyarrow>=10.0.1 is required for PyArrow backed ArrowExtensionArray.' raise ImportError(msg) if isinstance(values, pa.Array): self._pa_array = pa.chunked_array([values]) elif isinstance(values, pa.ChunkedArray): self._pa_array = values else: raise ValueError(f"Unsupported type '{type(values)}' for ArrowExtensionArray") self._dtype = ArrowDtype(self._pa_array.type) @classmethod def _from_sequence(cls, scalars, *, dtype: Dtype | None=None, copy: bool=False) -> Self: pa_type = to_pyarrow_type(dtype) pa_array = cls._box_pa_array(scalars, pa_type=pa_type, copy=copy) arr = cls(pa_array) return arr @classmethod def _from_sequence_of_strings(cls, strings, *, dtype: ExtensionDtype, copy: bool=False) -> Self: pa_type = to_pyarrow_type(dtype) if pa_type is None or pa.types.is_binary(pa_type) or pa.types.is_string(pa_type) or pa.types.is_large_string(pa_type): scalars = strings elif pa.types.is_timestamp(pa_type): from pandas.core.tools.datetimes import to_datetime scalars = to_datetime(strings, errors='raise') elif pa.types.is_date(pa_type): from pandas.core.tools.datetimes import to_datetime scalars = to_datetime(strings, errors='raise').date elif pa.types.is_duration(pa_type): from pandas.core.tools.timedeltas import to_timedelta scalars = to_timedelta(strings, errors='raise') if pa_type.unit != 'ns': mask = isna(scalars) if not isinstance(strings, (pa.Array, pa.ChunkedArray)): strings = pa.array(strings, type=pa.string(), from_pandas=True) strings = pc.if_else(mask, None, strings) try: scalars = strings.cast(pa.int64()) except pa.ArrowInvalid: pass elif pa.types.is_time(pa_type): from pandas.core.tools.times import to_time scalars = to_time(strings, errors='coerce') elif pa.types.is_boolean(pa_type): if isinstance(strings, (pa.Array, pa.ChunkedArray)): scalars = strings else: scalars = pa.array(strings, type=pa.string(), from_pandas=True) scalars = pc.if_else(pc.equal(scalars, '1.0'), '1', scalars) scalars = pc.if_else(pc.equal(scalars, '0.0'), '0', scalars) scalars = scalars.cast(pa.bool_()) elif pa.types.is_integer(pa_type) or pa.types.is_floating(pa_type) or pa.types.is_decimal(pa_type): from pandas.core.tools.numeric import to_numeric scalars = to_numeric(strings, errors='raise') else: raise NotImplementedError(f'Converting 
strings to {pa_type} is not implemented.') return cls._from_sequence(scalars, dtype=pa_type, copy=copy) @classmethod def _box_pa(cls, value, pa_type: pa.DataType | None=None) -> pa.Array | pa.ChunkedArray | pa.Scalar: if isinstance(value, pa.Scalar) or not is_list_like(value): return cls._box_pa_scalar(value, pa_type) return cls._box_pa_array(value, pa_type) @classmethod def _box_pa_scalar(cls, value, pa_type: pa.DataType | None=None) -> pa.Scalar: if isinstance(value, pa.Scalar): pa_scalar = value elif isna(value): pa_scalar = pa.scalar(None, type=pa_type) else: if isinstance(value, Timedelta): if pa_type is None: pa_type = pa.duration(value.unit) elif value.unit != pa_type.unit: value = value.as_unit(pa_type.unit) value = value._value elif isinstance(value, Timestamp): if pa_type is None: pa_type = pa.timestamp(value.unit, tz=value.tz) elif value.unit != pa_type.unit: value = value.as_unit(pa_type.unit) value = value._value pa_scalar = pa.scalar(value, type=pa_type, from_pandas=True) if pa_type is not None and pa_scalar.type != pa_type: pa_scalar = pa_scalar.cast(pa_type) return pa_scalar @classmethod def _box_pa_array(cls, value, pa_type: pa.DataType | None=None, copy: bool=False) -> pa.Array | pa.ChunkedArray: if isinstance(value, cls): pa_array = value._pa_array elif isinstance(value, (pa.Array, pa.ChunkedArray)): pa_array = value elif isinstance(value, BaseMaskedArray): if copy: value = value.copy() pa_array = value.__arrow_array__() else: if isinstance(value, np.ndarray) and pa_type is not None and (pa.types.is_large_binary(pa_type) or pa.types.is_large_string(pa_type)): value = value.tolist() elif copy and is_array_like(value): value = value.copy() if pa_type is not None and pa.types.is_duration(pa_type) and (not isinstance(value, np.ndarray) or value.dtype.kind not in 'mi'): from pandas.core.tools.timedeltas import to_timedelta value = to_timedelta(value, unit=pa_type.unit).as_unit(pa_type.unit) value = value.to_numpy() try: pa_array = pa.array(value, type=pa_type, from_pandas=True) except (pa.ArrowInvalid, pa.ArrowTypeError): pa_array = pa.array(value, from_pandas=True) if pa_type is None and pa.types.is_duration(pa_array.type): from pandas.core.tools.timedeltas import to_timedelta value = to_timedelta(value) value = value.to_numpy() pa_array = pa.array(value, type=pa_type, from_pandas=True) if pa.types.is_duration(pa_array.type) and pa_array.null_count > 0: arr = cls(pa_array) arr = arr.fillna(arr.dtype.na_value) pa_array = arr._pa_array if pa_type is not None and pa_array.type != pa_type: if pa.types.is_dictionary(pa_type): pa_array = pa_array.dictionary_encode() if pa_array.type != pa_type: pa_array = pa_array.cast(pa_type) else: try: pa_array = pa_array.cast(pa_type) except (pa.ArrowNotImplementedError, pa.ArrowTypeError): if pa.types.is_string(pa_array.type) or pa.types.is_large_string(pa_array.type): dtype = ArrowDtype(pa_type) return cls._from_sequence_of_strings(value, dtype=dtype)._pa_array else: raise return pa_array def __getitem__(self, item: PositionalIndexer): item = check_array_indexer(self, item) if isinstance(item, np.ndarray): if not len(item): if isinstance(self._dtype, StringDtype) and self._dtype.storage == 'pyarrow': pa_dtype = pa.string() else: pa_dtype = self._dtype.pyarrow_dtype return type(self)(pa.chunked_array([], type=pa_dtype)) elif item.dtype.kind in 'iu': return self.take(item) elif item.dtype.kind == 'b': return type(self)(self._pa_array.filter(item)) else: raise IndexError('Only integers, slices and integer or boolean arrays are valid indices.') 
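# --- Illustrative aside, not part of the pandas source above: a minimal sketch of
# --- how the boxing helpers and __getitem__ surface through the public API.
# --- Assumes a pandas build with pyarrow installed; `arr` is a hypothetical name.
import pandas as pd
import pyarrow as pa

arr = pd.array([1, 2, None], dtype=pd.ArrowDtype(pa.int64()))
assert arr[0] == 1          # scalar access unboxes the underlying pyarrow scalar
assert arr[2] is pd.NA      # null slots come back as the dtype's na_value
taken = arr[[0, 2]]         # integer-array indexing dispatches to take()
assert taken[1] is pd.NA
# --- end of aside ---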
elif isinstance(item, tuple): item = unpack_tuple_and_ellipses(item) if item is Ellipsis: item = slice(None) if is_scalar(item) and (not is_integer(item)): raise IndexError('only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and integer or boolean arrays are valid indices') if isinstance(item, slice): if item.start == item.stop: pass elif item.stop is not None and item.stop < -len(self) and (item.step is not None) and (item.step < 0): item = slice(item.start, None, item.step) value = self._pa_array[item] if isinstance(value, pa.ChunkedArray): return type(self)(value) else: pa_type = self._pa_array.type scalar = value.as_py() if scalar is None: return self._dtype.na_value elif pa.types.is_timestamp(pa_type) and pa_type.unit != 'ns': return Timestamp(scalar).as_unit(pa_type.unit) elif pa.types.is_duration(pa_type) and pa_type.unit != 'ns': return Timedelta(scalar).as_unit(pa_type.unit) else: return scalar def __iter__(self) -> Iterator[Any]: na_value = self._dtype.na_value pa_type = self._pa_array.type box_timestamp = pa.types.is_timestamp(pa_type) and pa_type.unit != 'ns' box_timedelta = pa.types.is_duration(pa_type) and pa_type.unit != 'ns' for value in self._pa_array: val = value.as_py() if val is None: yield na_value elif box_timestamp: yield Timestamp(val).as_unit(pa_type.unit) elif box_timedelta: yield Timedelta(val).as_unit(pa_type.unit) else: yield val def __arrow_array__(self, type=None): return self._pa_array def __array__(self, dtype: NpDtype | None=None, copy: bool | None=None) -> np.ndarray: return self.to_numpy(dtype=dtype) def __invert__(self) -> Self: if pa.types.is_integer(self._pa_array.type): return type(self)(pc.bit_wise_not(self._pa_array)) elif pa.types.is_string(self._pa_array.type) or pa.types.is_large_string(self._pa_array.type): raise TypeError('__invert__ is not supported for string dtypes') else: return type(self)(pc.invert(self._pa_array)) def __neg__(self) -> Self: try: return type(self)(pc.negate_checked(self._pa_array)) except pa.ArrowNotImplementedError as err: raise TypeError(f"unary '-' not supported for dtype '{self.dtype}'") from err def __pos__(self) -> Self: return type(self)(self._pa_array) def __abs__(self) -> Self: return type(self)(pc.abs_checked(self._pa_array)) def __getstate__(self): state = self.__dict__.copy() state['_pa_array'] = self._pa_array.combine_chunks() return state def __setstate__(self, state) -> None: if '_data' in state: data = state.pop('_data') else: data = state['_pa_array'] state['_pa_array'] = pa.chunked_array(data) self.__dict__.update(state) def _cmp_method(self, other, op) -> ArrowExtensionArray: pc_func = ARROW_CMP_FUNCS[op.__name__] if isinstance(other, (ArrowExtensionArray, np.ndarray, list, BaseMaskedArray)) or isinstance(getattr(other, 'dtype', None), CategoricalDtype): try: result = pc_func(self._pa_array, self._box_pa(other)) except pa.ArrowNotImplementedError: result = ops.invalid_comparison(self, other, op) result = pa.array(result, type=pa.bool_()) elif is_scalar(other): try: result = pc_func(self._pa_array, self._box_pa(other)) except (pa.lib.ArrowNotImplementedError, pa.lib.ArrowInvalid): mask = isna(self) | isna(other) valid = ~mask result = np.zeros(len(self), dtype='bool') np_array = np.array(self) try: result[valid] = op(np_array[valid], other) except TypeError: result = ops.invalid_comparison(np_array, other, op) result = pa.array(result, type=pa.bool_()) result = pc.if_else(valid, result, None) else: raise NotImplementedError(f'{op.__name__} not implemented for {type(other)}') return 
ArrowExtensionArray(result) def _op_method_error_message(self, other, op) -> str: if hasattr(other, 'dtype'): other_type = f"dtype '{other.dtype}'" else: other_type = f'object of type {type(other)}' return f"operation '{op.__name__}' not supported for dtype '{self.dtype}' with {other_type}" def _evaluate_op_method(self, other, op, arrow_funcs) -> Self: pa_type = self._pa_array.type other_original = other other = self._box_pa(other) if pa.types.is_string(pa_type) or pa.types.is_large_string(pa_type) or pa.types.is_binary(pa_type): if op in [operator.add, roperator.radd]: sep = pa.scalar('', type=pa_type) try: if op is operator.add: result = pc.binary_join_element_wise(self._pa_array, other, sep) elif op is roperator.radd: result = pc.binary_join_element_wise(other, self._pa_array, sep) except pa.ArrowNotImplementedError as err: raise TypeError(self._op_method_error_message(other_original, op)) from err return type(self)(result) elif op in [operator.mul, roperator.rmul]: binary = self._pa_array integral = other if not pa.types.is_integer(integral.type): raise TypeError('Can only string multiply by an integer.') pa_integral = pc.if_else(pc.less(integral, 0), 0, integral) result = pc.binary_repeat(binary, pa_integral) return type(self)(result) elif (pa.types.is_string(other.type) or pa.types.is_binary(other.type) or pa.types.is_large_string(other.type)) and op in [operator.mul, roperator.rmul]: binary = other integral = self._pa_array if not pa.types.is_integer(integral.type): raise TypeError('Can only string multiply by an integer.') pa_integral = pc.if_else(pc.less(integral, 0), 0, integral) result = pc.binary_repeat(binary, pa_integral) return type(self)(result) if isinstance(other, pa.Scalar) and pc.is_null(other).as_py() and (op.__name__ in ARROW_LOGICAL_FUNCS): other = other.cast(pa_type) pc_func = arrow_funcs[op.__name__] if pc_func is NotImplemented: if pa.types.is_string(pa_type) or pa.types.is_large_string(pa_type): raise TypeError(self._op_method_error_message(other_original, op)) raise NotImplementedError(f'{op.__name__} not implemented.') try: result = pc_func(self._pa_array, other) except pa.ArrowNotImplementedError as err: raise TypeError(self._op_method_error_message(other_original, op)) from err return type(self)(result) def _logical_method(self, other, op) -> Self: if pa.types.is_integer(self._pa_array.type): return self._evaluate_op_method(other, op, ARROW_BIT_WISE_FUNCS) else: return self._evaluate_op_method(other, op, ARROW_LOGICAL_FUNCS) def _arith_method(self, other, op) -> Self: return self._evaluate_op_method(other, op, ARROW_ARITHMETIC_FUNCS) def equals(self, other) -> bool: if not isinstance(other, ArrowExtensionArray): return False return self._pa_array == other._pa_array @property def dtype(self) -> ArrowDtype: return self._dtype @property def nbytes(self) -> int: return self._pa_array.nbytes def __len__(self) -> int: return len(self._pa_array) def __contains__(self, key) -> bool: if isna(key) and key is not self.dtype.na_value: if self.dtype.kind == 'f' and lib.is_float(key): return pc.any(pc.is_nan(self._pa_array)).as_py() return False return bool(super().__contains__(key)) @property def _hasna(self) -> bool: return self._pa_array.null_count > 0 def isna(self) -> npt.NDArray[np.bool_]: null_count = self._pa_array.null_count if null_count == 0: return np.zeros(len(self), dtype=np.bool_) elif null_count == len(self): return np.ones(len(self), dtype=np.bool_) return self._pa_array.is_null().to_numpy() @overload def any(self, *, skipna: Literal[True]=..., **kwargs) 
-> bool: ... @overload def any(self, *, skipna: bool, **kwargs) -> bool | NAType: ... def any(self, *, skipna: bool=True, **kwargs) -> bool | NAType: return self._reduce('any', skipna=skipna, **kwargs) @overload def all(self, *, skipna: Literal[True]=..., **kwargs) -> bool: ... @overload def all(self, *, skipna: bool, **kwargs) -> bool | NAType: ... def all(self, *, skipna: bool=True, **kwargs) -> bool | NAType: return self._reduce('all', skipna=skipna, **kwargs) def argsort(self, *, ascending: bool=True, kind: SortKind='quicksort', na_position: str='last', **kwargs) -> np.ndarray: order = 'ascending' if ascending else 'descending' null_placement = {'last': 'at_end', 'first': 'at_start'}.get(na_position, None) if null_placement is None: raise ValueError(f'invalid na_position: {na_position}') result = pc.array_sort_indices(self._pa_array, order=order, null_placement=null_placement) np_result = result.to_numpy() return np_result.astype(np.intp, copy=False) def _argmin_max(self, skipna: bool, method: str) -> int: if self._pa_array.length() in (0, self._pa_array.null_count) or (self._hasna and (not skipna)): return getattr(super(), f'arg{method}')(skipna=skipna) data = self._pa_array if pa.types.is_duration(data.type): data = data.cast(pa.int64()) value = getattr(pc, method)(data, skip_nulls=skipna) return pc.index(data, value).as_py() def argmin(self, skipna: bool=True) -> int: return self._argmin_max(skipna, 'min') def argmax(self, skipna: bool=True) -> int: return self._argmin_max(skipna, 'max') def copy(self) -> Self: return type(self)(self._pa_array) def dropna(self) -> Self: return type(self)(pc.drop_null(self._pa_array)) def _pad_or_backfill(self, *, method: FillnaOptions, limit: int | None=None, limit_area: Literal['inside', 'outside'] | None=None, copy: bool=True) -> Self: if not self._hasna: return self if limit is None and limit_area is None: method = missing.clean_fill_method(method) try: if method == 'pad': return type(self)(pc.fill_null_forward(self._pa_array)) elif method == 'backfill': return type(self)(pc.fill_null_backward(self._pa_array)) except pa.ArrowNotImplementedError: pass return super()._pad_or_backfill(method=method, limit=limit, limit_area=limit_area, copy=copy) @doc(ExtensionArray.fillna) def fillna(self, value: object | ArrayLike, limit: int | None=None, copy: bool=True) -> Self: if not self._hasna: return self.copy() if limit is not None: return super().fillna(value=value, limit=limit, copy=copy) if isinstance(value, (np.ndarray, ExtensionArray)): if len(value) != len(self): raise ValueError(f"Length of 'value' does not match. 
Got ({len(value)}) expected {len(self)}") try: fill_value = self._box_pa(value, pa_type=self._pa_array.type) except pa.ArrowTypeError as err: msg = f"Invalid value '{value!s}' for dtype {self.dtype}" raise TypeError(msg) from err try: return type(self)(pc.fill_null(self._pa_array, fill_value=fill_value)) except pa.ArrowNotImplementedError: pass return super().fillna(value=value, limit=limit, copy=copy) def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]: if not len(values): return np.zeros(len(self), dtype=bool) result = pc.is_in(self._pa_array, value_set=pa.array(values, from_pandas=True)) return np.array(result, dtype=np.bool_) def _values_for_factorize(self) -> tuple[np.ndarray, Any]: values = self._pa_array.to_numpy() return (values, self.dtype.na_value) @doc(ExtensionArray.factorize) def factorize(self, use_na_sentinel: bool=True) -> tuple[np.ndarray, ExtensionArray]: null_encoding = 'mask' if use_na_sentinel else 'encode' data = self._pa_array pa_type = data.type if pa_version_under11p0 and pa.types.is_duration(pa_type): data = data.cast(pa.int64()) if pa.types.is_dictionary(data.type): encoded = data else: encoded = data.dictionary_encode(null_encoding=null_encoding) if encoded.length() == 0: indices = np.array([], dtype=np.intp) uniques = type(self)(pa.chunked_array([], type=encoded.type.value_type)) else: combined = encoded.combine_chunks() pa_indices = combined.indices if pa_indices.null_count > 0: pa_indices = pc.fill_null(pa_indices, -1) indices = pa_indices.to_numpy(zero_copy_only=False, writable=True).astype(np.intp, copy=False) uniques = type(self)(combined.dictionary) if pa_version_under11p0 and pa.types.is_duration(pa_type): uniques = cast(ArrowExtensionArray, uniques.astype(self.dtype)) return (indices, uniques) def reshape(self, *args, **kwargs): raise NotImplementedError(f'{type(self)} does not support reshape as backed by a 1D pyarrow.ChunkedArray.') def round(self, decimals: int=0, *args, **kwargs) -> Self: return type(self)(pc.round(self._pa_array, ndigits=decimals)) @doc(ExtensionArray.searchsorted) def searchsorted(self, value: NumpyValueArrayLike | ExtensionArray, side: Literal['left', 'right']='left', sorter: NumpySorter | None=None) -> npt.NDArray[np.intp] | np.intp: if self._hasna: raise ValueError('searchsorted requires array to be sorted, which is impossible with NAs present.') if isinstance(value, ExtensionArray): value = value.astype(object) dtype = None if isinstance(self.dtype, ArrowDtype): pa_dtype = self.dtype.pyarrow_dtype if (pa.types.is_timestamp(pa_dtype) or pa.types.is_duration(pa_dtype)) and pa_dtype.unit == 'ns': dtype = object return self.to_numpy(dtype=dtype).searchsorted(value, side=side, sorter=sorter) def take(self, indices: TakeIndexer, allow_fill: bool=False, fill_value: Any=None) -> ArrowExtensionArray: indices_array = np.asanyarray(indices) if len(self._pa_array) == 0 and (indices_array >= 0).any(): raise IndexError('cannot do a non-empty take') if indices_array.size > 0 and indices_array.max() >= len(self._pa_array): raise IndexError("out of bounds value in 'indices'.") if allow_fill: fill_mask = indices_array < 0 if fill_mask.any(): validate_indices(indices_array, len(self._pa_array)) indices_array = pa.array(indices_array, mask=fill_mask) result = self._pa_array.take(indices_array) if isna(fill_value): return type(self)(result) result = type(self)(result) result[fill_mask] = fill_value return result else: return type(self)(self._pa_array.take(indices)) else: if (indices_array < 0).any(): indices_array = np.copy(indices_array) 
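# --- Illustrative aside, not part of the pandas source above: fillna() and
# --- take(allow_fill=True) as exercised from the public API. Assumes pyarrow is
# --- installed; all names are hypothetical.
import pandas as pd
import pyarrow as pa

arr = pd.array([1.5, None, 3.0], dtype=pd.ArrowDtype(pa.float64()))
filled = arr.fillna(0.0)                   # uses pc.fill_null when the kernel exists
assert filled.isna().sum() == 0
took = arr.take([0, -1], allow_fill=True)  # -1 marks positions to fill with NA
assert took[1] is pd.NA
# --- end of aside ---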
indices_array[indices_array < 0] += len(self._pa_array) return type(self)(self._pa_array.take(indices_array)) def _maybe_convert_datelike_array(self): pa_type = self._pa_array.type if pa.types.is_timestamp(pa_type): return self._to_datetimearray() elif pa.types.is_duration(pa_type): return self._to_timedeltaarray() return self def _to_datetimearray(self) -> DatetimeArray: from pandas.core.arrays.datetimes import DatetimeArray, tz_to_dtype pa_type = self._pa_array.type assert pa.types.is_timestamp(pa_type) np_dtype = np.dtype(f'M8[{pa_type.unit}]') dtype = tz_to_dtype(pa_type.tz, pa_type.unit) np_array = self._pa_array.to_numpy() np_array = np_array.astype(np_dtype) return DatetimeArray._simple_new(np_array, dtype=dtype) def _to_timedeltaarray(self) -> TimedeltaArray: from pandas.core.arrays.timedeltas import TimedeltaArray pa_type = self._pa_array.type assert pa.types.is_duration(pa_type) np_dtype = np.dtype(f'm8[{pa_type.unit}]') np_array = self._pa_array.to_numpy() np_array = np_array.astype(np_dtype) return TimedeltaArray._simple_new(np_array, dtype=np_dtype) def _values_for_json(self) -> np.ndarray: if is_numeric_dtype(self.dtype): return np.asarray(self, dtype=object) return super()._values_for_json() @doc(ExtensionArray.to_numpy) def to_numpy(self, dtype: npt.DTypeLike | None=None, copy: bool=False, na_value: object=lib.no_default) -> np.ndarray: original_na_value = na_value (dtype, na_value) = to_numpy_dtype_inference(self, dtype, na_value, self._hasna) pa_type = self._pa_array.type if not self._hasna or isna(na_value) or pa.types.is_null(pa_type): data = self else: data = self.fillna(na_value) copy = False if pa.types.is_timestamp(pa_type) or pa.types.is_duration(pa_type): if dtype != object and na_value is self.dtype.na_value: na_value = lib.no_default result = data._maybe_convert_datelike_array().to_numpy(dtype=dtype, na_value=na_value) elif pa.types.is_time(pa_type) or pa.types.is_date(pa_type): result = np.array(list(data), dtype=dtype) if data._hasna: result[data.isna()] = na_value elif pa.types.is_null(pa_type): if dtype is not None and isna(na_value): na_value = None result = np.full(len(data), fill_value=na_value, dtype=dtype) elif not data._hasna or (pa.types.is_floating(pa_type) and (na_value is np.nan or (original_na_value is lib.no_default and is_float_dtype(dtype)))): result = data._pa_array.to_numpy() if dtype is not None: result = result.astype(dtype, copy=False) if copy: result = result.copy() else: if dtype is None: empty = pa.array([], type=pa_type).to_numpy(zero_copy_only=False) if can_hold_element(empty, na_value): dtype = empty.dtype else: dtype = np.object_ result = np.empty(len(data), dtype=dtype) mask = data.isna() result[mask] = na_value result[~mask] = data[~mask]._pa_array.to_numpy() return result def map(self, mapper, na_action: Literal['ignore'] | None=None): if is_numeric_dtype(self.dtype): return map_array(self.to_numpy(), mapper, na_action=na_action) else: return super().map(mapper, na_action) @doc(ExtensionArray.duplicated) def duplicated(self, keep: Literal['first', 'last', False]='first') -> npt.NDArray[np.bool_]: pa_type = self._pa_array.type if pa.types.is_floating(pa_type) or pa.types.is_integer(pa_type): values = self.to_numpy(na_value=0) elif pa.types.is_boolean(pa_type): values = self.to_numpy(na_value=False) elif pa.types.is_temporal(pa_type): if pa_type.bit_width == 32: pa_type = pa.int32() else: pa_type = pa.int64() arr = self.astype(ArrowDtype(pa_type)) values = arr.to_numpy(na_value=0) else: values = self.factorize()[0] mask = 
self.isna() if self._hasna else None return algos.duplicated(values, keep=keep, mask=mask) def unique(self) -> Self: pa_type = self._pa_array.type if pa_version_under11p0 and pa.types.is_duration(pa_type): data = self._pa_array.cast(pa.int64()) else: data = self._pa_array pa_result = pc.unique(data) if pa_version_under11p0 and pa.types.is_duration(pa_type): pa_result = pa_result.cast(pa_type) return type(self)(pa_result) def value_counts(self, dropna: bool=True) -> Series: pa_type = self._pa_array.type if pa_version_under11p0 and pa.types.is_duration(pa_type): data = self._pa_array.cast(pa.int64()) else: data = self._pa_array from pandas import Index, Series vc = data.value_counts() values = vc.field(0) counts = vc.field(1) if dropna and data.null_count > 0: mask = values.is_valid() values = values.filter(mask) counts = counts.filter(mask) if pa_version_under11p0 and pa.types.is_duration(pa_type): values = values.cast(pa_type) counts = ArrowExtensionArray(counts) index = Index(type(self)(values)) return Series(counts, index=index, name='count', copy=False) @classmethod def _concat_same_type(cls, to_concat) -> Self: chunks = [array for ea in to_concat for array in ea._pa_array.iterchunks()] if to_concat[0].dtype == 'string': pa_dtype = pa.large_string() else: pa_dtype = to_concat[0].dtype.pyarrow_dtype arr = pa.chunked_array(chunks, type=pa_dtype) return cls(arr) def _accumulate(self, name: str, *, skipna: bool=True, **kwargs) -> ArrowExtensionArray | ExtensionArray: pyarrow_name = {'cummax': 'cumulative_max', 'cummin': 'cumulative_min', 'cumprod': 'cumulative_prod_checked', 'cumsum': 'cumulative_sum_checked'}.get(name, name) pyarrow_meth = getattr(pc, pyarrow_name, None) if pyarrow_meth is None: return super()._accumulate(name, skipna=skipna, **kwargs) data_to_accum = self._pa_array pa_dtype = data_to_accum.type convert_to_int = pa.types.is_temporal(pa_dtype) and name in ['cummax', 'cummin'] or (pa.types.is_duration(pa_dtype) and name == 'cumsum') if convert_to_int: if pa_dtype.bit_width == 32: data_to_accum = data_to_accum.cast(pa.int32()) else: data_to_accum = data_to_accum.cast(pa.int64()) result = pyarrow_meth(data_to_accum, skip_nulls=skipna, **kwargs) if convert_to_int: result = result.cast(pa_dtype) return type(self)(result) def _reduce_pyarrow(self, name: str, *, skipna: bool=True, **kwargs) -> pa.Scalar: pa_type = self._pa_array.type data_to_reduce = self._pa_array cast_kwargs = {} if pa_version_under13p0 else {'safe': False} if name in ['any', 'all'] and (pa.types.is_integer(pa_type) or pa.types.is_floating(pa_type) or pa.types.is_duration(pa_type) or pa.types.is_decimal(pa_type)): if pa.types.is_duration(pa_type): data_to_cmp = self._pa_array.cast(pa.int64()) else: data_to_cmp = self._pa_array not_eq = pc.not_equal(data_to_cmp, 0) data_to_reduce = not_eq elif name in ['min', 'max', 'sum'] and pa.types.is_duration(pa_type): data_to_reduce = self._pa_array.cast(pa.int64()) elif name in ['median', 'mean', 'std', 'sem'] and pa.types.is_temporal(pa_type): nbits = pa_type.bit_width if nbits == 32: data_to_reduce = self._pa_array.cast(pa.int32()) else: data_to_reduce = self._pa_array.cast(pa.int64()) if name == 'sem': def pyarrow_meth(data, skip_nulls, **kwargs): numerator = pc.stddev(data, skip_nulls=skip_nulls, **kwargs) denominator = pc.sqrt_checked(pc.count(self._pa_array)) return pc.divide_checked(numerator, denominator) else: pyarrow_name = {'median': 'quantile', 'prod': 'product', 'std': 'stddev', 'var': 'variance'}.get(name, name) pyarrow_meth = getattr(pc, pyarrow_name, None) 
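# --- Illustrative aside, not part of the pandas source above: reductions and
# --- accumulations on Arrow-backed data delegate to pyarrow.compute kernels via
# --- _reduce_pyarrow and _accumulate. Assumes pyarrow is installed; names are
# --- hypothetical.
import pandas as pd
import pyarrow as pa

s = pd.Series([1, 2, 2, None], dtype=pd.ArrowDtype(pa.int64()))
assert s.sum() == 5              # pc.sum via _reduce_pyarrow
assert s.nunique() == 2          # unique()/value_counts() on the chunked array
assert s.cumsum().iloc[2] == 5   # pc.cumulative_sum_checked via _accumulate
# --- end of aside ---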
if pyarrow_meth is None: return super()._reduce(name, skipna=skipna, **kwargs) if name in ['any', 'all'] and 'min_count' not in kwargs: kwargs['min_count'] = 0 elif name == 'median': kwargs['q'] = 0.5 try: result = pyarrow_meth(data_to_reduce, skip_nulls=skipna, **kwargs) except (AttributeError, NotImplementedError, TypeError) as err: msg = f"'{type(self).__name__}' with dtype {self.dtype} does not support operation '{name}' with pyarrow version {pa.__version__}. '{name}' may be supported by upgrading pyarrow." raise TypeError(msg) from err if name == 'median': result = result[0] if name in ['min', 'max', 'sum'] and pa.types.is_duration(pa_type): result = result.cast(pa_type) if name in ['median', 'mean'] and pa.types.is_temporal(pa_type): if not pa_version_under13p0: nbits = pa_type.bit_width if nbits == 32: result = result.cast(pa.int32(), **cast_kwargs) else: result = result.cast(pa.int64(), **cast_kwargs) result = result.cast(pa_type) if name in ['std', 'sem'] and pa.types.is_temporal(pa_type): result = result.cast(pa.int64(), **cast_kwargs) if pa.types.is_duration(pa_type): result = result.cast(pa_type) elif pa.types.is_time(pa_type): unit = get_unit_from_pa_dtype(pa_type) result = result.cast(pa.duration(unit)) elif pa.types.is_date(pa_type): result = result.cast(pa.duration('s')) else: result = result.cast(pa.duration(pa_type.unit)) return result def _reduce(self, name: str, *, skipna: bool=True, keepdims: bool=False, **kwargs): result = self._reduce_calc(name, skipna=skipna, keepdims=keepdims, **kwargs) if isinstance(result, pa.Array): return type(self)(result) else: return result def _reduce_calc(self, name: str, *, skipna: bool=True, keepdims: bool=False, **kwargs): pa_result = self._reduce_pyarrow(name, skipna=skipna, **kwargs) if keepdims: if isinstance(pa_result, pa.Scalar): result = pa.array([pa_result.as_py()], type=pa_result.type) else: result = pa.array([pa_result], type=to_pyarrow_type(infer_dtype_from_scalar(pa_result)[0])) return result if pc.is_null(pa_result).as_py(): return self.dtype.na_value elif isinstance(pa_result, pa.Scalar): return pa_result.as_py() else: return pa_result def _explode(self): if not pa.types.is_list(self.dtype.pyarrow_dtype): return super()._explode() values = self counts = pa.compute.list_value_length(values._pa_array) counts = counts.fill_null(1).to_numpy() fill_value = pa.scalar([None], type=self._pa_array.type) mask = counts == 0 if mask.any(): values = values.copy() values[mask] = fill_value counts = counts.copy() counts[mask] = 1 values = values.fillna(fill_value) values = type(self)(pa.compute.list_flatten(values._pa_array)) return (values, counts) def __setitem__(self, key, value) -> None: if isinstance(key, tuple) and len(key) == 1: key = key[0] key = check_array_indexer(self, key) value = self._maybe_convert_setitem_value(value) if com.is_null_slice(key): data = self._if_else(True, value, self._pa_array) elif is_integer(key): key = cast(int, key) n = len(self) if key < 0: key += n if not 0 <= key < n: raise IndexError(f'index {key} is out of bounds for axis 0 with size {n}') if isinstance(value, pa.Scalar): value = value.as_py() elif is_list_like(value): raise ValueError('Length of indexer and values mismatch') chunks = [*self._pa_array[:key].chunks, pa.array([value], type=self._pa_array.type, from_pandas=True), *self._pa_array[key + 1:].chunks] data = pa.chunked_array(chunks).combine_chunks() elif is_bool_dtype(key): key = np.asarray(key, dtype=np.bool_) data = self._replace_with_mask(self._pa_array, key, value) elif is_scalar(value) 
or isinstance(value, pa.Scalar): mask = np.zeros(len(self), dtype=np.bool_) mask[key] = True data = self._if_else(mask, value, self._pa_array) else: indices = np.arange(len(self))[key] if len(indices) != len(value): raise ValueError('Length of indexer and values mismatch') if len(indices) == 0: return (_, argsort) = np.unique(indices, return_index=True) indices = indices[argsort] value = value.take(argsort) mask = np.zeros(len(self), dtype=np.bool_) mask[indices] = True data = self._replace_with_mask(self._pa_array, mask, value) if isinstance(data, pa.Array): data = pa.chunked_array([data]) self._pa_array = data def _rank_calc(self, *, axis: AxisInt=0, method: str='average', na_option: str='keep', ascending: bool=True, pct: bool=False): if axis != 0: ranked = super()._rank(axis=axis, method=method, na_option=na_option, ascending=ascending, pct=pct) if method == 'average' or pct: pa_type = pa.float64() else: pa_type = pa.uint64() result = pa.array(ranked, type=pa_type, from_pandas=True) return result data = self._pa_array.combine_chunks() sort_keys = 'ascending' if ascending else 'descending' null_placement = 'at_start' if na_option == 'top' else 'at_end' tiebreaker = 'min' if method == 'average' else method result = pc.rank(data, sort_keys=sort_keys, null_placement=null_placement, tiebreaker=tiebreaker) if na_option == 'keep': mask = pc.is_null(self._pa_array) null = pa.scalar(None, type=result.type) result = pc.if_else(mask, null, result) if method == 'average': result_max = pc.rank(data, sort_keys=sort_keys, null_placement=null_placement, tiebreaker='max') result_max = result_max.cast(pa.float64()) result_min = result.cast(pa.float64()) result = pc.divide(pc.add(result_min, result_max), 2) if pct: if not pa.types.is_floating(result.type): result = result.cast(pa.float64()) if method == 'dense': divisor = pc.max(result) else: divisor = pc.count(result) result = pc.divide(result, divisor) return result def _rank(self, *, axis: AxisInt=0, method: str='average', na_option: str='keep', ascending: bool=True, pct: bool=False) -> Self: return self._convert_rank_result(self._rank_calc(axis=axis, method=method, na_option=na_option, ascending=ascending, pct=pct)) def _quantile(self, qs: npt.NDArray[np.float64], interpolation: str) -> Self: pa_dtype = self._pa_array.type data = self._pa_array if pa.types.is_temporal(pa_dtype): nbits = pa_dtype.bit_width if nbits == 32: data = data.cast(pa.int32()) else: data = data.cast(pa.int64()) result = pc.quantile(data, q=qs, interpolation=interpolation) if pa.types.is_temporal(pa_dtype): if pa.types.is_floating(result.type): result = pc.floor(result) nbits = pa_dtype.bit_width if nbits == 32: result = result.cast(pa.int32()) else: result = result.cast(pa.int64()) result = result.cast(pa_dtype) return type(self)(result) def _mode(self, dropna: bool=True) -> Self: pa_type = self._pa_array.type if pa.types.is_temporal(pa_type): nbits = pa_type.bit_width if nbits == 32: data = self._pa_array.cast(pa.int32()) elif nbits == 64: data = self._pa_array.cast(pa.int64()) else: raise NotImplementedError(pa_type) else: data = self._pa_array if dropna: data = data.drop_null() res = pc.value_counts(data) most_common = res.field('values').filter(pc.equal(res.field('counts'), pc.max(res.field('counts')))) if pa.types.is_temporal(pa_type): most_common = most_common.cast(pa_type) most_common = most_common.take(pc.array_sort_indices(most_common)) return type(self)(most_common) def _maybe_convert_setitem_value(self, value): try: value = self._box_pa(value, self._pa_array.type) 
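# --- Illustrative aside, not part of the pandas source above: __setitem__,
# --- ranking, quantiles and mode as seen from the Series API. Assumes pyarrow is
# --- installed; names are hypothetical.
import pandas as pd
import pyarrow as pa

arr = pd.array([10, 20, 30], dtype=pd.ArrowDtype(pa.int64()))
arr[1] = None                                # integer key: chunks rebuilt around the slot
assert arr.isna()[1]

s = pd.Series([3, 1, 2, 2], dtype=pd.ArrowDtype(pa.int64()))
assert s.rank(method="min").iloc[0] == 4     # pc.rank with tiebreaker='min'
assert s.quantile(0.5) == 2.0                # pc.quantile on the int64 data
assert s.mode().iloc[0] == 2                 # pc.value_counts -> most frequent value
# --- end of aside ---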
except pa.ArrowTypeError as err: msg = f"Invalid value '{value!s}' for dtype {self.dtype}" raise TypeError(msg) from err return value def interpolate(self, *, method: InterpolateOptions, axis: int, index, limit, limit_direction, limit_area, copy: bool, **kwargs) -> Self: if not self.dtype._is_numeric: raise ValueError('Values must be numeric.') if not pa_version_under13p0 and method == 'linear' and (limit_area is None) and (limit is None) and (limit_direction == 'forward'): values = self._pa_array.combine_chunks() na_value = pa.array([None], type=values.type) y_diff_2 = pc.fill_null_backward(pc.pairwise_diff_checked(values, period=2)) prev_values = pa.concat_arrays([na_value, values[:-2], na_value]) interps = pc.add_checked(prev_values, pc.divide_checked(y_diff_2, 2)) return type(self)(pc.coalesce(self._pa_array, interps)) mask = self.isna() if self.dtype.kind == 'f': data = self._pa_array.to_numpy() elif self.dtype.kind in 'iu': data = self.to_numpy(dtype='f8', na_value=0.0) else: raise NotImplementedError(f'interpolate is not implemented for dtype={self.dtype}') missing.interpolate_2d_inplace(data, method=method, axis=0, index=index, limit=limit, limit_direction=limit_direction, limit_area=limit_area, mask=mask, **kwargs) return type(self)(self._box_pa_array(pa.array(data, mask=mask))) @classmethod def _if_else(cls, cond: npt.NDArray[np.bool_] | bool, left: ArrayLike | Scalar, right: ArrayLike | Scalar) -> pa.Array: try: return pc.if_else(cond, left, right) except pa.ArrowNotImplementedError: pass def _to_numpy_and_type(value) -> tuple[np.ndarray, pa.DataType | None]: if isinstance(value, (pa.Array, pa.ChunkedArray)): pa_type = value.type elif isinstance(value, pa.Scalar): pa_type = value.type value = value.as_py() else: pa_type = None return (np.array(value, dtype=object), pa_type) (left, left_type) = _to_numpy_and_type(left) (right, right_type) = _to_numpy_and_type(right) pa_type = left_type or right_type result = np.where(cond, left, right) return pa.array(result, type=pa_type, from_pandas=True) @classmethod def _replace_with_mask(cls, values: pa.Array | pa.ChunkedArray, mask: npt.NDArray[np.bool_] | bool, replacements: ArrayLike | Scalar) -> pa.Array | pa.ChunkedArray: if isinstance(replacements, pa.ChunkedArray): replacements = replacements.combine_chunks() if isinstance(values, pa.ChunkedArray) and pa.types.is_boolean(values.type): values = values.combine_chunks() try: return pc.replace_with_mask(values, mask, replacements) except pa.ArrowNotImplementedError: pass if isinstance(replacements, pa.Array): replacements = np.array(replacements, dtype=object) elif isinstance(replacements, pa.Scalar): replacements = replacements.as_py() result = np.array(values, dtype=object) result[mask] = replacements return pa.array(result, type=values.type, from_pandas=True) def _to_masked(self): pa_dtype = self._pa_array.type if pa.types.is_floating(pa_dtype) or pa.types.is_integer(pa_dtype): na_value = 1 elif pa.types.is_boolean(pa_dtype): na_value = True else: raise NotImplementedError dtype = _arrow_dtype_mapping()[pa_dtype] mask = self.isna() arr = self.to_numpy(dtype=dtype.numpy_dtype, na_value=na_value) return dtype.construct_array_type()(arr, mask) def _groupby_op(self, *, how: str, has_dropped_na: bool, min_count: int, ngroups: int, ids: npt.NDArray[np.intp], **kwargs): if isinstance(self.dtype, StringDtype): return super()._groupby_op(how=how, has_dropped_na=has_dropped_na, min_count=min_count, ngroups=ngroups, ids=ids, **kwargs) values: ExtensionArray pa_type = self._pa_array.type if 
pa.types.is_timestamp(pa_type): values = self._to_datetimearray() elif pa.types.is_duration(pa_type): values = self._to_timedeltaarray() else: values = self._to_masked() result = values._groupby_op(how=how, has_dropped_na=has_dropped_na, min_count=min_count, ngroups=ngroups, ids=ids, **kwargs) if isinstance(result, np.ndarray): return result return type(self)._from_sequence(result, copy=False) def _apply_elementwise(self, func: Callable) -> list[list[Any]]: return [[None if val is None else func(val) for val in chunk.to_numpy(zero_copy_only=False)] for chunk in self._pa_array.iterchunks()] def _convert_bool_result(self, result): return type(self)(result) def _convert_int_result(self, result): return type(self)(result) def _convert_rank_result(self, result): return type(self)(result) def _str_count(self, pat: str, flags: int=0) -> Self: if flags: raise NotImplementedError(f'count not implemented with flags={flags!r}') return type(self)(pc.count_substring_regex(self._pa_array, pat)) def _str_repeat(self, repeats: int | Sequence[int]) -> Self: if not isinstance(repeats, int): raise NotImplementedError(f'repeat is not implemented when repeats is {type(repeats).__name__}') return type(self)(pc.binary_repeat(self._pa_array, repeats)) def _str_join(self, sep: str) -> Self: if pa.types.is_string(self._pa_array.type) or pa.types.is_large_string(self._pa_array.type): result = self._apply_elementwise(list) result = pa.chunked_array(result, type=pa.list_(pa.string())) else: result = self._pa_array return type(self)(pc.binary_join(result, sep)) def _str_partition(self, sep: str, expand: bool) -> Self: predicate = lambda val: val.partition(sep) result = self._apply_elementwise(predicate) return type(self)(pa.chunked_array(result)) def _str_rpartition(self, sep: str, expand: bool) -> Self: predicate = lambda val: val.rpartition(sep) result = self._apply_elementwise(predicate) return type(self)(pa.chunked_array(result)) def _str_casefold(self) -> Self: predicate = lambda val: val.casefold() result = self._apply_elementwise(predicate) return type(self)(pa.chunked_array(result)) def _str_encode(self, encoding: str, errors: str='strict') -> Self: predicate = lambda val: val.encode(encoding, errors) result = self._apply_elementwise(predicate) return type(self)(pa.chunked_array(result)) def _str_extract(self, pat: str, flags: int=0, expand: bool=True): if flags: raise NotImplementedError('Only flags=0 is implemented.') groups = re.compile(pat).groupindex.keys() if len(groups) == 0: raise ValueError(f'pat={pat!r} must contain a symbolic group name.') result = pc.extract_regex(self._pa_array, pat) if expand: return {col: type(self)(pc.struct_field(result, [i])) for (col, i) in zip(groups, range(result.type.num_fields))} else: return type(self)(pc.struct_field(result, [0])) def _str_findall(self, pat: str, flags: int=0) -> Self: regex = re.compile(pat, flags=flags) predicate = lambda val: regex.findall(val) result = self._apply_elementwise(predicate) return type(self)(pa.chunked_array(result)) def _str_get_dummies(self, sep: str='|', dtype: NpDtype | None=None): if dtype is None: dtype = np.bool_ split = pc.split_pattern(self._pa_array, sep) flattened_values = pc.list_flatten(split) uniques = flattened_values.unique() uniques_sorted = uniques.take(pa.compute.array_sort_indices(uniques)) lengths = pc.list_value_length(split).fill_null(0).to_numpy() n_rows = len(self) n_cols = len(uniques) indices = pc.index_in(flattened_values, uniques_sorted).to_numpy() indices = indices + np.arange(n_rows).repeat(lengths) * 
n_cols _dtype = pandas_dtype(dtype) dummies_dtype: NpDtype if isinstance(_dtype, np.dtype): dummies_dtype = _dtype else: dummies_dtype = np.bool_ dummies = np.zeros(n_rows * n_cols, dtype=dummies_dtype) if dtype == str: dummies[:] = False dummies[indices] = True dummies = dummies.reshape((n_rows, n_cols)) result = type(self)(pa.array(list(dummies))) return (result, uniques_sorted.to_pylist()) def _str_index(self, sub: str, start: int=0, end: int | None=None) -> Self: predicate = lambda val: val.index(sub, start, end) result = self._apply_elementwise(predicate) return type(self)(pa.chunked_array(result)) def _str_rindex(self, sub: str, start: int=0, end: int | None=None) -> Self: predicate = lambda val: val.rindex(sub, start, end) result = self._apply_elementwise(predicate) return type(self)(pa.chunked_array(result)) def _str_normalize(self, form: str) -> Self: predicate = lambda val: unicodedata.normalize(form, val) result = self._apply_elementwise(predicate) return type(self)(pa.chunked_array(result)) def _str_rfind(self, sub: str, start: int=0, end=None) -> Self: predicate = lambda val: val.rfind(sub, start, end) result = self._apply_elementwise(predicate) return type(self)(pa.chunked_array(result)) def _str_split(self, pat: str | None=None, n: int | None=-1, expand: bool=False, regex: bool | None=None) -> Self: if n in {-1, 0}: n = None if pat is None: split_func = pc.utf8_split_whitespace elif regex: split_func = functools.partial(pc.split_pattern_regex, pattern=pat) else: split_func = functools.partial(pc.split_pattern, pattern=pat) return type(self)(split_func(self._pa_array, max_splits=n)) def _str_rsplit(self, pat: str | None=None, n: int | None=-1) -> Self: if n in {-1, 0}: n = None if pat is None: return type(self)(pc.utf8_split_whitespace(self._pa_array, max_splits=n, reverse=True)) return type(self)(pc.split_pattern(self._pa_array, pat, max_splits=n, reverse=True)) def _str_translate(self, table: dict[int, str]) -> Self: predicate = lambda val: val.translate(table) result = self._apply_elementwise(predicate) return type(self)(pa.chunked_array(result)) def _str_wrap(self, width: int, **kwargs) -> Self: kwargs['width'] = width tw = textwrap.TextWrapper(**kwargs) predicate = lambda val: '\n'.join(tw.wrap(val)) result = self._apply_elementwise(predicate) return type(self)(pa.chunked_array(result)) @property def _dt_days(self) -> Self: return type(self)(pa.array(self._to_timedeltaarray().components.days, from_pandas=True, type=pa.int32())) @property def _dt_hours(self) -> Self: return type(self)(pa.array(self._to_timedeltaarray().components.hours, from_pandas=True, type=pa.int32())) @property def _dt_minutes(self) -> Self: return type(self)(pa.array(self._to_timedeltaarray().components.minutes, from_pandas=True, type=pa.int32())) @property def _dt_seconds(self) -> Self: return type(self)(pa.array(self._to_timedeltaarray().components.seconds, from_pandas=True, type=pa.int32())) @property def _dt_milliseconds(self) -> Self: return type(self)(pa.array(self._to_timedeltaarray().components.milliseconds, from_pandas=True, type=pa.int32())) @property def _dt_microseconds(self) -> Self: return type(self)(pa.array(self._to_timedeltaarray().components.microseconds, from_pandas=True, type=pa.int32())) @property def _dt_nanoseconds(self) -> Self: return type(self)(pa.array(self._to_timedeltaarray().components.nanoseconds, from_pandas=True, type=pa.int32())) def _dt_to_pytimedelta(self) -> np.ndarray: data = self._pa_array.to_pylist() if self._dtype.pyarrow_dtype.unit == 'ns': data = [None if ts 
is None else ts.to_pytimedelta() for ts in data] return np.array(data, dtype=object) def _dt_total_seconds(self) -> Self: return type(self)(pa.array(self._to_timedeltaarray().total_seconds(), from_pandas=True)) def _dt_as_unit(self, unit: str) -> Self: if pa.types.is_date(self.dtype.pyarrow_dtype): raise NotImplementedError('as_unit not implemented for date types') pd_array = self._maybe_convert_datelike_array() return type(self)(pa.array(pd_array.as_unit(unit), from_pandas=True)) @property def _dt_year(self) -> Self: return type(self)(pc.year(self._pa_array)) @property def _dt_day(self) -> Self: return type(self)(pc.day(self._pa_array)) @property def _dt_day_of_week(self) -> Self: return type(self)(pc.day_of_week(self._pa_array)) _dt_dayofweek = _dt_day_of_week _dt_weekday = _dt_day_of_week @property def _dt_day_of_year(self) -> Self: return type(self)(pc.day_of_year(self._pa_array)) _dt_dayofyear = _dt_day_of_year @property def _dt_hour(self) -> Self: return type(self)(pc.hour(self._pa_array)) def _dt_isocalendar(self) -> Self: return type(self)(pc.iso_calendar(self._pa_array)) @property def _dt_is_leap_year(self) -> Self: return type(self)(pc.is_leap_year(self._pa_array)) @property def _dt_is_month_start(self) -> Self: return type(self)(pc.equal(pc.day(self._pa_array), 1)) @property def _dt_is_month_end(self) -> Self: result = pc.equal(pc.days_between(pc.floor_temporal(self._pa_array, unit='day'), pc.ceil_temporal(self._pa_array, unit='month')), 1) return type(self)(result) @property def _dt_is_year_start(self) -> Self: return type(self)(pc.and_(pc.equal(pc.month(self._pa_array), 1), pc.equal(pc.day(self._pa_array), 1))) @property def _dt_is_year_end(self) -> Self: return type(self)(pc.and_(pc.equal(pc.month(self._pa_array), 12), pc.equal(pc.day(self._pa_array), 31))) @property def _dt_is_quarter_start(self) -> Self: result = pc.equal(pc.floor_temporal(self._pa_array, unit='quarter'), pc.floor_temporal(self._pa_array, unit='day')) return type(self)(result) @property def _dt_is_quarter_end(self) -> Self: result = pc.equal(pc.days_between(pc.floor_temporal(self._pa_array, unit='day'), pc.ceil_temporal(self._pa_array, unit='quarter')), 1) return type(self)(result) @property def _dt_days_in_month(self) -> Self: result = pc.days_between(pc.floor_temporal(self._pa_array, unit='month'), pc.ceil_temporal(self._pa_array, unit='month')) return type(self)(result) _dt_daysinmonth = _dt_days_in_month @property def _dt_microsecond(self) -> Self: us = pc.microsecond(self._pa_array) ms_to_us = pc.multiply(pc.millisecond(self._pa_array), 1000) return type(self)(pc.add(us, ms_to_us)) @property def _dt_minute(self) -> Self: return type(self)(pc.minute(self._pa_array)) @property def _dt_month(self) -> Self: return type(self)(pc.month(self._pa_array)) @property def _dt_nanosecond(self) -> Self: return type(self)(pc.nanosecond(self._pa_array)) @property def _dt_quarter(self) -> Self: return type(self)(pc.quarter(self._pa_array)) @property def _dt_second(self) -> Self: return type(self)(pc.second(self._pa_array)) @property def _dt_date(self) -> Self: return type(self)(self._pa_array.cast(pa.date32())) @property def _dt_time(self) -> Self: unit = self.dtype.pyarrow_dtype.unit if self.dtype.pyarrow_dtype.unit in {'us', 'ns'} else 'ns' return type(self)(self._pa_array.cast(pa.time64(unit))) @property def _dt_tz(self): return timezones.maybe_get_tz(self.dtype.pyarrow_dtype.tz) @property def _dt_unit(self): return self.dtype.pyarrow_dtype.unit def _dt_normalize(self) -> Self: return 
type(self)(pc.floor_temporal(self._pa_array, 1, 'day')) def _dt_strftime(self, format: str) -> Self: return type(self)(pc.strftime(self._pa_array, format=format)) def _round_temporally(self, method: Literal['ceil', 'floor', 'round'], freq, ambiguous: TimeAmbiguous='raise', nonexistent: TimeNonexistent='raise') -> Self: if ambiguous != 'raise': raise NotImplementedError('ambiguous is not supported.') if nonexistent != 'raise': raise NotImplementedError('nonexistent is not supported.') offset = to_offset(freq) if offset is None: raise ValueError(f'Must specify a valid frequency: {freq}') pa_supported_unit = {'Y': 'year', 'YS': 'year', 'Q': 'quarter', 'QS': 'quarter', 'M': 'month', 'MS': 'month', 'W': 'week', 'D': 'day', 'h': 'hour', 'min': 'minute', 's': 'second', 'ms': 'millisecond', 'us': 'microsecond', 'ns': 'nanosecond'} unit = pa_supported_unit.get(offset._prefix, None) if unit is None: raise ValueError(f'freq={freq!r} is not supported') multiple = offset.n rounding_method = getattr(pc, f'{method}_temporal') return type(self)(rounding_method(self._pa_array, multiple=multiple, unit=unit)) def _dt_ceil(self, freq, ambiguous: TimeAmbiguous='raise', nonexistent: TimeNonexistent='raise') -> Self: return self._round_temporally('ceil', freq, ambiguous, nonexistent) def _dt_floor(self, freq, ambiguous: TimeAmbiguous='raise', nonexistent: TimeNonexistent='raise') -> Self: return self._round_temporally('floor', freq, ambiguous, nonexistent) def _dt_round(self, freq, ambiguous: TimeAmbiguous='raise', nonexistent: TimeNonexistent='raise') -> Self: return self._round_temporally('round', freq, ambiguous, nonexistent) def _dt_day_name(self, locale: str | None=None) -> Self: if locale is None: locale = 'C' return type(self)(pc.strftime(self._pa_array, format='%A', locale=locale)) def _dt_month_name(self, locale: str | None=None) -> Self: if locale is None: locale = 'C' return type(self)(pc.strftime(self._pa_array, format='%B', locale=locale)) def _dt_to_pydatetime(self) -> Series: from pandas import Series if pa.types.is_date(self.dtype.pyarrow_dtype): raise ValueError(f'to_pydatetime cannot be called with {self.dtype.pyarrow_dtype} type. 
Convert to pyarrow timestamp type.') data = self._pa_array.to_pylist() if self._dtype.pyarrow_dtype.unit == 'ns': data = [None if ts is None else ts.to_pydatetime(warn=False) for ts in data] return Series(data, dtype=object) def _dt_tz_localize(self, tz, ambiguous: TimeAmbiguous='raise', nonexistent: TimeNonexistent='raise') -> Self: if ambiguous != 'raise': raise NotImplementedError(f'ambiguous={ambiguous!r} is not supported') nonexistent_pa = {'raise': 'raise', 'shift_backward': 'earliest', 'shift_forward': 'latest'}.get(nonexistent, None) if nonexistent_pa is None: raise NotImplementedError(f'nonexistent={nonexistent!r} is not supported') if tz is None: result = self._pa_array.cast(pa.timestamp(self.dtype.pyarrow_dtype.unit)) else: result = pc.assume_timezone(self._pa_array, str(tz), ambiguous=ambiguous, nonexistent=nonexistent_pa) return type(self)(result) def _dt_tz_convert(self, tz) -> Self: if self.dtype.pyarrow_dtype.tz is None: raise TypeError('Cannot convert tz-naive timestamps, use tz_localize to localize') current_unit = self.dtype.pyarrow_dtype.unit result = self._pa_array.cast(pa.timestamp(current_unit, tz)) return type(self)(result) def transpose_homogeneous_pyarrow(arrays: Sequence[ArrowExtensionArray]) -> list[ArrowExtensionArray]: arrays = list(arrays) (nrows, ncols) = (len(arrays[0]), len(arrays)) indices = np.arange(nrows * ncols).reshape(ncols, nrows).T.reshape(-1) arr = pa.chunked_array([chunk for arr in arrays for chunk in arr._pa_array.chunks]) arr = arr.take(indices) return [ArrowExtensionArray(arr.slice(i * ncols, ncols)) for i in range(nrows)] # File: pandas-main/pandas/core/arrays/arrow/extension_types.py from __future__ import annotations import json from typing import TYPE_CHECKING import pyarrow from pandas.compat import pa_version_under14p1 from pandas.core.dtypes.dtypes import IntervalDtype, PeriodDtype from pandas.core.arrays.interval import VALID_CLOSED if TYPE_CHECKING: from pandas._typing import IntervalClosedType class ArrowPeriodType(pyarrow.ExtensionType): def __init__(self, freq) -> None: self._freq = freq pyarrow.ExtensionType.__init__(self, pyarrow.int64(), 'pandas.period') @property def freq(self): return self._freq def __arrow_ext_serialize__(self) -> bytes: metadata = {'freq': self.freq} return json.dumps(metadata).encode() @classmethod def __arrow_ext_deserialize__(cls, storage_type, serialized) -> ArrowPeriodType: metadata = json.loads(serialized.decode()) return ArrowPeriodType(metadata['freq']) def __eq__(self, other): if isinstance(other, pyarrow.BaseExtensionType): return type(self) == type(other) and self.freq == other.freq else: return NotImplemented def __ne__(self, other) -> bool: return not self == other def __hash__(self) -> int: return hash((str(self), self.freq)) def to_pandas_dtype(self) -> PeriodDtype: return PeriodDtype(freq=self.freq) _period_type = ArrowPeriodType('D') pyarrow.register_extension_type(_period_type) class ArrowIntervalType(pyarrow.ExtensionType): def __init__(self, subtype, closed: IntervalClosedType) -> None: assert closed in VALID_CLOSED self._closed: IntervalClosedType = closed if not isinstance(subtype, pyarrow.DataType): subtype = pyarrow.type_for_alias(str(subtype)) self._subtype = subtype storage_type = pyarrow.struct([('left', subtype), ('right', subtype)]) pyarrow.ExtensionType.__init__(self, storage_type, 'pandas.interval') @property def subtype(self): return self._subtype @property def closed(self) -> IntervalClosedType: return self._closed def __arrow_ext_serialize__(self) -> bytes: metadata = 
{'subtype': str(self.subtype), 'closed': self.closed} return json.dumps(metadata).encode() @classmethod def __arrow_ext_deserialize__(cls, storage_type, serialized) -> ArrowIntervalType: metadata = json.loads(serialized.decode()) subtype = pyarrow.type_for_alias(metadata['subtype']) closed = metadata['closed'] return ArrowIntervalType(subtype, closed) def __eq__(self, other): if isinstance(other, pyarrow.BaseExtensionType): return type(self) == type(other) and self.subtype == other.subtype and (self.closed == other.closed) else: return NotImplemented def __ne__(self, other) -> bool: return not self == other def __hash__(self) -> int: return hash((str(self), str(self.subtype), self.closed)) def to_pandas_dtype(self) -> IntervalDtype: return IntervalDtype(self.subtype.to_pandas_dtype(), self.closed) _interval_type = ArrowIntervalType(pyarrow.int64(), 'left') pyarrow.register_extension_type(_interval_type) _ERROR_MSG = "Disallowed deserialization of 'arrow.py_extension_type':\nstorage_type = {storage_type}\nserialized = {serialized}\npickle disassembly:\n{pickle_disassembly}\n\nReading of untrusted Parquet or Feather files with a PyExtensionType column\nallows arbitrary code execution.\nIf you trust this file, you can enable reading the extension type by one of:\n\n- upgrading to pyarrow >= 14.0.1, and call `pa.PyExtensionType.set_auto_load(True)`\n- install pyarrow-hotfix (`pip install pyarrow-hotfix`) and disable it by running\n `import pyarrow_hotfix; pyarrow_hotfix.uninstall()`\n\nWe strongly recommend updating your Parquet/Feather files to use extension types\nderived from `pyarrow.ExtensionType` instead, and register this type explicitly.\n" def patch_pyarrow() -> None: if not pa_version_under14p1: return if getattr(pyarrow, '_hotfix_installed', False): return class ForbiddenExtensionType(pyarrow.ExtensionType): def __arrow_ext_serialize__(self) -> bytes: return b'' @classmethod def __arrow_ext_deserialize__(cls, storage_type, serialized): import io import pickletools out = io.StringIO() pickletools.dis(serialized, out) raise RuntimeError(_ERROR_MSG.format(storage_type=storage_type, serialized=serialized, pickle_disassembly=out.getvalue())) pyarrow.unregister_extension_type('arrow.py_extension_type') pyarrow.register_extension_type(ForbiddenExtensionType(pyarrow.null(), 'arrow.py_extension_type')) pyarrow._hotfix_installed = True patch_pyarrow() # File: pandas-main/pandas/core/arrays/base.py """""" from __future__ import annotations import operator from typing import TYPE_CHECKING, Any, ClassVar, Literal, cast, overload import warnings import numpy as np from pandas._libs import algos as libalgos, lib from pandas.compat import set_function_name from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender, Substitution, cache_readonly from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_bool_kwarg, validate_insert_loc from pandas.core.dtypes.cast import maybe_cast_pointwise_result from pandas.core.dtypes.common import is_list_like, is_scalar, pandas_dtype from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCIndex, ABCSeries from pandas.core.dtypes.missing import isna from pandas.core import arraylike, missing, roperator from pandas.core.algorithms import duplicated, factorize_array, isin, map_array, mode, rank, unique from pandas.core.array_algos.quantile import quantile_with_mask from pandas.core.missing import 
_fill_limit_area_1d from pandas.core.sorting import nargminmax, nargsort if TYPE_CHECKING: from collections.abc import Callable, Iterator, Sequence from pandas._libs.missing import NAType from pandas._typing import ArrayLike, AstypeArg, AxisInt, Dtype, DtypeObj, FillnaOptions, InterpolateOptions, NumpySorter, NumpyValueArrayLike, PositionalIndexer, ScalarIndexer, Self, SequenceIndexer, Shape, SortKind, TakeIndexer, npt from pandas import Index _extension_array_shared_docs: dict[str, str] = {} class ExtensionArray: _typ = 'extension' __pandas_priority__ = 1000 @classmethod def _from_sequence(cls, scalars, *, dtype: Dtype | None=None, copy: bool=False) -> Self: raise AbstractMethodError(cls) @classmethod def _from_scalars(cls, scalars, *, dtype: DtypeObj) -> Self: try: return cls._from_sequence(scalars, dtype=dtype, copy=False) except (ValueError, TypeError): raise except Exception: warnings.warn('_from_scalars should only raise ValueError or TypeError. Consider overriding _from_scalars where appropriate.', stacklevel=find_stack_level()) raise @classmethod def _from_sequence_of_strings(cls, strings, *, dtype: ExtensionDtype, copy: bool=False) -> Self: raise AbstractMethodError(cls) @classmethod def _from_factorized(cls, values, original): raise AbstractMethodError(cls) @overload def __getitem__(self, item: ScalarIndexer) -> Any: ... @overload def __getitem__(self, item: SequenceIndexer) -> Self: ... def __getitem__(self, item: PositionalIndexer) -> Self | Any: raise AbstractMethodError(self) def __setitem__(self, key, value) -> None: raise NotImplementedError(f'{type(self)} does not implement __setitem__.') def __len__(self) -> int: raise AbstractMethodError(self) def __iter__(self) -> Iterator[Any]: for i in range(len(self)): yield self[i] def __contains__(self, item: object) -> bool | np.bool_: if is_scalar(item) and isna(item): if not self._can_hold_na: return False elif item is self.dtype.na_value or isinstance(item, self.dtype.type): return self._hasna else: return False else: return (item == self).any() def __eq__(self, other: object) -> ArrayLike: raise AbstractMethodError(self) def __ne__(self, other: object) -> ArrayLike: return ~(self == other) def to_numpy(self, dtype: npt.DTypeLike | None=None, copy: bool=False, na_value: object=lib.no_default) -> np.ndarray: result = np.asarray(self, dtype=dtype) if copy or na_value is not lib.no_default: result = result.copy() if na_value is not lib.no_default: result[self.isna()] = na_value return result @property def dtype(self) -> ExtensionDtype: raise AbstractMethodError(self) @property def shape(self) -> Shape: return (len(self),) @property def size(self) -> int: return np.prod(self.shape) @property def ndim(self) -> int: return 1 @property def nbytes(self) -> int: raise AbstractMethodError(self) @overload def astype(self, dtype: npt.DTypeLike, copy: bool=...) -> np.ndarray: ... @overload def astype(self, dtype: ExtensionDtype, copy: bool=...) -> ExtensionArray: ... @overload def astype(self, dtype: AstypeArg, copy: bool=...) -> ArrayLike: ... 
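# --- Illustrative aside, not part of the pandas source above: a user-level view
# --- of the generic ExtensionArray.astype dispatch defined below (extension
# --- target dtypes round-trip through the target array type; NumPy targets
# --- produce a plain ndarray). Names are hypothetical; concrete subclasses such
# --- as the masked arrays used here may override astype with the same semantics.
import numpy as np
import pandas as pd

masked = pd.array([1, 2, None], dtype="Int64")   # a masked ExtensionArray
as_float = masked.astype("Float64")              # extension target -> that dtype's array type
assert str(as_float.dtype) == "Float64"
as_obj = masked.astype(object)                   # NumPy target -> plain ndarray
assert isinstance(as_obj, np.ndarray) and as_obj[2] is pd.NA
# --- end of aside ---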
def astype(self, dtype: AstypeArg, copy: bool=True) -> ArrayLike: dtype = pandas_dtype(dtype) if dtype == self.dtype: if not copy: return self else: return self.copy() if isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() return cls._from_sequence(self, dtype=dtype, copy=copy) elif lib.is_np_dtype(dtype, 'M'): from pandas.core.arrays import DatetimeArray return DatetimeArray._from_sequence(self, dtype=dtype, copy=copy) elif lib.is_np_dtype(dtype, 'm'): from pandas.core.arrays import TimedeltaArray return TimedeltaArray._from_sequence(self, dtype=dtype, copy=copy) if not copy: return np.asarray(self, dtype=dtype) else: return np.array(self, dtype=dtype, copy=copy) def isna(self) -> np.ndarray | ExtensionArraySupportsAnyAll: raise AbstractMethodError(self) @property def _hasna(self) -> bool: return bool(self.isna().any()) def _values_for_argsort(self) -> np.ndarray: return np.array(self) def argsort(self, *, ascending: bool=True, kind: SortKind='quicksort', na_position: str='last', **kwargs) -> np.ndarray: ascending = nv.validate_argsort_with_ascending(ascending, (), kwargs) values = self._values_for_argsort() return nargsort(values, kind=kind, ascending=ascending, na_position=na_position, mask=np.asarray(self.isna())) def argmin(self, skipna: bool=True) -> int: validate_bool_kwarg(skipna, 'skipna') if not skipna and self._hasna: raise ValueError('Encountered an NA value with skipna=False') return nargminmax(self, 'argmin') def argmax(self, skipna: bool=True) -> int: validate_bool_kwarg(skipna, 'skipna') if not skipna and self._hasna: raise ValueError('Encountered an NA value with skipna=False') return nargminmax(self, 'argmax') def interpolate(self, *, method: InterpolateOptions, axis: int, index: Index, limit, limit_direction, limit_area, copy: bool, **kwargs) -> Self: raise NotImplementedError(f'{type(self).__name__} does not implement interpolate') def _pad_or_backfill(self, *, method: FillnaOptions, limit: int | None=None, limit_area: Literal['inside', 'outside'] | None=None, copy: bool=True) -> Self: mask = self.isna() if mask.any(): meth = missing.clean_fill_method(method) npmask = np.asarray(mask) if limit_area is not None and (not npmask.all()): _fill_limit_area_1d(npmask, limit_area) if meth == 'pad': indexer = libalgos.get_fill_indexer(npmask, limit=limit) return self.take(indexer, allow_fill=True) else: indexer = libalgos.get_fill_indexer(npmask[::-1], limit=limit)[::-1] return self[::-1].take(indexer, allow_fill=True) else: if not copy: return self new_values = self.copy() return new_values def fillna(self, value: object | ArrayLike, limit: int | None=None, copy: bool=True) -> Self: mask = self.isna() if limit is not None and limit < len(self): modify = mask.cumsum() > limit if modify.any(): mask = mask.copy() mask[modify] = False value = missing.check_value_size(value, mask, len(self)) if mask.any(): if not copy: new_values = self[:] else: new_values = self.copy() new_values[mask] = value elif not copy: new_values = self[:] else: new_values = self.copy() return new_values def dropna(self) -> Self: return self[~self.isna()] def duplicated(self, keep: Literal['first', 'last', False]='first') -> npt.NDArray[np.bool_]: mask = self.isna().astype(np.bool_, copy=False) return duplicated(values=self, keep=keep, mask=mask) def shift(self, periods: int=1, fill_value: object=None) -> ExtensionArray: if not len(self) or periods == 0: return self.copy() if isna(fill_value): fill_value = self.dtype.na_value empty = self._from_sequence([fill_value] * min(abs(periods), 
len(self)), dtype=self.dtype) if periods > 0: a = empty b = self[:-periods] else: a = self[abs(periods):] b = empty return self._concat_same_type([a, b]) def unique(self) -> Self: uniques = unique(self.astype(object)) return self._from_sequence(uniques, dtype=self.dtype) def searchsorted(self, value: NumpyValueArrayLike | ExtensionArray, side: Literal['left', 'right']='left', sorter: NumpySorter | None=None) -> npt.NDArray[np.intp] | np.intp: arr = self.astype(object) if isinstance(value, ExtensionArray): value = value.astype(object) return arr.searchsorted(value, side=side, sorter=sorter) def equals(self, other: object) -> bool: if type(self) != type(other): return False other = cast(ExtensionArray, other) if self.dtype != other.dtype: return False elif len(self) != len(other): return False else: equal_values = self == other if isinstance(equal_values, ExtensionArray): equal_values = equal_values.fillna(False) equal_na = self.isna() & other.isna() return bool((equal_values | equal_na).all()) def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]: return isin(np.asarray(self), values) def _values_for_factorize(self) -> tuple[np.ndarray, Any]: return (self.astype(object), np.nan) def factorize(self, use_na_sentinel: bool=True) -> tuple[np.ndarray, ExtensionArray]: (arr, na_value) = self._values_for_factorize() (codes, uniques) = factorize_array(arr, use_na_sentinel=use_na_sentinel, na_value=na_value) uniques_ea = self._from_factorized(uniques, self) return (codes, uniques_ea) _extension_array_shared_docs['repeat'] = "\n Repeat elements of a %(klass)s.\n\n Returns a new %(klass)s where each element of the current %(klass)s\n is repeated consecutively a given number of times.\n\n Parameters\n ----------\n repeats : int or array of ints\n The number of repetitions for each element. This should be a\n non-negative integer. Repeating 0 times will return an empty\n %(klass)s.\n axis : None\n Must be ``None``. 
Has no effect but is accepted for compatibility\n with numpy.\n\n Returns\n -------\n %(klass)s\n Newly created %(klass)s with repeated elements.\n\n See Also\n --------\n Series.repeat : Equivalent function for Series.\n Index.repeat : Equivalent function for Index.\n numpy.repeat : Similar method for :class:`numpy.ndarray`.\n ExtensionArray.take : Take arbitrary positions.\n\n Examples\n --------\n >>> cat = pd.Categorical(['a', 'b', 'c'])\n >>> cat\n ['a', 'b', 'c']\n Categories (3, object): ['a', 'b', 'c']\n >>> cat.repeat(2)\n ['a', 'a', 'b', 'b', 'c', 'c']\n Categories (3, object): ['a', 'b', 'c']\n >>> cat.repeat([1, 2, 3])\n ['a', 'b', 'b', 'c', 'c', 'c']\n Categories (3, object): ['a', 'b', 'c']\n " @Substitution(klass='ExtensionArray') @Appender(_extension_array_shared_docs['repeat']) def repeat(self, repeats: int | Sequence[int], axis: AxisInt | None=None) -> Self: nv.validate_repeat((), {'axis': axis}) ind = np.arange(len(self)).repeat(repeats) return self.take(ind) def take(self, indices: TakeIndexer, *, allow_fill: bool=False, fill_value: Any=None) -> Self: raise AbstractMethodError(self) def copy(self) -> Self: raise AbstractMethodError(self) def view(self, dtype: Dtype | None=None) -> ArrayLike: if dtype is not None: raise NotImplementedError(dtype) return self[:] def __repr__(self) -> str: if self.ndim > 1: return self._repr_2d() from pandas.io.formats.printing import format_object_summary data = format_object_summary(self, self._formatter(), indent_for_name=False).rstrip(', \n') class_name = f'<{type(self).__name__}>\n' footer = self._get_repr_footer() return f'{class_name}{data}\n{footer}' def _get_repr_footer(self) -> str: if self.ndim > 1: return f'Shape: {self.shape}, dtype: {self.dtype}' return f'Length: {len(self)}, dtype: {self.dtype}' def _repr_2d(self) -> str: from pandas.io.formats.printing import format_object_summary lines = [format_object_summary(x, self._formatter(), indent_for_name=False).rstrip(', \n') for x in self] data = ',\n'.join(lines) class_name = f'<{type(self).__name__}>' footer = self._get_repr_footer() return f'{class_name}\n[\n{data}\n]\n{footer}' def _formatter(self, boxed: bool=False) -> Callable[[Any], str | None]: if boxed: return str return repr def transpose(self, *axes: int) -> Self: return self[:] @property def T(self) -> Self: return self.transpose() def ravel(self, order: Literal['C', 'F', 'A', 'K'] | None='C') -> Self: return self @classmethod def _concat_same_type(cls, to_concat: Sequence[Self]) -> Self: raise AbstractMethodError(cls) @cache_readonly def _can_hold_na(self) -> bool: return self.dtype._can_hold_na def _accumulate(self, name: str, *, skipna: bool=True, **kwargs) -> ExtensionArray: raise NotImplementedError(f'cannot perform {name} with type {self.dtype}') def _reduce(self, name: str, *, skipna: bool=True, keepdims: bool=False, **kwargs): meth = getattr(self, name, None) if meth is None: raise TypeError(f"'{type(self).__name__}' with dtype {self.dtype} does not support operation '{name}'") result = meth(skipna=skipna, **kwargs) if keepdims: if name in ['min', 'max']: result = self._from_sequence([result], dtype=self.dtype) else: result = np.array([result]) return result __hash__: ClassVar[None] def _values_for_json(self) -> np.ndarray: return np.asarray(self) def _hash_pandas_object(self, *, encoding: str, hash_key: str, categorize: bool) -> npt.NDArray[np.uint64]: from pandas.core.util.hashing import hash_array (values, _) = self._values_for_factorize() return hash_array(values, encoding=encoding, hash_key=hash_key, 
categorize=categorize) def _explode(self) -> tuple[Self, npt.NDArray[np.uint64]]: values = self.copy() counts = np.ones(shape=(len(self),), dtype=np.uint64) return (values, counts) def tolist(self) -> list: if self.ndim > 1: return [x.tolist() for x in self] return list(self) def delete(self, loc: PositionalIndexer) -> Self: indexer = np.delete(np.arange(len(self)), loc) return self.take(indexer) def insert(self, loc: int, item) -> Self: loc = validate_insert_loc(loc, len(self)) item_arr = type(self)._from_sequence([item], dtype=self.dtype) return type(self)._concat_same_type([self[:loc], item_arr, self[loc:]]) def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: if is_list_like(value): val = value[mask] else: val = value self[mask] = val def _where(self, mask: npt.NDArray[np.bool_], value) -> Self: result = self.copy() if is_list_like(value): val = value[~mask] else: val = value result[~mask] = val return result def _rank(self, *, axis: AxisInt=0, method: str='average', na_option: str='keep', ascending: bool=True, pct: bool=False): if axis != 0: raise NotImplementedError return rank(self, axis=axis, method=method, na_option=na_option, ascending=ascending, pct=pct) @classmethod def _empty(cls, shape: Shape, dtype: ExtensionDtype): obj = cls._from_sequence([], dtype=dtype) taker = np.broadcast_to(np.intp(-1), shape) result = obj.take(taker, allow_fill=True) if not isinstance(result, cls) or dtype != result.dtype: raise NotImplementedError(f"Default 'empty' implementation is invalid for dtype='{dtype}'") return result def _quantile(self, qs: npt.NDArray[np.float64], interpolation: str) -> Self: mask = np.asarray(self.isna()) arr = np.asarray(self) fill_value = np.nan res_values = quantile_with_mask(arr, mask, fill_value, qs, interpolation) return type(self)._from_sequence(res_values) def _mode(self, dropna: bool=True) -> Self: return mode(self, dropna=dropna) def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): if any((isinstance(other, (ABCSeries, ABCIndex, ABCDataFrame)) for other in inputs)): return NotImplemented result = arraylike.maybe_dispatch_ufunc_to_dunder_op(self, ufunc, method, *inputs, **kwargs) if result is not NotImplemented: return result if 'out' in kwargs: return arraylike.dispatch_ufunc_with_out(self, ufunc, method, *inputs, **kwargs) if method == 'reduce': result = arraylike.dispatch_reduction_ufunc(self, ufunc, method, *inputs, **kwargs) if result is not NotImplemented: return result return arraylike.default_array_ufunc(self, ufunc, method, *inputs, **kwargs) def map(self, mapper, na_action: Literal['ignore'] | None=None): return map_array(self, mapper, na_action=na_action) def _groupby_op(self, *, how: str, has_dropped_na: bool, min_count: int, ngroups: int, ids: npt.NDArray[np.intp], **kwargs) -> ArrayLike: from pandas.core.arrays.string_ import StringDtype from pandas.core.groupby.ops import WrappedCythonOp kind = WrappedCythonOp.get_kind_from_how(how) op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na) if isinstance(self.dtype, StringDtype): if op.how not in ['any', 'all']: op._get_cython_function(op.kind, op.how, np.dtype(object), False) npvalues = self.to_numpy(object, na_value=np.nan) else: raise NotImplementedError(f'function is not implemented for this dtype: {self.dtype}') res_values = op._cython_op_ndim_compat(npvalues, min_count=min_count, ngroups=ngroups, comp_ids=ids, mask=None, **kwargs) if op.how in op.cast_blocklist: return res_values if isinstance(self.dtype, StringDtype): dtype = self.dtype 
string_array_cls = dtype.construct_array_type() return string_array_cls._from_sequence(res_values, dtype=dtype) else: raise NotImplementedError class ExtensionArraySupportsAnyAll(ExtensionArray): @overload def any(self, *, skipna: Literal[True]=...) -> bool: ... @overload def any(self, *, skipna: bool) -> bool | NAType: ... def any(self, *, skipna: bool=True) -> bool | NAType: raise AbstractMethodError(self) @overload def all(self, *, skipna: Literal[True]=...) -> bool: ... @overload def all(self, *, skipna: bool) -> bool | NAType: ... def all(self, *, skipna: bool=True) -> bool | NAType: raise AbstractMethodError(self) class ExtensionOpsMixin: @classmethod def _create_arithmetic_method(cls, op): raise AbstractMethodError(cls) @classmethod def _add_arithmetic_ops(cls) -> None: setattr(cls, '__add__', cls._create_arithmetic_method(operator.add)) setattr(cls, '__radd__', cls._create_arithmetic_method(roperator.radd)) setattr(cls, '__sub__', cls._create_arithmetic_method(operator.sub)) setattr(cls, '__rsub__', cls._create_arithmetic_method(roperator.rsub)) setattr(cls, '__mul__', cls._create_arithmetic_method(operator.mul)) setattr(cls, '__rmul__', cls._create_arithmetic_method(roperator.rmul)) setattr(cls, '__pow__', cls._create_arithmetic_method(operator.pow)) setattr(cls, '__rpow__', cls._create_arithmetic_method(roperator.rpow)) setattr(cls, '__mod__', cls._create_arithmetic_method(operator.mod)) setattr(cls, '__rmod__', cls._create_arithmetic_method(roperator.rmod)) setattr(cls, '__floordiv__', cls._create_arithmetic_method(operator.floordiv)) setattr(cls, '__rfloordiv__', cls._create_arithmetic_method(roperator.rfloordiv)) setattr(cls, '__truediv__', cls._create_arithmetic_method(operator.truediv)) setattr(cls, '__rtruediv__', cls._create_arithmetic_method(roperator.rtruediv)) setattr(cls, '__divmod__', cls._create_arithmetic_method(divmod)) setattr(cls, '__rdivmod__', cls._create_arithmetic_method(roperator.rdivmod)) @classmethod def _create_comparison_method(cls, op): raise AbstractMethodError(cls) @classmethod def _add_comparison_ops(cls) -> None: setattr(cls, '__eq__', cls._create_comparison_method(operator.eq)) setattr(cls, '__ne__', cls._create_comparison_method(operator.ne)) setattr(cls, '__lt__', cls._create_comparison_method(operator.lt)) setattr(cls, '__gt__', cls._create_comparison_method(operator.gt)) setattr(cls, '__le__', cls._create_comparison_method(operator.le)) setattr(cls, '__ge__', cls._create_comparison_method(operator.ge)) @classmethod def _create_logical_method(cls, op): raise AbstractMethodError(cls) @classmethod def _add_logical_ops(cls) -> None: setattr(cls, '__and__', cls._create_logical_method(operator.and_)) setattr(cls, '__rand__', cls._create_logical_method(roperator.rand_)) setattr(cls, '__or__', cls._create_logical_method(operator.or_)) setattr(cls, '__ror__', cls._create_logical_method(roperator.ror_)) setattr(cls, '__xor__', cls._create_logical_method(operator.xor)) setattr(cls, '__rxor__', cls._create_logical_method(roperator.rxor)) class ExtensionScalarOpsMixin(ExtensionOpsMixin): @classmethod def _create_method(cls, op, coerce_to_dtype: bool=True, result_dtype=None): def _binop(self, other): def convert_values(param): if isinstance(param, ExtensionArray) or is_list_like(param): ovalues = param else: ovalues = [param] * len(self) return ovalues if isinstance(other, (ABCSeries, ABCIndex, ABCDataFrame)): return NotImplemented lvalues = self rvalues = convert_values(other) res = [op(a, b) for (a, b) in zip(lvalues, rvalues)] def _maybe_convert(arr): if 
coerce_to_dtype: res = maybe_cast_pointwise_result(arr, self.dtype, same_dtype=False) if not isinstance(res, type(self)): res = np.asarray(arr) else: res = np.asarray(arr, dtype=result_dtype) return res if op.__name__ in {'divmod', 'rdivmod'}: (a, b) = zip(*res) return (_maybe_convert(a), _maybe_convert(b)) return _maybe_convert(res) op_name = f'__{op.__name__}__' return set_function_name(_binop, op_name, cls) @classmethod def _create_arithmetic_method(cls, op): return cls._create_method(op) @classmethod def _create_comparison_method(cls, op): return cls._create_method(op, coerce_to_dtype=False, result_dtype=bool) # File: pandas-main/pandas/core/arrays/boolean.py from __future__ import annotations import numbers from typing import TYPE_CHECKING, ClassVar, cast import numpy as np from pandas._libs import lib, missing as libmissing from pandas.core.dtypes.common import is_list_like from pandas.core.dtypes.dtypes import register_extension_dtype from pandas.core.dtypes.missing import isna from pandas.core import ops from pandas.core.array_algos import masked_accumulations from pandas.core.arrays.masked import BaseMaskedArray, BaseMaskedDtype if TYPE_CHECKING: import pyarrow from pandas._typing import DtypeObj, Self, npt, type_t from pandas.core.dtypes.dtypes import ExtensionDtype @register_extension_dtype class BooleanDtype(BaseMaskedDtype): name: ClassVar[str] = 'boolean' _internal_fill_value = False @property def type(self) -> type: return np.bool_ @property def kind(self) -> str: return 'b' @property def numpy_dtype(self) -> np.dtype: return np.dtype('bool') @classmethod def construct_array_type(cls) -> type_t[BooleanArray]: return BooleanArray def __repr__(self) -> str: return 'BooleanDtype' @property def _is_boolean(self) -> bool: return True @property def _is_numeric(self) -> bool: return True def __from_arrow__(self, array: pyarrow.Array | pyarrow.ChunkedArray) -> BooleanArray: import pyarrow if array.type != pyarrow.bool_() and (not pyarrow.types.is_null(array.type)): raise TypeError(f'Expected array of boolean type, got {array.type} instead') if isinstance(array, pyarrow.Array): chunks = [array] length = len(array) else: chunks = array.chunks length = array.length() if pyarrow.types.is_null(array.type): mask = np.ones(length, dtype=bool) data = np.empty(length, dtype=bool) return BooleanArray(data, mask) results = [] for arr in chunks: buflist = arr.buffers() data = pyarrow.BooleanArray.from_buffers(arr.type, len(arr), [None, buflist[1]], offset=arr.offset).to_numpy(zero_copy_only=False) if arr.null_count != 0: mask = pyarrow.BooleanArray.from_buffers(arr.type, len(arr), [None, buflist[0]], offset=arr.offset).to_numpy(zero_copy_only=False) mask = ~mask else: mask = np.zeros(len(arr), dtype=bool) bool_arr = BooleanArray(data, mask) results.append(bool_arr) if not results: return BooleanArray(np.array([], dtype=np.bool_), np.array([], dtype=np.bool_)) else: return BooleanArray._concat_same_type(results) def coerce_to_array(values, mask=None, copy: bool=False) -> tuple[np.ndarray, np.ndarray]: if isinstance(values, BooleanArray): if mask is not None: raise ValueError('cannot pass mask for BooleanArray input') (values, mask) = (values._data, values._mask) if copy: values = values.copy() mask = mask.copy() return (values, mask) mask_values = None if isinstance(values, np.ndarray) and values.dtype == np.bool_: if copy: values = values.copy() elif isinstance(values, np.ndarray) and values.dtype.kind in 'iufcb': mask_values = isna(values) values_bool = np.zeros(len(values), dtype=bool) 
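# Note: both the numeric and the object branches below accept non-boolean
# values only when every non-NA entry round-trips exactly through bool
# (i.e. is 0 or 1); anything else raises "Need to pass bool-like values".
# For example, pd.array([1.0, 0.0, None], dtype="boolean") succeeds, while
# pd.array([2, 0], dtype="boolean") raises.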
values_bool[~mask_values] = values[~mask_values].astype(bool) if not np.all(values_bool[~mask_values].astype(values.dtype) == values[~mask_values]): raise TypeError('Need to pass bool-like values') values = values_bool else: values_object = np.asarray(values, dtype=object) inferred_dtype = lib.infer_dtype(values_object, skipna=True) integer_like = ('floating', 'integer', 'mixed-integer-float') if inferred_dtype not in ('boolean', 'empty') + integer_like: raise TypeError('Need to pass bool-like values') mask_values = cast('npt.NDArray[np.bool_]', isna(values_object)) values = np.zeros(len(values), dtype=bool) values[~mask_values] = values_object[~mask_values].astype(bool) if inferred_dtype in integer_like and (not np.all(values[~mask_values].astype(float) == values_object[~mask_values].astype(float))): raise TypeError('Need to pass bool-like values') if mask is None and mask_values is None: mask = np.zeros(values.shape, dtype=bool) elif mask is None: mask = mask_values elif isinstance(mask, np.ndarray) and mask.dtype == np.bool_: if mask_values is not None: mask = mask | mask_values elif copy: mask = mask.copy() else: mask = np.array(mask, dtype=bool) if mask_values is not None: mask = mask | mask_values if values.shape != mask.shape: raise ValueError('values.shape and mask.shape must match') return (values, mask) class BooleanArray(BaseMaskedArray): _TRUE_VALUES = {'True', 'TRUE', 'true', '1', '1.0'} _FALSE_VALUES = {'False', 'FALSE', 'false', '0', '0.0'} @classmethod def _simple_new(cls, values: np.ndarray, mask: npt.NDArray[np.bool_]) -> Self: result = super()._simple_new(values, mask) result._dtype = BooleanDtype() return result def __init__(self, values: np.ndarray, mask: np.ndarray, copy: bool=False) -> None: if not (isinstance(values, np.ndarray) and values.dtype == np.bool_): raise TypeError("values should be boolean numpy array. 
Use the 'pd.array' function instead") self._dtype = BooleanDtype() super().__init__(values, mask, copy=copy) @property def dtype(self) -> BooleanDtype: return self._dtype @classmethod def _from_sequence_of_strings(cls, strings: list[str], *, dtype: ExtensionDtype, copy: bool=False, true_values: list[str] | None=None, false_values: list[str] | None=None, none_values: list[str] | None=None) -> BooleanArray: true_values_union = cls._TRUE_VALUES.union(true_values or []) false_values_union = cls._FALSE_VALUES.union(false_values or []) if none_values is None: none_values = [] def map_string(s) -> bool | None: if s in true_values_union: return True elif s in false_values_union: return False elif s in none_values: return None else: raise ValueError(f'{s} cannot be cast to bool') scalars = np.array(strings, dtype=object) mask = isna(scalars) scalars[~mask] = list(map(map_string, scalars[~mask])) return cls._from_sequence(scalars, dtype=dtype, copy=copy) _HANDLED_TYPES = (np.ndarray, numbers.Number, bool, np.bool_) @classmethod def _coerce_to_array(cls, value, *, dtype: DtypeObj, copy: bool=False) -> tuple[np.ndarray, np.ndarray]: if dtype: assert dtype == 'boolean' return coerce_to_array(value, copy=copy) def _logical_method(self, other, op): assert op.__name__ in {'or_', 'ror_', 'and_', 'rand_', 'xor', 'rxor'} other_is_scalar = lib.is_scalar(other) mask = None if isinstance(other, BooleanArray): (other, mask) = (other._data, other._mask) elif is_list_like(other): other = np.asarray(other, dtype='bool') if other.ndim > 1: raise NotImplementedError('can only perform ops with 1-d structures') (other, mask) = coerce_to_array(other, copy=False) elif isinstance(other, np.bool_): other = other.item() if other_is_scalar and other is not libmissing.NA and (not lib.is_bool(other)): raise TypeError(f"'other' should be pandas.NA or a bool. 
Got {type(other).__name__} instead.") if not other_is_scalar and len(self) != len(other): raise ValueError('Lengths must match') if op.__name__ in {'or_', 'ror_'}: (result, mask) = ops.kleene_or(self._data, other, self._mask, mask) elif op.__name__ in {'and_', 'rand_'}: (result, mask) = ops.kleene_and(self._data, other, self._mask, mask) else: (result, mask) = ops.kleene_xor(self._data, other, self._mask, mask) return self._maybe_mask_result(result, mask) def _accumulate(self, name: str, *, skipna: bool=True, **kwargs) -> BaseMaskedArray: data = self._data mask = self._mask if name in ('cummin', 'cummax'): op = getattr(masked_accumulations, name) (data, mask) = op(data, mask, skipna=skipna, **kwargs) return self._simple_new(data, mask) else: from pandas.core.arrays import IntegerArray return IntegerArray(data.astype(int), mask)._accumulate(name, skipna=skipna, **kwargs) # File: pandas-main/pandas/core/arrays/categorical.py from __future__ import annotations from csv import QUOTE_NONNUMERIC from functools import partial import operator from shutil import get_terminal_size from typing import TYPE_CHECKING, Literal, cast, overload import numpy as np from pandas._config import get_option from pandas._libs import NaT, algos as libalgos, lib from pandas._libs.arrays import NDArrayBacked from pandas.compat.numpy import function as nv from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import coerce_indexer_dtype, find_common_type from pandas.core.dtypes.common import ensure_int64, ensure_platform_int, is_any_real_numeric_dtype, is_bool_dtype, is_dict_like, is_hashable, is_integer_dtype, is_list_like, is_scalar, needs_i8_conversion, pandas_dtype from pandas.core.dtypes.dtypes import ArrowDtype, CategoricalDtype, CategoricalDtypeType, ExtensionDtype from pandas.core.dtypes.generic import ABCIndex, ABCSeries from pandas.core.dtypes.missing import is_valid_na_for_dtype, isna from pandas.core import algorithms, arraylike, ops from pandas.core.accessor import PandasDelegate, delegate_names from pandas.core.algorithms import factorize, take_nd from pandas.core.arrays._mixins import NDArrayBackedExtensionArray, ravel_compat from pandas.core.base import ExtensionArray, NoNewAttributesMixin, PandasObject import pandas.core.common as com from pandas.core.construction import extract_array, sanitize_array from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.core.sorting import nargsort from pandas.core.strings.object_array import ObjectStringArrayMixin from pandas.io.formats import console if TYPE_CHECKING: from collections.abc import Callable, Hashable, Iterator, Sequence from pandas._typing import ArrayLike, AstypeArg, AxisInt, Dtype, DtypeObj, NpDtype, Ordered, Self, Shape, SortKind, npt from pandas import DataFrame, Index, Series def _cat_compare_op(op): opname = f'__{op.__name__}__' fill_value = op is operator.ne @unpack_zerodim_and_defer(opname) def func(self, other): hashable = is_hashable(other) if is_list_like(other) and len(other) != len(self) and (not hashable): raise ValueError('Lengths must match.') if not self.ordered: if opname in ['__lt__', '__gt__', '__le__', '__ge__']: raise TypeError('Unordered Categoricals can only compare equality or not') if isinstance(other, Categorical): msg = "Categoricals can only be compared if 'categories' are the same." 
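# Note: comparison with another Categorical requires categories that match up
# to a permutation; when both operands are unordered but their categories are
# ordered differently, the other operand's codes are first recoded onto
# self's categories. Positions where either code is -1 (missing) receive the
# fill value (True for __ne__, False for the other comparisons). For example,
# pd.Categorical(["a", "b"]) == pd.Categorical(["b", "a"]) gives
# array([False, False]).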
if not self._categories_match_up_to_permutation(other): raise TypeError(msg) if not self.ordered and (not self.categories.equals(other.categories)): other_codes = recode_for_categories(other.codes, other.categories, self.categories, copy=False) else: other_codes = other._codes ret = op(self._codes, other_codes) mask = (self._codes == -1) | (other_codes == -1) if mask.any(): ret[mask] = fill_value return ret if hashable: if other in self.categories: i = self._unbox_scalar(other) ret = op(self._codes, i) if opname not in {'__eq__', '__ge__', '__gt__'}: mask = self._codes == -1 ret[mask] = fill_value return ret else: return ops.invalid_comparison(self, other, op) else: if opname not in ['__eq__', '__ne__']: raise TypeError(f"Cannot compare a Categorical for op {opname} with type {type(other)}.\nIf you want to compare values, use 'np.asarray(cat) other'.") if isinstance(other, ExtensionArray) and needs_i8_conversion(other.dtype): return op(other, self) return getattr(np.array(self), opname)(np.array(other)) func.__name__ = opname return func def contains(cat, key, container) -> bool: hash(key) try: loc = cat.categories.get_loc(key) except (KeyError, TypeError): return False if is_scalar(loc): return loc in container else: return any((loc_ in container for loc_ in loc)) class Categorical(NDArrayBackedExtensionArray, PandasObject, ObjectStringArrayMixin): __array_priority__ = 1000 _hidden_attrs = PandasObject._hidden_attrs | frozenset(['tolist']) _typ = 'categorical' _dtype: CategoricalDtype @classmethod def _simple_new(cls, codes: np.ndarray, dtype: CategoricalDtype) -> Self: codes = coerce_indexer_dtype(codes, dtype.categories) dtype = CategoricalDtype(ordered=False).update_dtype(dtype) return super()._simple_new(codes, dtype) def __init__(self, values, categories=None, ordered=None, dtype: Dtype | None=None, copy: bool=True) -> None: dtype = CategoricalDtype._from_values_or_dtype(values, categories, ordered, dtype) if not is_list_like(values): raise TypeError('Categorical input must be list-like') null_mask = np.array(False) vdtype = getattr(values, 'dtype', None) if isinstance(vdtype, CategoricalDtype): if dtype.categories is None: dtype = CategoricalDtype(values.categories, dtype.ordered) elif isinstance(values, range): from pandas.core.indexes.range import RangeIndex values = RangeIndex(values) elif not isinstance(values, (ABCIndex, ABCSeries, ExtensionArray)): values = com.convert_to_list_like(values) if isinstance(values, list) and len(values) == 0: values = np.array([], dtype=object) elif isinstance(values, np.ndarray): if values.ndim > 1: raise NotImplementedError('> 1 ndim Categorical are not supported at this time') values = sanitize_array(values, None) else: arr = sanitize_array(values, None) null_mask = isna(arr) if null_mask.any(): arr_list = [values[idx] for idx in np.where(~null_mask)[0]] if arr_list or arr.dtype == 'object': sanitize_dtype = None else: sanitize_dtype = arr.dtype arr = sanitize_array(arr_list, None, dtype=sanitize_dtype) values = arr if dtype.categories is None: if isinstance(values.dtype, ArrowDtype) and issubclass(values.dtype.type, CategoricalDtypeType): arr = values._pa_array.combine_chunks() categories = arr.dictionary.to_pandas(types_mapper=ArrowDtype) codes = arr.indices.to_numpy() dtype = CategoricalDtype(categories, values.dtype.pyarrow_dtype.ordered) else: if not isinstance(values, ABCIndex): values = sanitize_array(values, None) try: (codes, categories) = factorize(values, sort=True) except TypeError as err: (codes, categories) = factorize(values, 
sort=False) if dtype.ordered: raise TypeError("'values' is not ordered, please explicitly specify the categories order by passing in a categories argument.") from err dtype = CategoricalDtype(categories, dtype.ordered) elif isinstance(values.dtype, CategoricalDtype): old_codes = extract_array(values)._codes codes = recode_for_categories(old_codes, values.dtype.categories, dtype.categories, copy=copy) else: codes = _get_codes_for_values(values, dtype.categories) if null_mask.any(): full_codes = -np.ones(null_mask.shape, dtype=codes.dtype) full_codes[~null_mask] = codes codes = full_codes dtype = CategoricalDtype(ordered=False).update_dtype(dtype) arr = coerce_indexer_dtype(codes, dtype.categories) super().__init__(arr, dtype) @property def dtype(self) -> CategoricalDtype: return self._dtype @property def _internal_fill_value(self) -> int: dtype = self._ndarray.dtype return dtype.type(-1) @classmethod def _from_sequence(cls, scalars, *, dtype: Dtype | None=None, copy: bool=False) -> Self: return cls(scalars, dtype=dtype, copy=copy) @classmethod def _from_scalars(cls, scalars, *, dtype: DtypeObj) -> Self: if dtype is None: raise NotImplementedError res = cls._from_sequence(scalars, dtype=dtype) mask = isna(scalars) if not (mask == res.isna()).all(): raise ValueError return res @overload def astype(self, dtype: npt.DTypeLike, copy: bool=...) -> np.ndarray: ... @overload def astype(self, dtype: ExtensionDtype, copy: bool=...) -> ExtensionArray: ... @overload def astype(self, dtype: AstypeArg, copy: bool=...) -> ArrayLike: ... def astype(self, dtype: AstypeArg, copy: bool=True) -> ArrayLike: dtype = pandas_dtype(dtype) result: Categorical | np.ndarray if self.dtype is dtype: result = self.copy() if copy else self elif isinstance(dtype, CategoricalDtype): dtype = self.dtype.update_dtype(dtype) self = self.copy() if copy else self result = self._set_dtype(dtype) elif isinstance(dtype, ExtensionDtype): return super().astype(dtype, copy=copy) elif dtype.kind in 'iu' and self.isna().any(): raise ValueError('Cannot convert float NaN to integer') elif len(self.codes) == 0 or len(self.categories) == 0: result = np.array(self, dtype=dtype, copy=copy) else: new_cats = self.categories._values try: new_cats = new_cats.astype(dtype=dtype, copy=copy) fill_value = self.categories._na_value if not is_valid_na_for_dtype(fill_value, dtype): fill_value = lib.item_from_zerodim(np.array(self.categories._na_value).astype(dtype)) except (TypeError, ValueError) as err: msg = f'Cannot cast {self.categories.dtype} dtype to {dtype}' raise ValueError(msg) from err result = take_nd(new_cats, ensure_platform_int(self._codes), fill_value=fill_value) return result @classmethod def _from_inferred_categories(cls, inferred_categories, inferred_codes, dtype, true_values=None) -> Self: from pandas import Index, to_datetime, to_numeric, to_timedelta cats = Index(inferred_categories) known_categories = isinstance(dtype, CategoricalDtype) and dtype.categories is not None if known_categories: if is_any_real_numeric_dtype(dtype.categories.dtype): cats = to_numeric(inferred_categories, errors='coerce') elif lib.is_np_dtype(dtype.categories.dtype, 'M'): cats = to_datetime(inferred_categories, errors='coerce') elif lib.is_np_dtype(dtype.categories.dtype, 'm'): cats = to_timedelta(inferred_categories, errors='coerce') elif is_bool_dtype(dtype.categories.dtype): if true_values is None: true_values = ['True', 'TRUE', 'true'] cats = cats.isin(true_values) if known_categories: categories = dtype.categories codes = 
recode_for_categories(inferred_codes, cats, categories) elif not cats.is_monotonic_increasing: unsorted = cats.copy() categories = cats.sort_values() codes = recode_for_categories(inferred_codes, unsorted, categories) dtype = CategoricalDtype(categories, ordered=False) else: dtype = CategoricalDtype(cats, ordered=False) codes = inferred_codes return cls._simple_new(codes, dtype=dtype) @classmethod def from_codes(cls, codes, categories=None, ordered=None, dtype: Dtype | None=None, validate: bool=True) -> Self: dtype = CategoricalDtype._from_values_or_dtype(categories=categories, ordered=ordered, dtype=dtype) if dtype.categories is None: msg = "The categories must be provided in 'categories' or 'dtype'. Both were None." raise ValueError(msg) if validate: codes = cls._validate_codes_for_dtype(codes, dtype=dtype) return cls._simple_new(codes, dtype=dtype) @property def categories(self) -> Index: return self.dtype.categories @property def ordered(self) -> Ordered: return self.dtype.ordered @property def codes(self) -> np.ndarray: v = self._codes.view() v.flags.writeable = False return v def _set_categories(self, categories, fastpath: bool=False) -> None: if fastpath: new_dtype = CategoricalDtype._from_fastpath(categories, self.ordered) else: new_dtype = CategoricalDtype(categories, ordered=self.ordered) if not fastpath and self.dtype.categories is not None and (len(new_dtype.categories) != len(self.dtype.categories)): raise ValueError('new categories need to have the same number of items as the old categories!') super().__init__(self._ndarray, new_dtype) def _set_dtype(self, dtype: CategoricalDtype) -> Self: codes = recode_for_categories(self.codes, self.categories, dtype.categories) return type(self)._simple_new(codes, dtype=dtype) def set_ordered(self, value: bool) -> Self: new_dtype = CategoricalDtype(self.categories, ordered=value) cat = self.copy() NDArrayBacked.__init__(cat, cat._ndarray, new_dtype) return cat def as_ordered(self) -> Self: return self.set_ordered(True) def as_unordered(self) -> Self: return self.set_ordered(False) def set_categories(self, new_categories, ordered=None, rename: bool=False) -> Self: if ordered is None: ordered = self.dtype.ordered new_dtype = CategoricalDtype(new_categories, ordered=ordered) cat = self.copy() if rename: if cat.dtype.categories is not None and len(new_dtype.categories) < len(cat.dtype.categories): cat._codes[cat._codes >= len(new_dtype.categories)] = -1 codes = cat._codes else: codes = recode_for_categories(cat.codes, cat.categories, new_dtype.categories) NDArrayBacked.__init__(cat, codes, new_dtype) return cat def rename_categories(self, new_categories) -> Self: if is_dict_like(new_categories): new_categories = [new_categories.get(item, item) for item in self.categories] elif callable(new_categories): new_categories = [new_categories(item) for item in self.categories] cat = self.copy() cat._set_categories(new_categories) return cat def reorder_categories(self, new_categories, ordered=None) -> Self: if len(self.categories) != len(new_categories) or not self.categories.difference(new_categories).empty: raise ValueError('items in new_categories are not the same as in old categories') return self.set_categories(new_categories, ordered=ordered) def add_categories(self, new_categories) -> Self: if not is_list_like(new_categories): new_categories = [new_categories] already_included = set(new_categories) & set(self.dtype.categories) if len(already_included) != 0: raise ValueError(f'new categories must not include old categories: {already_included}') 
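# Note: add_categories appends the new categories after the existing ones;
# when the new categories carry a dtype, a common dtype is computed with
# find_common_type and used for the combined categories index.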
if hasattr(new_categories, 'dtype'): from pandas import Series dtype = find_common_type([self.dtype.categories.dtype, new_categories.dtype]) new_categories = Series(list(self.dtype.categories) + list(new_categories), dtype=dtype) else: new_categories = list(self.dtype.categories) + list(new_categories) new_dtype = CategoricalDtype(new_categories, self.ordered) cat = self.copy() codes = coerce_indexer_dtype(cat._ndarray, new_dtype.categories) NDArrayBacked.__init__(cat, codes, new_dtype) return cat def remove_categories(self, removals) -> Self: from pandas import Index if not is_list_like(removals): removals = [removals] removals = Index(removals).unique().dropna() new_categories = self.dtype.categories.difference(removals, sort=False) if self.dtype.ordered is True else self.dtype.categories.difference(removals) not_included = removals.difference(self.dtype.categories) if len(not_included) != 0: not_included = set(not_included) raise ValueError(f'removals must all be in old categories: {not_included}') return self.set_categories(new_categories, ordered=self.ordered, rename=False) def remove_unused_categories(self) -> Self: (idx, inv) = np.unique(self._codes, return_inverse=True) if idx.size != 0 and idx[0] == -1: (idx, inv) = (idx[1:], inv - 1) new_categories = self.dtype.categories.take(idx) new_dtype = CategoricalDtype._from_fastpath(new_categories, ordered=self.ordered) new_codes = coerce_indexer_dtype(inv, new_dtype.categories) cat = self.copy() NDArrayBacked.__init__(cat, new_codes, new_dtype) return cat def map(self, mapper, na_action: Literal['ignore'] | None=None): assert callable(mapper) or is_dict_like(mapper) new_categories = self.categories.map(mapper) has_nans = np.any(self._codes == -1) na_val = np.nan if na_action is None and has_nans: na_val = mapper(np.nan) if callable(mapper) else mapper.get(np.nan, np.nan) if new_categories.is_unique and (not new_categories.hasnans) and (na_val is np.nan): new_dtype = CategoricalDtype(new_categories, ordered=self.ordered) return self.from_codes(self._codes.copy(), dtype=new_dtype, validate=False) if has_nans: new_categories = new_categories.insert(len(new_categories), na_val) return np.take(new_categories, self._codes) __eq__ = _cat_compare_op(operator.eq) __ne__ = _cat_compare_op(operator.ne) __lt__ = _cat_compare_op(operator.lt) __gt__ = _cat_compare_op(operator.gt) __le__ = _cat_compare_op(operator.le) __ge__ = _cat_compare_op(operator.ge) def _validate_setitem_value(self, value): if not is_hashable(value): return self._validate_listlike(value) else: return self._validate_scalar(value) def _validate_scalar(self, fill_value): if is_valid_na_for_dtype(fill_value, self.categories.dtype): fill_value = -1 elif fill_value in self.categories: fill_value = self._unbox_scalar(fill_value) else: raise TypeError(f'Cannot setitem on a Categorical with a new category ({fill_value}), set the categories first') from None return fill_value @classmethod def _validate_codes_for_dtype(cls, codes, *, dtype: CategoricalDtype) -> np.ndarray: if isinstance(codes, ExtensionArray) and is_integer_dtype(codes.dtype): if isna(codes).any(): raise ValueError('codes cannot contain NA values') codes = codes.to_numpy(dtype=np.int64) else: codes = np.asarray(codes) if len(codes) and codes.dtype.kind not in 'iu': raise ValueError('codes need to be array-like integers') if len(codes) and (codes.max() >= len(dtype.categories) or codes.min() < -1): raise ValueError('codes need to be between -1 and len(categories)-1') return codes @ravel_compat def __array__(self, dtype: 
NpDtype | None=None, copy: bool | None=None) -> np.ndarray: ret = take_nd(self.categories._values, self._codes) if dtype and np.dtype(dtype) != self.categories.dtype: return np.asarray(ret, dtype) return np.asarray(ret) def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): result = arraylike.maybe_dispatch_ufunc_to_dunder_op(self, ufunc, method, *inputs, **kwargs) if result is not NotImplemented: return result if 'out' in kwargs: return arraylike.dispatch_ufunc_with_out(self, ufunc, method, *inputs, **kwargs) if method == 'reduce': result = arraylike.dispatch_reduction_ufunc(self, ufunc, method, *inputs, **kwargs) if result is not NotImplemented: return result raise TypeError(f'Object with dtype {self.dtype} cannot perform the numpy op {ufunc.__name__}') def __setstate__(self, state) -> None: if not isinstance(state, dict): return super().__setstate__(state) if '_dtype' not in state: state['_dtype'] = CategoricalDtype(state['_categories'], state['_ordered']) if '_codes' in state and '_ndarray' not in state: state['_ndarray'] = state.pop('_codes') super().__setstate__(state) @property def nbytes(self) -> int: return self._codes.nbytes + self.dtype.categories.values.nbytes def memory_usage(self, deep: bool=False) -> int: return self._codes.nbytes + self.dtype.categories.memory_usage(deep=deep) def isna(self) -> npt.NDArray[np.bool_]: return self._codes == -1 isnull = isna def notna(self) -> npt.NDArray[np.bool_]: return ~self.isna() notnull = notna def value_counts(self, dropna: bool=True) -> Series: from pandas import CategoricalIndex, Series (code, cat) = (self._codes, self.categories) (ncat, mask) = (len(cat), code >= 0) (ix, clean) = (np.arange(ncat), mask.all()) if dropna or clean: obs = code if clean else code[mask] count = np.bincount(obs, minlength=ncat or 0) else: count = np.bincount(np.where(mask, code, ncat)) ix = np.append(ix, -1) ix = coerce_indexer_dtype(ix, self.dtype.categories) ix_categorical = self._from_backing_data(ix) return Series(count, index=CategoricalIndex(ix_categorical), dtype='int64', name='count', copy=False) @classmethod def _empty(cls, shape: Shape, dtype: CategoricalDtype) -> Self: arr = cls._from_sequence([], dtype=dtype) backing = np.zeros(shape, dtype=arr._ndarray.dtype) return arr._from_backing_data(backing) def _internal_get_values(self) -> ArrayLike: if needs_i8_conversion(self.categories.dtype): return self.categories.take(self._codes, fill_value=NaT)._values elif is_integer_dtype(self.categories.dtype) and -1 in self._codes: return self.categories.astype('object').take(self._codes, fill_value=np.nan)._values return np.array(self) def check_for_ordered(self, op) -> None: if not self.ordered: raise TypeError(f'Categorical is not ordered for operation {op}\nyou can use .as_ordered() to change the Categorical to an ordered one\n') def argsort(self, *, ascending: bool=True, kind: SortKind='quicksort', **kwargs) -> npt.NDArray[np.intp]: return super().argsort(ascending=ascending, kind=kind, **kwargs) @overload def sort_values(self, *, inplace: Literal[False]=..., ascending: bool=..., na_position: str=...) -> Self: ... @overload def sort_values(self, *, inplace: Literal[True], ascending: bool=..., na_position: str=...) -> None: ... 
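# --- [editor example, not pandas source] -----------------------------------
# Illustrative usage of the codes/categories machinery defined in this class;
# the values shown are examples only.
import pandas as pd

cat = pd.Categorical(["b", "a", "c", "a", None], categories=["a", "b", "c"], ordered=True)
cat.codes          # array([ 1,  0,  2,  0, -1], dtype=int8); -1 encodes missing
cat.categories     # Index(['a', 'b', 'c'])
cat.min()          # 'a' -- allowed only because ordered=True (see check_for_ordered)
cat.sort_values()  # sorts by category order; NaN goes last (na_position='last')
cat.value_counts() # per-category counts; dropna=True skips the -1 codes
# --- [end editor example] ---------------------------------------------------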
def sort_values(self, *, inplace: bool=False, ascending: bool=True, na_position: str='last') -> Self | None: inplace = validate_bool_kwarg(inplace, 'inplace') if na_position not in ['last', 'first']: raise ValueError(f'invalid na_position: {na_position!r}') sorted_idx = nargsort(self, ascending=ascending, na_position=na_position) if not inplace: codes = self._codes[sorted_idx] return self._from_backing_data(codes) self._codes[:] = self._codes[sorted_idx] return None def _rank(self, *, axis: AxisInt=0, method: str='average', na_option: str='keep', ascending: bool=True, pct: bool=False): if axis != 0: raise NotImplementedError vff = self._values_for_rank() return algorithms.rank(vff, axis=axis, method=method, na_option=na_option, ascending=ascending, pct=pct) def _values_for_rank(self) -> np.ndarray: from pandas import Series if self.ordered: values = self.codes mask = values == -1 if mask.any(): values = values.astype('float64') values[mask] = np.nan elif is_any_real_numeric_dtype(self.categories.dtype): values = np.array(self) else: values = np.array(self.rename_categories(Series(self.categories, copy=False).rank().values)) return values def _hash_pandas_object(self, *, encoding: str, hash_key: str, categorize: bool) -> npt.NDArray[np.uint64]: from pandas.core.util.hashing import hash_array values = np.asarray(self.categories._values) hashed = hash_array(values, encoding, hash_key, categorize=False) mask = self.isna() if len(hashed): result = hashed.take(self._codes) else: result = np.zeros(len(mask), dtype='uint64') if mask.any(): result[mask] = lib.u8max return result @property def _codes(self) -> np.ndarray: return self._ndarray def _box_func(self, i: int): if i == -1: return np.nan return self.categories[i] def _unbox_scalar(self, key) -> int: code = self.categories.get_loc(key) code = self._ndarray.dtype.type(code) return code def __iter__(self) -> Iterator: if self.ndim == 1: return iter(self._internal_get_values().tolist()) else: return (self[n] for n in range(len(self))) def __contains__(self, key) -> bool: if is_valid_na_for_dtype(key, self.categories.dtype): return bool(self.isna().any()) return contains(self, key, container=self._codes) def _formatter(self, boxed: bool=False) -> None: return None def _repr_categories(self) -> list[str]: max_categories = 10 if get_option('display.max_categories') == 0 else get_option('display.max_categories') from pandas.io.formats import format as fmt format_array = partial(fmt.format_array, formatter=None, quoting=QUOTE_NONNUMERIC) if len(self.categories) > max_categories: num = max_categories // 2 head = format_array(self.categories[:num]._values) tail = format_array(self.categories[-num:]._values) category_strs = head + ['...'] + tail else: category_strs = format_array(self.categories._values) category_strs = [x.strip() for x in category_strs] return category_strs def _get_repr_footer(self) -> str: category_strs = self._repr_categories() dtype = str(self.categories.dtype) levheader = f'Categories ({len(self.categories)}, {dtype}): ' (width, _) = get_terminal_size() max_width = get_option('display.width') or width if console.in_ipython_frontend(): max_width = 0 levstring = '' start = True cur_col_len = len(levheader) (sep_len, sep) = (3, ' < ') if self.ordered else (2, ', ') linesep = f'{sep.rstrip()}\n' for val in category_strs: if max_width != 0 and cur_col_len + sep_len + len(val) > max_width: levstring += linesep + ' ' * (len(levheader) + 1) cur_col_len = len(levheader) + 1 elif not start: levstring += sep cur_col_len += len(val) levstring 
+= val start = False return f"{levheader}[{levstring.replace(' < ... < ', ' ... ')}]" def _get_values_repr(self) -> str: from pandas.io.formats import format as fmt assert len(self) > 0 vals = self._internal_get_values() fmt_values = fmt.format_array(vals, None, float_format=None, na_rep='NaN', quoting=QUOTE_NONNUMERIC) fmt_values = [i.strip() for i in fmt_values] joined = ', '.join(fmt_values) result = '[' + joined + ']' return result def __repr__(self) -> str: footer = self._get_repr_footer() length = len(self) max_len = 10 if length > max_len: num = max_len // 2 head = self[:num]._get_values_repr() tail = self[-(max_len - num):]._get_values_repr() body = f'{head[:-1]}, ..., {tail[1:]}' length_info = f'Length: {len(self)}' result = f'{body}\n{length_info}\n{footer}' elif length > 0: body = self._get_values_repr() result = f'{body}\n{footer}' else: body = '[]' result = f'{body}, {footer}' return result def _validate_listlike(self, value): value = extract_array(value, extract_numpy=True) if isinstance(value, Categorical): if self.dtype != value.dtype: raise TypeError('Cannot set a Categorical with another, without identical categories') value = self._encode_with_my_categories(value) return value._codes from pandas import Index to_add = Index._with_infer(value, tupleize_cols=False).difference(self.categories) if len(to_add) and (not isna(to_add).all()): raise TypeError('Cannot setitem on a Categorical with a new category, set the categories first') codes = self.categories.get_indexer(value) return codes.astype(self._ndarray.dtype, copy=False) def _reverse_indexer(self) -> dict[Hashable, npt.NDArray[np.intp]]: categories = self.categories (r, counts) = libalgos.groupsort_indexer(ensure_platform_int(self.codes), categories.size) counts = ensure_int64(counts).cumsum() _result = (r[start:end] for (start, end) in zip(counts, counts[1:])) return dict(zip(categories, _result)) def _reduce(self, name: str, *, skipna: bool=True, keepdims: bool=False, **kwargs): result = super()._reduce(name, skipna=skipna, keepdims=keepdims, **kwargs) if name in ['argmax', 'argmin']: return result if keepdims: return type(self)(result, dtype=self.dtype) else: return result def min(self, *, skipna: bool=True, **kwargs): nv.validate_minmax_axis(kwargs.get('axis', 0)) nv.validate_min((), kwargs) self.check_for_ordered('min') if not len(self._codes): return self.dtype.na_value good = self._codes != -1 if not good.all(): if skipna and good.any(): pointer = self._codes[good].min() else: return np.nan else: pointer = self._codes.min() return self._wrap_reduction_result(None, pointer) def max(self, *, skipna: bool=True, **kwargs): nv.validate_minmax_axis(kwargs.get('axis', 0)) nv.validate_max((), kwargs) self.check_for_ordered('max') if not len(self._codes): return self.dtype.na_value good = self._codes != -1 if not good.all(): if skipna and good.any(): pointer = self._codes[good].max() else: return np.nan else: pointer = self._codes.max() return self._wrap_reduction_result(None, pointer) def _mode(self, dropna: bool=True) -> Categorical: codes = self._codes mask = None if dropna: mask = self.isna() res_codes = algorithms.mode(codes, mask=mask) res_codes = cast(np.ndarray, res_codes) assert res_codes.dtype == codes.dtype res = self._from_backing_data(res_codes) return res def unique(self) -> Self: return super().unique() def equals(self, other: object) -> bool: if not isinstance(other, Categorical): return False elif self._categories_match_up_to_permutation(other): other = self._encode_with_my_categories(other) return 
np.array_equal(self._codes, other._codes) return False def _accumulate(self, name: str, skipna: bool=True, **kwargs) -> Self: func: Callable if name == 'cummin': func = np.minimum.accumulate elif name == 'cummax': func = np.maximum.accumulate else: raise TypeError(f'Accumulation {name} not supported for {type(self)}') self.check_for_ordered(name) codes = self.codes.copy() mask = self.isna() if func == np.minimum.accumulate: codes[mask] = np.iinfo(codes.dtype.type).max if not skipna: mask = np.maximum.accumulate(mask) codes = func(codes) codes[mask] = -1 return self._simple_new(codes, dtype=self._dtype) @classmethod def _concat_same_type(cls, to_concat: Sequence[Self], axis: AxisInt=0) -> Self: from pandas.core.dtypes.concat import union_categoricals first = to_concat[0] if axis >= first.ndim: raise ValueError(f'axis {axis} is out of bounds for array of dimension {first.ndim}') if axis == 1: if not all((x.ndim == 2 for x in to_concat)): raise ValueError tc_flat = [] for obj in to_concat: tc_flat.extend([obj[:, i] for i in range(obj.shape[1])]) res_flat = cls._concat_same_type(tc_flat, axis=0) result = res_flat.reshape(len(first), -1, order='F') return result result = union_categoricals(to_concat) return result def _encode_with_my_categories(self, other: Categorical) -> Categorical: codes = recode_for_categories(other.codes, other.categories, self.categories, copy=False) return self._from_backing_data(codes) def _categories_match_up_to_permutation(self, other: Categorical) -> bool: return hash(self.dtype) == hash(other.dtype) def describe(self) -> DataFrame: counts = self.value_counts(dropna=False) freqs = counts / counts.sum() from pandas import Index from pandas.core.reshape.concat import concat result = concat([counts, freqs], ignore_index=True, axis=1) result.columns = Index(['counts', 'freqs']) result.index.name = 'categories' return result def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]: null_mask = np.asarray(isna(values)) code_values = self.categories.get_indexer_for(values) code_values = code_values[null_mask | (code_values >= 0)] return algorithms.isin(self.codes, code_values) def _str_map(self, f, na_value=np.nan, dtype=np.dtype('object'), convert: bool=True): from pandas.core.arrays import NumpyExtensionArray categories = self.categories codes = self.codes result = NumpyExtensionArray(categories.to_numpy())._str_map(f, na_value, dtype) return take_nd(result, codes, fill_value=na_value) def _str_get_dummies(self, sep: str='|', dtype: NpDtype | None=None): from pandas.core.arrays import NumpyExtensionArray return NumpyExtensionArray(self.astype(str))._str_get_dummies(sep, dtype) def _groupby_op(self, *, how: str, has_dropped_na: bool, min_count: int, ngroups: int, ids: npt.NDArray[np.intp], **kwargs): from pandas.core.groupby.ops import WrappedCythonOp kind = WrappedCythonOp.get_kind_from_how(how) op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na) dtype = self.dtype if how in ['sum', 'prod', 'cumsum', 'cumprod', 'skew']: raise TypeError(f'{dtype} type does not support {how} operations') if how in ['min', 'max', 'rank', 'idxmin', 'idxmax'] and (not dtype.ordered): raise TypeError(f'Cannot perform {how} with non-ordered Categorical') if how not in ['rank', 'any', 'all', 'first', 'last', 'min', 'max', 'idxmin', 'idxmax']: if kind == 'transform': raise TypeError(f'{dtype} type does not support {how} operations') raise TypeError(f"{dtype} dtype does not support aggregation '{how}'") result_mask = None mask = self.isna() if how == 'rank': assert 
self.ordered npvalues = self._ndarray elif how in ['first', 'last', 'min', 'max', 'idxmin', 'idxmax']: npvalues = self._ndarray result_mask = np.zeros(ngroups, dtype=bool) else: npvalues = self.astype(bool) res_values = op._cython_op_ndim_compat(npvalues, min_count=min_count, ngroups=ngroups, comp_ids=ids, mask=mask, result_mask=result_mask, **kwargs) if how in op.cast_blocklist: return res_values elif how in ['first', 'last', 'min', 'max']: res_values[result_mask == 1] = -1 return self._from_backing_data(res_values) @delegate_names(delegate=Categorical, accessors=['categories', 'ordered'], typ='property') @delegate_names(delegate=Categorical, accessors=['rename_categories', 'reorder_categories', 'add_categories', 'remove_categories', 'remove_unused_categories', 'set_categories', 'as_ordered', 'as_unordered'], typ='method') class CategoricalAccessor(PandasDelegate, PandasObject, NoNewAttributesMixin): def __init__(self, data) -> None: self._validate(data) self._parent = data.values self._index = data.index self._name = data.name self._freeze() @staticmethod def _validate(data) -> None: if not isinstance(data.dtype, CategoricalDtype): raise AttributeError("Can only use .cat accessor with a 'category' dtype") def _delegate_property_get(self, name: str): return getattr(self._parent, name) def _delegate_property_set(self, name: str, new_values) -> None: setattr(self._parent, name, new_values) @property def codes(self) -> Series: from pandas import Series return Series(self._parent.codes, index=self._index) def _delegate_method(self, name: str, *args, **kwargs): from pandas import Series method = getattr(self._parent, name) res = method(*args, **kwargs) if res is not None: return Series(res, index=self._index, name=self._name) def _get_codes_for_values(values: Index | Series | ExtensionArray | np.ndarray, categories: Index) -> np.ndarray: codes = categories.get_indexer_for(values) return coerce_indexer_dtype(codes, categories) def recode_for_categories(codes: np.ndarray, old_categories, new_categories, copy: bool=True) -> np.ndarray: if len(old_categories) == 0: if copy: return codes.copy() return codes elif new_categories.equals(old_categories): if copy: return codes.copy() return codes indexer = coerce_indexer_dtype(new_categories.get_indexer_for(old_categories), new_categories) new_codes = take_nd(indexer, codes, fill_value=-1) return new_codes def factorize_from_iterable(values) -> tuple[np.ndarray, Index]: from pandas import CategoricalIndex if not is_list_like(values): raise TypeError('Input must be list-like') categories: Index vdtype = getattr(values, 'dtype', None) if isinstance(vdtype, CategoricalDtype): values = extract_array(values) cat_codes = np.arange(len(values.categories), dtype=values.codes.dtype) cat = Categorical.from_codes(cat_codes, dtype=values.dtype, validate=False) categories = CategoricalIndex(cat) codes = values.codes else: cat = Categorical(values, ordered=False) categories = cat.categories codes = cat.codes return (codes, categories) def factorize_from_iterables(iterables) -> tuple[list[np.ndarray], list[Index]]: if len(iterables) == 0: return ([], []) (codes, categories) = zip(*(factorize_from_iterable(it) for it in iterables)) return (list(codes), list(categories)) # File: pandas-main/pandas/core/arrays/datetimelike.py from __future__ import annotations from datetime import datetime, timedelta from functools import wraps import operator from typing import TYPE_CHECKING, Any, Literal, Union, cast, final, overload import warnings import numpy as np from 
pandas._config import using_string_dtype from pandas._config.config import get_option from pandas._libs import algos, lib from pandas._libs.tslibs import BaseOffset, IncompatibleFrequency, NaT, NaTType, Period, Resolution, Tick, Timedelta, Timestamp, add_overflowsafe, astype_overflowsafe, get_unit_from_dtype, iNaT, ints_to_pydatetime, ints_to_pytimedelta, periods_per_day, to_offset from pandas._libs.tslibs.fields import RoundTo, round_nsint64 from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions from pandas._libs.tslibs.timedeltas import get_unit_for_round from pandas._libs.tslibs.timestamps import integer_op_not_supported from pandas._typing import ArrayLike, AxisInt, DatetimeLikeScalar, Dtype, DtypeObj, F, InterpolateOptions, NpDtype, PositionalIndexer2D, PositionalIndexerTuple, ScalarIndexer, Self, SequenceIndexer, TakeIndexer, TimeAmbiguous, TimeNonexistent, npt from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError, InvalidComparison, PerformanceWarning from pandas.util._decorators import Appender, Substitution, cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike from pandas.core.dtypes.common import is_all_strings, is_integer_dtype, is_list_like, is_object_dtype, is_string_dtype, pandas_dtype from pandas.core.dtypes.dtypes import ArrowDtype, CategoricalDtype, DatetimeTZDtype, ExtensionDtype, PeriodDtype from pandas.core.dtypes.generic import ABCCategorical, ABCMultiIndex from pandas.core.dtypes.missing import is_valid_na_for_dtype, isna from pandas.core import algorithms, missing, nanops, ops from pandas.core.algorithms import isin, map_array, unique1d from pandas.core.array_algos import datetimelike_accumulations from pandas.core.arraylike import OpsMixin from pandas.core.arrays._mixins import NDArrayBackedExtensionArray, ravel_compat from pandas.core.arrays.arrow.array import ArrowExtensionArray from pandas.core.arrays.base import ExtensionArray from pandas.core.arrays.integer import IntegerArray import pandas.core.common as com from pandas.core.construction import array as pd_array, ensure_wrapped_if_datetimelike, extract_array from pandas.core.indexers import check_array_indexer, check_setitem_lengths from pandas.core.ops.common import unpack_zerodim_and_defer from pandas.core.ops.invalid import invalid_comparison, make_invalid_op from pandas.tseries import frequencies if TYPE_CHECKING: from collections.abc import Callable, Iterator, Sequence from pandas import Index from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray DTScalarOrNaT = Union[DatetimeLikeScalar, NaTType] def _make_unpacked_invalid_op(op_name: str): op = make_invalid_op(op_name) return unpack_zerodim_and_defer(op_name)(op) def _period_dispatch(meth: F) -> F: @wraps(meth) def new_meth(self, *args, **kwargs): if not isinstance(self.dtype, PeriodDtype): return meth(self, *args, **kwargs) arr = self.view('M8[ns]') result = meth(arr, *args, **kwargs) if result is NaT: return NaT elif isinstance(result, Timestamp): return self._box_func(result._value) res_i8 = result.view('i8') return self._from_backing_data(res_i8) return cast(F, new_meth) class DatetimeLikeArrayMixin(OpsMixin, NDArrayBackedExtensionArray): _infer_matches: tuple[str, ...] _is_recognized_dtype: Callable[[DtypeObj], bool] _recognized_scalars: tuple[type, ...] 
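# --- Editor's note (illustrative, not part of the original pandas source) ---
# DatetimeLikeArrayMixin backs DatetimeArray, TimedeltaArray and PeriodArray
# with a single integer-valued ndarray; ``asi8`` views that storage as int64
# and missing entries are the ``iNaT`` sentinel. A rough sketch of the
# behaviour this buys, assuming ``pd`` is imported as usual:
#
# >>> arr = pd.array(pd.to_datetime(["2020-01-01", None]))
# >>> arr.isna()
# array([False,  True])
# >>> arr.asi8[1] == iNaT
# True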
_ndarray: np.ndarray freq: BaseOffset | None @cache_readonly def _can_hold_na(self) -> bool: return True def __init__(self, data, dtype: Dtype | None=None, freq=None, copy: bool=False) -> None: raise AbstractMethodError(self) @property def _scalar_type(self) -> type[DatetimeLikeScalar]: raise AbstractMethodError(self) def _scalar_from_string(self, value: str) -> DTScalarOrNaT: raise AbstractMethodError(self) def _unbox_scalar(self, value: DTScalarOrNaT) -> np.int64 | np.datetime64 | np.timedelta64: raise AbstractMethodError(self) def _check_compatible_with(self, other: DTScalarOrNaT) -> None: raise AbstractMethodError(self) def _box_func(self, x): raise AbstractMethodError(self) def _box_values(self, values) -> np.ndarray: return lib.map_infer(values, self._box_func, convert=False) def __iter__(self) -> Iterator: if self.ndim > 1: return (self[n] for n in range(len(self))) else: return (self._box_func(v) for v in self.asi8) @property def asi8(self) -> npt.NDArray[np.int64]: return self._ndarray.view('i8') def _format_native_types(self, *, na_rep: str | float='NaT', date_format=None) -> npt.NDArray[np.object_]: raise AbstractMethodError(self) def _formatter(self, boxed: bool=False) -> Callable[[object], str]: return "'{}'".format def __array__(self, dtype: NpDtype | None=None, copy: bool | None=None) -> np.ndarray: if is_object_dtype(dtype): return np.array(list(self), dtype=object) return self._ndarray @overload def __getitem__(self, key: ScalarIndexer) -> DTScalarOrNaT: ... @overload def __getitem__(self, key: SequenceIndexer | PositionalIndexerTuple) -> Self: ... def __getitem__(self, key: PositionalIndexer2D) -> Self | DTScalarOrNaT: result = cast('Union[Self, DTScalarOrNaT]', super().__getitem__(key)) if lib.is_scalar(result): return result else: result = cast(Self, result) result._freq = self._get_getitem_freq(key) return result def _get_getitem_freq(self, key) -> BaseOffset | None: is_period = isinstance(self.dtype, PeriodDtype) if is_period: freq = self.freq elif self.ndim != 1: freq = None else: key = check_array_indexer(self, key) freq = None if isinstance(key, slice): if self.freq is not None and key.step is not None: freq = key.step * self.freq else: freq = self.freq elif key is Ellipsis: freq = self.freq elif com.is_bool_indexer(key): new_key = lib.maybe_booleans_to_slice(key.view(np.uint8)) if isinstance(new_key, slice): return self._get_getitem_freq(new_key) return freq def __setitem__(self, key: int | Sequence[int] | Sequence[bool] | slice, value: NaTType | Any | Sequence[Any]) -> None: no_op = check_setitem_lengths(key, value, self) super().__setitem__(key, value) if no_op: return self._maybe_clear_freq() def _maybe_clear_freq(self) -> None: pass def astype(self, dtype, copy: bool=True): dtype = pandas_dtype(dtype) if dtype == object: if self.dtype.kind == 'M': self = cast('DatetimeArray', self) i8data = self.asi8 converted = ints_to_pydatetime(i8data, tz=self.tz, box='timestamp', reso=self._creso) return converted elif self.dtype.kind == 'm': return ints_to_pytimedelta(self._ndarray, box=True) return self._box_values(self.asi8.ravel()).reshape(self.shape) elif isinstance(dtype, ExtensionDtype): return super().astype(dtype, copy=copy) elif is_string_dtype(dtype): return self._format_native_types() elif dtype.kind in 'iu': values = self.asi8 if dtype != np.int64: raise TypeError(f"Converting from {self.dtype} to {dtype} is not supported. 
Do obj.astype('int64').astype(dtype) instead") if copy: values = values.copy() return values elif dtype.kind in 'mM' and self.dtype != dtype or dtype.kind == 'f': msg = f'Cannot cast {type(self).__name__} to dtype {dtype}' raise TypeError(msg) else: return np.asarray(self, dtype=dtype) @overload def view(self) -> Self: ... @overload def view(self, dtype: Literal['M8[ns]']) -> DatetimeArray: ... @overload def view(self, dtype: Literal['m8[ns]']) -> TimedeltaArray: ... @overload def view(self, dtype: Dtype | None=...) -> ArrayLike: ... def view(self, dtype: Dtype | None=None) -> ArrayLike: return super().view(dtype) def _validate_comparison_value(self, other): if isinstance(other, str): try: other = self._scalar_from_string(other) except (ValueError, IncompatibleFrequency) as err: raise InvalidComparison(other) from err if isinstance(other, self._recognized_scalars) or other is NaT: other = self._scalar_type(other) try: self._check_compatible_with(other) except (TypeError, IncompatibleFrequency) as err: raise InvalidComparison(other) from err elif not is_list_like(other): raise InvalidComparison(other) elif len(other) != len(self): raise ValueError('Lengths must match') else: try: other = self._validate_listlike(other, allow_object=True) self._check_compatible_with(other) except (TypeError, IncompatibleFrequency) as err: if is_object_dtype(getattr(other, 'dtype', None)): pass else: raise InvalidComparison(other) from err return other def _validate_scalar(self, value, *, allow_listlike: bool=False, unbox: bool=True): if isinstance(value, self._scalar_type): pass elif isinstance(value, str): try: value = self._scalar_from_string(value) except ValueError as err: msg = self._validation_error_message(value, allow_listlike) raise TypeError(msg) from err elif is_valid_na_for_dtype(value, self.dtype): value = NaT elif isna(value): msg = self._validation_error_message(value, allow_listlike) raise TypeError(msg) elif isinstance(value, self._recognized_scalars): value = self._scalar_type(value) else: msg = self._validation_error_message(value, allow_listlike) raise TypeError(msg) if not unbox: return value return self._unbox_scalar(value) def _validation_error_message(self, value, allow_listlike: bool=False) -> str: if hasattr(value, 'dtype') and getattr(value, 'ndim', 0) > 0: msg_got = f'{value.dtype} array' else: msg_got = f"'{type(value).__name__}'" if allow_listlike: msg = f"value should be a '{self._scalar_type.__name__}', 'NaT', or array of those. Got {msg_got} instead." else: msg = f"value should be a '{self._scalar_type.__name__}' or 'NaT'. Got {msg_got} instead." 
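# Editor's note (illustrative, not part of the original source): for a
# DatetimeArray the two branches above produce messages such as
#   "value should be a 'Timestamp', 'NaT', or array of those. Got 'int' instead."
# (allow_listlike=True) or
#   "value should be a 'Timestamp' or 'NaT'. Got 'int' instead."
# (allow_listlike=False), with the offending value's dtype reported in place
# of the type name when it is an array.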
return msg def _validate_listlike(self, value, allow_object: bool=False): if isinstance(value, type(self)): if self.dtype.kind in 'mM' and (not allow_object) and (self.unit != value.unit): value = value.as_unit(self.unit, round_ok=False) return value if isinstance(value, list) and len(value) == 0: return type(self)._from_sequence([], dtype=self.dtype) if hasattr(value, 'dtype') and value.dtype == object: if lib.infer_dtype(value) in self._infer_matches: try: value = type(self)._from_sequence(value) except (ValueError, TypeError) as err: if allow_object: return value msg = self._validation_error_message(value, True) raise TypeError(msg) from err value = extract_array(value, extract_numpy=True) value = pd_array(value) value = extract_array(value, extract_numpy=True) if is_all_strings(value): try: value = type(self)._from_sequence(value, dtype=self.dtype) except ValueError: pass if isinstance(value.dtype, CategoricalDtype): if value.categories.dtype == self.dtype: value = value._internal_get_values() value = extract_array(value, extract_numpy=True) if allow_object and is_object_dtype(value.dtype): pass elif not type(self)._is_recognized_dtype(value.dtype): msg = self._validation_error_message(value, True) raise TypeError(msg) if self.dtype.kind in 'mM' and (not allow_object): value = value.as_unit(self.unit, round_ok=False) return value def _validate_setitem_value(self, value): if is_list_like(value): value = self._validate_listlike(value) else: return self._validate_scalar(value, allow_listlike=True) return self._unbox(value) @final def _unbox(self, other) -> np.int64 | np.datetime64 | np.timedelta64 | np.ndarray: if lib.is_scalar(other): other = self._unbox_scalar(other) else: self._check_compatible_with(other) other = other._ndarray return other @ravel_compat def map(self, mapper, na_action: Literal['ignore'] | None=None): from pandas import Index result = map_array(self, mapper, na_action=na_action) result = Index(result) if isinstance(result, ABCMultiIndex): return result.to_numpy() else: return result.array def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]: if values.dtype.kind in 'fiuc': return np.zeros(self.shape, dtype=bool) values = ensure_wrapped_if_datetimelike(values) if not isinstance(values, type(self)): if values.dtype == object: values = lib.maybe_convert_objects(values, convert_non_numeric=True, dtype_if_all_nat=self.dtype) if values.dtype != object: return self.isin(values) else: return isin(self.astype(object), values) return np.zeros(self.shape, dtype=bool) if self.dtype.kind in 'mM': self = cast('DatetimeArray | TimedeltaArray', self) values = values.as_unit(self.unit) try: self._check_compatible_with(values) except (TypeError, ValueError): return np.zeros(self.shape, dtype=bool) return isin(self.asi8, values.asi8) def isna(self) -> npt.NDArray[np.bool_]: return self._isnan @property def _isnan(self) -> npt.NDArray[np.bool_]: return self.asi8 == iNaT @property def _hasna(self) -> bool: return bool(self._isnan.any()) def _maybe_mask_results(self, result: np.ndarray, fill_value=iNaT, convert=None) -> np.ndarray: if self._hasna: if convert: result = result.astype(convert) if fill_value is None: fill_value = np.nan np.putmask(result, self._isnan, fill_value) return result @property def freqstr(self) -> str | None: if self.freq is None: return None return self.freq.freqstr @property def inferred_freq(self) -> str | None: if self.ndim != 1: return None try: return frequencies.infer_freq(self) except ValueError: return None @property def _resolution_obj(self) -> 
Resolution | None: freqstr = self.freqstr if freqstr is None: return None try: return Resolution.get_reso_from_freqstr(freqstr) except KeyError: return None @property def resolution(self) -> str: return self._resolution_obj.attrname @property def _is_monotonic_increasing(self) -> bool: return algos.is_monotonic(self.asi8, timelike=True)[0] @property def _is_monotonic_decreasing(self) -> bool: return algos.is_monotonic(self.asi8, timelike=True)[1] @property def _is_unique(self) -> bool: return len(unique1d(self.asi8.ravel('K'))) == self.size def _cmp_method(self, other, op): if self.ndim > 1 and getattr(other, 'shape', None) == self.shape: return op(self.ravel(), other.ravel()).reshape(self.shape) try: other = self._validate_comparison_value(other) except InvalidComparison: return invalid_comparison(self, other, op) dtype = getattr(other, 'dtype', None) if is_object_dtype(dtype): result = ops.comp_method_OBJECT_ARRAY(op, np.asarray(self.astype(object)), other) return result if other is NaT: if op is operator.ne: result = np.ones(self.shape, dtype=bool) else: result = np.zeros(self.shape, dtype=bool) return result if not isinstance(self.dtype, PeriodDtype): self = cast(TimelikeOps, self) if self._creso != other._creso: if not isinstance(other, type(self)): try: other = other.as_unit(self.unit, round_ok=False) except ValueError: other_arr = np.array(other.asm8) return compare_mismatched_resolutions(self._ndarray, other_arr, op) else: other_arr = other._ndarray return compare_mismatched_resolutions(self._ndarray, other_arr, op) other_vals = self._unbox(other) result = op(self._ndarray.view('i8'), other_vals.view('i8')) o_mask = isna(other) mask = self._isnan | o_mask if mask.any(): nat_result = op is operator.ne np.putmask(result, mask, nat_result) return result __pow__ = _make_unpacked_invalid_op('__pow__') __rpow__ = _make_unpacked_invalid_op('__rpow__') __mul__ = _make_unpacked_invalid_op('__mul__') __rmul__ = _make_unpacked_invalid_op('__rmul__') __truediv__ = _make_unpacked_invalid_op('__truediv__') __rtruediv__ = _make_unpacked_invalid_op('__rtruediv__') __floordiv__ = _make_unpacked_invalid_op('__floordiv__') __rfloordiv__ = _make_unpacked_invalid_op('__rfloordiv__') __mod__ = _make_unpacked_invalid_op('__mod__') __rmod__ = _make_unpacked_invalid_op('__rmod__') __divmod__ = _make_unpacked_invalid_op('__divmod__') __rdivmod__ = _make_unpacked_invalid_op('__rdivmod__') @final def _get_i8_values_and_mask(self, other) -> tuple[int | npt.NDArray[np.int64], None | npt.NDArray[np.bool_]]: if isinstance(other, Period): i8values = other.ordinal mask = None elif isinstance(other, (Timestamp, Timedelta)): i8values = other._value mask = None else: mask = other._isnan i8values = other.asi8 return (i8values, mask) @final def _get_arithmetic_result_freq(self, other) -> BaseOffset | None: if isinstance(self.dtype, PeriodDtype): return self.freq elif not lib.is_scalar(other): return None elif isinstance(self.freq, Tick): return self.freq return None @final def _add_datetimelike_scalar(self, other) -> DatetimeArray: if not lib.is_np_dtype(self.dtype, 'm'): raise TypeError(f'cannot add {type(self).__name__} and {type(other).__name__}') self = cast('TimedeltaArray', self) from pandas.core.arrays import DatetimeArray from pandas.core.arrays.datetimes import tz_to_dtype assert other is not NaT if isna(other): result = self._ndarray + NaT.to_datetime64().astype(f'M8[{self.unit}]') return DatetimeArray._simple_new(result, dtype=result.dtype) other = Timestamp(other) (self, other) = 
self._ensure_matching_resos(other) self = cast('TimedeltaArray', self) (other_i8, o_mask) = self._get_i8_values_and_mask(other) result = add_overflowsafe(self.asi8, np.asarray(other_i8, dtype='i8')) res_values = result.view(f'M8[{self.unit}]') dtype = tz_to_dtype(tz=other.tz, unit=self.unit) res_values = result.view(f'M8[{self.unit}]') new_freq = self._get_arithmetic_result_freq(other) return DatetimeArray._simple_new(res_values, dtype=dtype, freq=new_freq) @final def _add_datetime_arraylike(self, other: DatetimeArray) -> DatetimeArray: if not lib.is_np_dtype(self.dtype, 'm'): raise TypeError(f'cannot add {type(self).__name__} and {type(other).__name__}') return other + self @final def _sub_datetimelike_scalar(self, other: datetime | np.datetime64) -> TimedeltaArray: if self.dtype.kind != 'M': raise TypeError(f'cannot subtract a datelike from a {type(self).__name__}') self = cast('DatetimeArray', self) if isna(other): return self - NaT ts = Timestamp(other) (self, ts) = self._ensure_matching_resos(ts) return self._sub_datetimelike(ts) @final def _sub_datetime_arraylike(self, other: DatetimeArray) -> TimedeltaArray: if self.dtype.kind != 'M': raise TypeError(f'cannot subtract a datelike from a {type(self).__name__}') if len(self) != len(other): raise ValueError('cannot add indices of unequal length') self = cast('DatetimeArray', self) (self, other) = self._ensure_matching_resos(other) return self._sub_datetimelike(other) @final def _sub_datetimelike(self, other: Timestamp | DatetimeArray) -> TimedeltaArray: self = cast('DatetimeArray', self) from pandas.core.arrays import TimedeltaArray try: self._assert_tzawareness_compat(other) except TypeError as err: new_message = str(err).replace('compare', 'subtract') raise type(err)(new_message) from err (other_i8, o_mask) = self._get_i8_values_and_mask(other) res_values = add_overflowsafe(self.asi8, np.asarray(-other_i8, dtype='i8')) res_m8 = res_values.view(f'timedelta64[{self.unit}]') new_freq = self._get_arithmetic_result_freq(other) new_freq = cast('Tick | None', new_freq) return TimedeltaArray._simple_new(res_m8, dtype=res_m8.dtype, freq=new_freq) @final def _add_period(self, other: Period) -> PeriodArray: if not lib.is_np_dtype(self.dtype, 'm'): raise TypeError(f'cannot add Period to a {type(self).__name__}') from pandas.core.arrays.period import PeriodArray i8vals = np.broadcast_to(other.ordinal, self.shape) dtype = PeriodDtype(other.freq) parr = PeriodArray(i8vals, dtype=dtype) return parr + self def _add_offset(self, offset): raise AbstractMethodError(self) def _add_timedeltalike_scalar(self, other): if isna(other): new_values = np.empty(self.shape, dtype='i8').view(self._ndarray.dtype) new_values.fill(iNaT) return type(self)._simple_new(new_values, dtype=self.dtype) self = cast('DatetimeArray | TimedeltaArray', self) other = Timedelta(other) (self, other) = self._ensure_matching_resos(other) return self._add_timedeltalike(other) def _add_timedelta_arraylike(self, other: TimedeltaArray) -> Self: if len(self) != len(other): raise ValueError('cannot add indices of unequal length') (self, other) = cast('DatetimeArray | TimedeltaArray', self)._ensure_matching_resos(other) return self._add_timedeltalike(other) @final def _add_timedeltalike(self, other: Timedelta | TimedeltaArray) -> Self: (other_i8, o_mask) = self._get_i8_values_and_mask(other) new_values = add_overflowsafe(self.asi8, np.asarray(other_i8, dtype='i8')) res_values = new_values.view(self._ndarray.dtype) new_freq = self._get_arithmetic_result_freq(other) return 
type(self)._simple_new(res_values, dtype=self.dtype, freq=new_freq) @final def _add_nat(self) -> Self: if isinstance(self.dtype, PeriodDtype): raise TypeError(f'Cannot add {type(self).__name__} and {type(NaT).__name__}') result = np.empty(self.shape, dtype=np.int64) result.fill(iNaT) result = result.view(self._ndarray.dtype) return type(self)._simple_new(result, dtype=self.dtype, freq=None) @final def _sub_nat(self) -> np.ndarray: result = np.empty(self.shape, dtype=np.int64) result.fill(iNaT) if self.dtype.kind in 'mM': self = cast('DatetimeArray| TimedeltaArray', self) return result.view(f'timedelta64[{self.unit}]') else: return result.view('timedelta64[ns]') @final def _sub_periodlike(self, other: Period | PeriodArray) -> npt.NDArray[np.object_]: if not isinstance(self.dtype, PeriodDtype): raise TypeError(f'cannot subtract {type(other).__name__} from {type(self).__name__}') self = cast('PeriodArray', self) self._check_compatible_with(other) (other_i8, o_mask) = self._get_i8_values_and_mask(other) new_i8_data = add_overflowsafe(self.asi8, np.asarray(-other_i8, dtype='i8')) new_data = np.array([self.freq.base * x for x in new_i8_data]) if o_mask is None: mask = self._isnan else: mask = self._isnan | o_mask new_data[mask] = NaT return new_data @final def _addsub_object_array(self, other: npt.NDArray[np.object_], op) -> np.ndarray: assert op in [operator.add, operator.sub] if len(other) == 1 and self.ndim == 1: return op(self, other[0]) if get_option('performance_warnings'): warnings.warn(f'Adding/subtracting object-dtype array to {type(self).__name__} not vectorized.', PerformanceWarning, stacklevel=find_stack_level()) assert self.shape == other.shape, (self.shape, other.shape) res_values = op(self.astype('O'), np.asarray(other)) return res_values def _accumulate(self, name: str, *, skipna: bool=True, **kwargs) -> Self: if name not in {'cummin', 'cummax'}: raise TypeError(f'Accumulation {name} not supported for {type(self)}') op = getattr(datetimelike_accumulations, name) result = op(self.copy(), skipna=skipna, **kwargs) return type(self)._simple_new(result, dtype=self.dtype) @unpack_zerodim_and_defer('__add__') def __add__(self, other): other_dtype = getattr(other, 'dtype', None) other = ensure_wrapped_if_datetimelike(other) if other is NaT: result: np.ndarray | DatetimeLikeArrayMixin = self._add_nat() elif isinstance(other, (Tick, timedelta, np.timedelta64)): result = self._add_timedeltalike_scalar(other) elif isinstance(other, BaseOffset): result = self._add_offset(other) elif isinstance(other, (datetime, np.datetime64)): result = self._add_datetimelike_scalar(other) elif isinstance(other, Period) and lib.is_np_dtype(self.dtype, 'm'): result = self._add_period(other) elif lib.is_integer(other): if not isinstance(self.dtype, PeriodDtype): raise integer_op_not_supported(self) obj = cast('PeriodArray', self) result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.add) elif lib.is_np_dtype(other_dtype, 'm'): result = self._add_timedelta_arraylike(other) elif is_object_dtype(other_dtype): result = self._addsub_object_array(other, operator.add) elif lib.is_np_dtype(other_dtype, 'M') or isinstance(other_dtype, DatetimeTZDtype): return self._add_datetime_arraylike(other) elif is_integer_dtype(other_dtype): if not isinstance(self.dtype, PeriodDtype): raise integer_op_not_supported(self) obj = cast('PeriodArray', self) result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.add) else: return NotImplemented if isinstance(result, np.ndarray) and 
lib.is_np_dtype(result.dtype, 'm'): from pandas.core.arrays import TimedeltaArray return TimedeltaArray._from_sequence(result) return result def __radd__(self, other): return self.__add__(other) @unpack_zerodim_and_defer('__sub__') def __sub__(self, other): other_dtype = getattr(other, 'dtype', None) other = ensure_wrapped_if_datetimelike(other) if other is NaT: result: np.ndarray | DatetimeLikeArrayMixin = self._sub_nat() elif isinstance(other, (Tick, timedelta, np.timedelta64)): result = self._add_timedeltalike_scalar(-other) elif isinstance(other, BaseOffset): result = self._add_offset(-other) elif isinstance(other, (datetime, np.datetime64)): result = self._sub_datetimelike_scalar(other) elif lib.is_integer(other): if not isinstance(self.dtype, PeriodDtype): raise integer_op_not_supported(self) obj = cast('PeriodArray', self) result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.sub) elif isinstance(other, Period): result = self._sub_periodlike(other) elif lib.is_np_dtype(other_dtype, 'm'): result = self._add_timedelta_arraylike(-other) elif is_object_dtype(other_dtype): result = self._addsub_object_array(other, operator.sub) elif lib.is_np_dtype(other_dtype, 'M') or isinstance(other_dtype, DatetimeTZDtype): result = self._sub_datetime_arraylike(other) elif isinstance(other_dtype, PeriodDtype): result = self._sub_periodlike(other) elif is_integer_dtype(other_dtype): if not isinstance(self.dtype, PeriodDtype): raise integer_op_not_supported(self) obj = cast('PeriodArray', self) result = obj._addsub_int_array_or_scalar(other * obj.dtype._n, operator.sub) else: return NotImplemented if isinstance(result, np.ndarray) and lib.is_np_dtype(result.dtype, 'm'): from pandas.core.arrays import TimedeltaArray return TimedeltaArray._from_sequence(result) return result def __rsub__(self, other): other_dtype = getattr(other, 'dtype', None) other_is_dt64 = lib.is_np_dtype(other_dtype, 'M') or isinstance(other_dtype, DatetimeTZDtype) if other_is_dt64 and lib.is_np_dtype(self.dtype, 'm'): if lib.is_scalar(other): return Timestamp(other) - self if not isinstance(other, DatetimeLikeArrayMixin): from pandas.core.arrays import DatetimeArray other = DatetimeArray._from_sequence(other) return other - self elif self.dtype.kind == 'M' and hasattr(other, 'dtype') and (not other_is_dt64): raise TypeError(f'cannot subtract {type(self).__name__} from {type(other).__name__}') elif isinstance(self.dtype, PeriodDtype) and lib.is_np_dtype(other_dtype, 'm'): raise TypeError(f'cannot subtract {type(self).__name__} from {other.dtype}') elif lib.is_np_dtype(self.dtype, 'm'): self = cast('TimedeltaArray', self) return -self + other return -(self - other) def __iadd__(self, other) -> Self: result = self + other self[:] = result[:] if not isinstance(self.dtype, PeriodDtype): self._freq = result.freq return self def __isub__(self, other) -> Self: result = self - other self[:] = result[:] if not isinstance(self.dtype, PeriodDtype): self._freq = result.freq return self @_period_dispatch def _quantile(self, qs: npt.NDArray[np.float64], interpolation: str) -> Self: return super()._quantile(qs=qs, interpolation=interpolation) @_period_dispatch def min(self, *, axis: AxisInt | None=None, skipna: bool=True, **kwargs): nv.validate_min((), kwargs) nv.validate_minmax_axis(axis, self.ndim) result = nanops.nanmin(self._ndarray, axis=axis, skipna=skipna) return self._wrap_reduction_result(axis, result) @_period_dispatch def max(self, *, axis: AxisInt | None=None, skipna: bool=True, **kwargs): nv.validate_max((), kwargs) 
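# Editor's note (illustrative, not part of the original source): min, max,
# mean and median reduce over the underlying integer/datetime64 data via
# nanops and re-box the result, e.g.
# >>> dti = pd.date_range("2020-01-01", periods=3)
# >>> dti._data.max()
# Timestamp('2020-01-03 00:00:00')
# With skipna=False, a NaT present in the data propagates to the result.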
nv.validate_minmax_axis(axis, self.ndim) result = nanops.nanmax(self._ndarray, axis=axis, skipna=skipna) return self._wrap_reduction_result(axis, result) def mean(self, *, skipna: bool=True, axis: AxisInt | None=0): if isinstance(self.dtype, PeriodDtype): raise TypeError(f"mean is not implemented for {type(self).__name__} since the meaning is ambiguous. An alternative is obj.to_timestamp(how='start').mean()") result = nanops.nanmean(self._ndarray, axis=axis, skipna=skipna, mask=self.isna()) return self._wrap_reduction_result(axis, result) @_period_dispatch def median(self, *, axis: AxisInt | None=None, skipna: bool=True, **kwargs): nv.validate_median((), kwargs) if axis is not None and abs(axis) >= self.ndim: raise ValueError('abs(axis) must be less than ndim') result = nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna) return self._wrap_reduction_result(axis, result) def _mode(self, dropna: bool=True): mask = None if dropna: mask = self.isna() i8modes = algorithms.mode(self.view('i8'), mask=mask) npmodes = i8modes.view(self._ndarray.dtype) npmodes = cast(np.ndarray, npmodes) return self._from_backing_data(npmodes) def _groupby_op(self, *, how: str, has_dropped_na: bool, min_count: int, ngroups: int, ids: npt.NDArray[np.intp], **kwargs): dtype = self.dtype if dtype.kind == 'M': if how in ['sum', 'prod', 'cumsum', 'cumprod', 'var', 'skew']: raise TypeError(f"datetime64 type does not support operation '{how}'") if how in ['any', 'all']: raise TypeError(f"'{how}' with datetime64 dtypes is no longer supported. Use (obj != pd.Timestamp(0)).{how}() instead.") elif isinstance(dtype, PeriodDtype): if how in ['sum', 'prod', 'cumsum', 'cumprod', 'var', 'skew']: raise TypeError(f'Period type does not support {how} operations') if how in ['any', 'all']: raise TypeError(f"'{how}' with PeriodDtype is no longer supported. Use (obj != pd.Period(0, freq)).{how}() instead.") elif how in ['prod', 'cumprod', 'skew', 'var']: raise TypeError(f'timedelta64 type does not support {how} operations') npvalues = self._ndarray.view('M8[ns]') from pandas.core.groupby.ops import WrappedCythonOp kind = WrappedCythonOp.get_kind_from_how(how) op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na) res_values = op._cython_op_ndim_compat(npvalues, min_count=min_count, ngroups=ngroups, comp_ids=ids, mask=None, **kwargs) if op.how in op.cast_blocklist: return res_values assert res_values.dtype == 'M8[ns]' if how in ['std', 'sem']: from pandas.core.arrays import TimedeltaArray if isinstance(self.dtype, PeriodDtype): raise TypeError("'std' and 'sem' are not valid for PeriodDtype") self = cast('DatetimeArray | TimedeltaArray', self) new_dtype = f'm8[{self.unit}]' res_values = res_values.view(new_dtype) return TimedeltaArray._simple_new(res_values, dtype=res_values.dtype) res_values = res_values.view(self._ndarray.dtype) return self._from_backing_data(res_values) class DatelikeOps(DatetimeLikeArrayMixin): @Substitution(URL='https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior') def strftime(self, date_format: str) -> npt.NDArray[np.object_]: result = self._format_native_types(date_format=date_format, na_rep=np.nan) if using_string_dtype(): from pandas import StringDtype return pd_array(result, dtype=StringDtype(na_value=np.nan)) return result.astype(object, copy=False) _round_doc = '\n Perform {op} operation on the data to the specified `freq`.\n\n Parameters\n ----------\n freq : str or Offset\n The frequency level to {op} the index to. 
Must be a fixed\n frequency like \'s\' (second) not \'ME\' (month end). See\n :ref:`frequency aliases ` for\n a list of possible `freq` values.\n ambiguous : \'infer\', bool-ndarray, \'NaT\', default \'raise\'\n Only relevant for DatetimeIndex:\n\n - \'infer\' will attempt to infer fall dst-transition hours based on\n order\n - bool-ndarray where True signifies a DST time, False designates\n a non-DST time (note that this flag is only applicable for\n ambiguous times)\n - \'NaT\' will return NaT where there are ambiguous times\n - \'raise\' will raise a ValueError if there are ambiguous\n times.\n\n nonexistent : \'shift_forward\', \'shift_backward\', \'NaT\', timedelta, default \'raise\'\n A nonexistent time does not exist in a particular timezone\n where clocks moved forward due to DST.\n\n - \'shift_forward\' will shift the nonexistent time forward to the\n closest existing time\n - \'shift_backward\' will shift the nonexistent time backward to the\n closest existing time\n - \'NaT\' will return NaT where there are nonexistent times\n - timedelta objects will shift nonexistent times by the timedelta\n - \'raise\' will raise a ValueError if there are\n nonexistent times.\n\n Returns\n -------\n DatetimeIndex, TimedeltaIndex, or Series\n Index of the same type for a DatetimeIndex or TimedeltaIndex,\n or a Series with the same index for a Series.\n\n Raises\n ------\n ValueError if the `freq` cannot be converted.\n\n See Also\n --------\n DatetimeIndex.floor : Perform floor operation on the data to the specified `freq`.\n DatetimeIndex.snap : Snap time stamps to nearest occurring frequency.\n\n Notes\n -----\n If the timestamps have a timezone, {op}ing will take place relative to the\n local ("wall") time and re-localized to the same timezone. When {op}ing\n near daylight savings time, use ``nonexistent`` and ``ambiguous`` to\n control the re-localization behavior.\n\n Examples\n --------\n **DatetimeIndex**\n\n >>> rng = pd.date_range(\'1/1/2018 11:59:00\', periods=3, freq=\'min\')\n >>> rng\n DatetimeIndex([\'2018-01-01 11:59:00\', \'2018-01-01 12:00:00\',\n \'2018-01-01 12:01:00\'],\n dtype=\'datetime64[ns]\', freq=\'min\')\n ' _round_example = '>>> rng.round(\'h\')\n DatetimeIndex([\'2018-01-01 12:00:00\', \'2018-01-01 12:00:00\',\n \'2018-01-01 12:00:00\'],\n dtype=\'datetime64[ns]\', freq=None)\n\n **Series**\n\n >>> pd.Series(rng).dt.round("h")\n 0 2018-01-01 12:00:00\n 1 2018-01-01 12:00:00\n 2 2018-01-01 12:00:00\n dtype: datetime64[ns]\n\n When rounding near a daylight savings time transition, use ``ambiguous`` or\n ``nonexistent`` to control how the timestamp should be re-localized.\n\n >>> rng_tz = pd.DatetimeIndex(["2021-10-31 03:30:00"], tz="Europe/Amsterdam")\n\n >>> rng_tz.floor("2h", ambiguous=False)\n DatetimeIndex([\'2021-10-31 02:00:00+01:00\'],\n dtype=\'datetime64[s, Europe/Amsterdam]\', freq=None)\n\n >>> rng_tz.floor("2h", ambiguous=True)\n DatetimeIndex([\'2021-10-31 02:00:00+02:00\'],\n dtype=\'datetime64[s, Europe/Amsterdam]\', freq=None)\n ' _floor_example = '>>> rng.floor(\'h\')\n DatetimeIndex([\'2018-01-01 11:00:00\', \'2018-01-01 12:00:00\',\n \'2018-01-01 12:00:00\'],\n dtype=\'datetime64[ns]\', freq=None)\n\n **Series**\n\n >>> pd.Series(rng).dt.floor("h")\n 0 2018-01-01 11:00:00\n 1 2018-01-01 12:00:00\n 2 2018-01-01 12:00:00\n dtype: datetime64[ns]\n\n When rounding near a daylight savings time transition, use ``ambiguous`` or\n ``nonexistent`` to control how the timestamp should be re-localized.\n\n >>> rng_tz = pd.DatetimeIndex(["2021-10-31 03:30:00"], 
tz="Europe/Amsterdam")\n\n >>> rng_tz.floor("2h", ambiguous=False)\n DatetimeIndex([\'2021-10-31 02:00:00+01:00\'],\n dtype=\'datetime64[s, Europe/Amsterdam]\', freq=None)\n\n >>> rng_tz.floor("2h", ambiguous=True)\n DatetimeIndex([\'2021-10-31 02:00:00+02:00\'],\n dtype=\'datetime64[s, Europe/Amsterdam]\', freq=None)\n ' _ceil_example = '>>> rng.ceil(\'h\')\n DatetimeIndex([\'2018-01-01 12:00:00\', \'2018-01-01 12:00:00\',\n \'2018-01-01 13:00:00\'],\n dtype=\'datetime64[ns]\', freq=None)\n\n **Series**\n\n >>> pd.Series(rng).dt.ceil("h")\n 0 2018-01-01 12:00:00\n 1 2018-01-01 12:00:00\n 2 2018-01-01 13:00:00\n dtype: datetime64[ns]\n\n When rounding near a daylight savings time transition, use ``ambiguous`` or\n ``nonexistent`` to control how the timestamp should be re-localized.\n\n >>> rng_tz = pd.DatetimeIndex(["2021-10-31 01:30:00"], tz="Europe/Amsterdam")\n\n >>> rng_tz.ceil("h", ambiguous=False)\n DatetimeIndex([\'2021-10-31 02:00:00+01:00\'],\n dtype=\'datetime64[s, Europe/Amsterdam]\', freq=None)\n\n >>> rng_tz.ceil("h", ambiguous=True)\n DatetimeIndex([\'2021-10-31 02:00:00+02:00\'],\n dtype=\'datetime64[s, Europe/Amsterdam]\', freq=None)\n ' class TimelikeOps(DatetimeLikeArrayMixin): @classmethod def _validate_dtype(cls, values, dtype): raise AbstractMethodError(cls) @property def freq(self): return self._freq @freq.setter def freq(self, value) -> None: if value is not None: value = to_offset(value) self._validate_frequency(self, value) if self.dtype.kind == 'm' and (not isinstance(value, Tick)): raise TypeError('TimedeltaArray/Index freq must be a Tick') if self.ndim > 1: raise ValueError('Cannot set freq with ndim > 1') self._freq = value @final def _maybe_pin_freq(self, freq, validate_kwds: dict) -> None: if freq is None: self._freq = None elif freq == 'infer': if self._freq is None: self._freq = to_offset(self.inferred_freq) elif freq is lib.no_default: pass elif self._freq is None: freq = to_offset(freq) type(self)._validate_frequency(self, freq, **validate_kwds) self._freq = freq else: freq = to_offset(freq) _validate_inferred_freq(freq, self._freq) @final @classmethod def _validate_frequency(cls, index, freq: BaseOffset, **kwargs) -> None: inferred = index.inferred_freq if index.size == 0 or inferred == freq.freqstr: return None try: on_freq = cls._generate_range(start=index[0], end=None, periods=len(index), freq=freq, unit=index.unit, **kwargs) if not np.array_equal(index.asi8, on_freq.asi8): raise ValueError except ValueError as err: if 'non-fixed' in str(err): raise err raise ValueError(f'Inferred frequency {inferred} from passed values does not conform to passed frequency {freq.freqstr}') from err @classmethod def _generate_range(cls, start, end, periods: int | None, freq, *args, **kwargs) -> Self: raise AbstractMethodError(cls) @cache_readonly def _creso(self) -> int: return get_unit_from_dtype(self._ndarray.dtype) @cache_readonly def unit(self) -> str: return dtype_to_unit(self.dtype) def as_unit(self, unit: str, round_ok: bool=True) -> Self: if unit not in ['s', 'ms', 'us', 'ns']: raise ValueError("Supported units are 's', 'ms', 'us', 'ns'") dtype = np.dtype(f'{self.dtype.kind}8[{unit}]') new_values = astype_overflowsafe(self._ndarray, dtype, round_ok=round_ok) if isinstance(self.dtype, np.dtype): new_dtype = new_values.dtype else: tz = cast('DatetimeArray', self).tz new_dtype = DatetimeTZDtype(tz=tz, unit=unit) return type(self)._simple_new(new_values, dtype=new_dtype, freq=self.freq) def _ensure_matching_resos(self, other): if self._creso != other._creso: if 
self._creso < other._creso: self = self.as_unit(other.unit) else: other = other.as_unit(self.unit) return (self, other) def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): if ufunc in [np.isnan, np.isinf, np.isfinite] and len(inputs) == 1 and (inputs[0] is self): return getattr(ufunc, method)(self._ndarray, **kwargs) return super().__array_ufunc__(ufunc, method, *inputs, **kwargs) def _round(self, freq, mode, ambiguous, nonexistent): if isinstance(self.dtype, DatetimeTZDtype): self = cast('DatetimeArray', self) naive = self.tz_localize(None) result = naive._round(freq, mode, ambiguous, nonexistent) return result.tz_localize(self.tz, ambiguous=ambiguous, nonexistent=nonexistent) values = self.view('i8') values = cast(np.ndarray, values) nanos = get_unit_for_round(freq, self._creso) if nanos == 0: return self.copy() result_i8 = round_nsint64(values, mode, nanos) result = self._maybe_mask_results(result_i8, fill_value=iNaT) result = result.view(self._ndarray.dtype) return self._simple_new(result, dtype=self.dtype) @Appender((_round_doc + _round_example).format(op='round')) def round(self, freq, ambiguous: TimeAmbiguous='raise', nonexistent: TimeNonexistent='raise') -> Self: return self._round(freq, RoundTo.NEAREST_HALF_EVEN, ambiguous, nonexistent) @Appender((_round_doc + _floor_example).format(op='floor')) def floor(self, freq, ambiguous: TimeAmbiguous='raise', nonexistent: TimeNonexistent='raise') -> Self: return self._round(freq, RoundTo.MINUS_INFTY, ambiguous, nonexistent) @Appender((_round_doc + _ceil_example).format(op='ceil')) def ceil(self, freq, ambiguous: TimeAmbiguous='raise', nonexistent: TimeNonexistent='raise') -> Self: return self._round(freq, RoundTo.PLUS_INFTY, ambiguous, nonexistent) def any(self, *, axis: AxisInt | None=None, skipna: bool=True) -> bool: return nanops.nanany(self._ndarray, axis=axis, skipna=skipna, mask=self.isna()) def all(self, *, axis: AxisInt | None=None, skipna: bool=True) -> bool: return nanops.nanall(self._ndarray, axis=axis, skipna=skipna, mask=self.isna()) def _maybe_clear_freq(self) -> None: self._freq = None def _with_freq(self, freq) -> Self: if freq is None: pass elif len(self) == 0 and isinstance(freq, BaseOffset): if self.dtype.kind == 'm' and (not isinstance(freq, Tick)): raise TypeError('TimedeltaArray/Index freq must be a Tick') else: assert freq == 'infer' freq = to_offset(self.inferred_freq) arr = self.view() arr._freq = freq return arr def _values_for_json(self) -> np.ndarray: if isinstance(self.dtype, np.dtype): return self._ndarray return super()._values_for_json() def factorize(self, use_na_sentinel: bool=True, sort: bool=False): if self.freq is not None: if sort and self.freq.n < 0: codes = np.arange(len(self) - 1, -1, -1, dtype=np.intp) uniques = self[::-1] else: codes = np.arange(len(self), dtype=np.intp) uniques = self.copy() return (codes, uniques) if sort: raise NotImplementedError(f"The 'sort' keyword in {type(self).__name__}.factorize is ignored unless arr.freq is not None. 
To factorize with sort, call pd.factorize(obj, sort=True) instead.") return super().factorize(use_na_sentinel=use_na_sentinel) @classmethod def _concat_same_type(cls, to_concat: Sequence[Self], axis: AxisInt=0) -> Self: new_obj = super()._concat_same_type(to_concat, axis) obj = to_concat[0] if axis == 0: to_concat = [x for x in to_concat if len(x)] if obj.freq is not None and all((x.freq == obj.freq for x in to_concat)): pairs = zip(to_concat[:-1], to_concat[1:]) if all((pair[0][-1] + obj.freq == pair[1][0] for pair in pairs)): new_freq = obj.freq new_obj._freq = new_freq return new_obj def copy(self, order: str='C') -> Self: new_obj = super().copy(order=order) new_obj._freq = self.freq return new_obj def interpolate(self, *, method: InterpolateOptions, axis: int, index: Index, limit, limit_direction, limit_area, copy: bool, **kwargs) -> Self: if method != 'linear': raise NotImplementedError if not copy: out_data = self._ndarray else: out_data = self._ndarray.copy() missing.interpolate_2d_inplace(out_data, method=method, axis=axis, index=index, limit=limit, limit_direction=limit_direction, limit_area=limit_area, **kwargs) if not copy: return self return type(self)._simple_new(out_data, dtype=self.dtype) def take(self, indices: TakeIndexer, *, allow_fill: bool=False, fill_value: Any=None, axis: AxisInt=0) -> Self: result = super().take(indices=indices, allow_fill=allow_fill, fill_value=fill_value, axis=axis) indices = np.asarray(indices, dtype=np.intp) maybe_slice = lib.maybe_indices_to_slice(indices, len(self)) if isinstance(maybe_slice, slice): freq = self._get_getitem_freq(maybe_slice) result._freq = freq return result @property def _is_dates_only(self) -> bool: if not lib.is_np_dtype(self.dtype): return False values_int = self.asi8 consider_values = values_int != iNaT reso = get_unit_from_dtype(self.dtype) ppd = periods_per_day(reso) even_days = np.logical_and(consider_values, values_int % ppd != 0).sum() == 0 return even_days def ensure_arraylike_for_datetimelike(data, copy: bool, cls_name: str) -> tuple[ArrayLike, bool]: if not hasattr(data, 'dtype'): if not isinstance(data, (list, tuple)) and np.ndim(data) == 0: data = list(data) data = construct_1d_object_array_from_listlike(data) copy = False elif isinstance(data, ABCMultiIndex): raise TypeError(f'Cannot create a {cls_name} from a MultiIndex.') else: data = extract_array(data, extract_numpy=True) if isinstance(data, IntegerArray) or (isinstance(data, ArrowExtensionArray) and data.dtype.kind in 'iu'): data = data.to_numpy('int64', na_value=iNaT) copy = False elif isinstance(data, ArrowExtensionArray): data = data._maybe_convert_datelike_array() data = data.to_numpy() copy = False elif not isinstance(data, (np.ndarray, ExtensionArray)): data = np.asarray(data) elif isinstance(data, ABCCategorical): data = data.categories.take(data.codes, fill_value=NaT)._values copy = False return (data, copy) @overload def validate_periods(periods: None) -> None: ... @overload def validate_periods(periods: int) -> int: ... 
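# Editor's note (illustrative, not part of the original source): the
# overloads above only refine the signature; the implementation below passes
# through None or an integer and rejects anything else, e.g.
# >>> validate_periods(5)
# 5
# >>> validate_periods(None) is None
# True
# >>> validate_periods(2.5)   # raises TypeError: periods must be an integer, got 2.5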
def validate_periods(periods: int | None) -> int | None: if periods is not None and (not lib.is_integer(periods)): raise TypeError(f'periods must be an integer, got {periods}') return periods def _validate_inferred_freq(freq: BaseOffset | None, inferred_freq: BaseOffset | None) -> BaseOffset | None: if inferred_freq is not None: if freq is not None and freq != inferred_freq: raise ValueError(f'Inferred frequency {inferred_freq} from passed values does not conform to passed frequency {freq.freqstr}') if freq is None: freq = inferred_freq return freq def dtype_to_unit(dtype: DatetimeTZDtype | np.dtype | ArrowDtype) -> str: if isinstance(dtype, DatetimeTZDtype): return dtype.unit elif isinstance(dtype, ArrowDtype): if dtype.kind not in 'mM': raise ValueError(f'dtype={dtype!r} does not have a resolution.') return dtype.pyarrow_dtype.unit return np.datetime_data(dtype)[0] # File: pandas-main/pandas/core/arrays/datetimes.py from __future__ import annotations from datetime import datetime, timedelta, tzinfo from typing import TYPE_CHECKING, TypeVar, cast, overload import warnings import numpy as np from pandas._config import using_string_dtype from pandas._config.config import get_option from pandas._libs import lib, tslib from pandas._libs.tslibs import BaseOffset, NaT, NaTType, Resolution, Timestamp, astype_overflowsafe, fields, get_resolution, get_supported_dtype, get_unit_from_dtype, ints_to_pydatetime, is_date_array_normalized, is_supported_dtype, is_unitless, normalize_i8_timestamps, timezones, to_offset, tz_convert_from_utc, tzconversion from pandas._libs.tslibs.dtypes import abbrev_to_npy_unit from pandas.errors import PerformanceWarning from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_inclusive from pandas.core.dtypes.common import DT64NS_DTYPE, INT64_DTYPE, is_bool_dtype, is_float_dtype, is_string_dtype, pandas_dtype from pandas.core.dtypes.dtypes import DatetimeTZDtype, ExtensionDtype, PeriodDtype from pandas.core.dtypes.missing import isna from pandas.core.arrays import datetimelike as dtl from pandas.core.arrays._ranges import generate_regular_range import pandas.core.common as com from pandas.tseries.frequencies import get_period_alias from pandas.tseries.offsets import Day, Tick if TYPE_CHECKING: from collections.abc import Generator, Iterator from pandas._typing import ArrayLike, DateTimeErrorChoices, DtypeObj, IntervalClosedType, Self, TimeAmbiguous, TimeNonexistent, npt from pandas import DataFrame, Timedelta from pandas.core.arrays import PeriodArray _TimestampNoneT1 = TypeVar('_TimestampNoneT1', Timestamp, None) _TimestampNoneT2 = TypeVar('_TimestampNoneT2', Timestamp, None) _ITER_CHUNKSIZE = 10000 @overload def tz_to_dtype(tz: tzinfo, unit: str=...) -> DatetimeTZDtype: ... @overload def tz_to_dtype(tz: None, unit: str=...) -> np.dtype[np.datetime64]: ... 
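# Editor's note (illustrative, not part of the original source): tz_to_dtype
# (implemented just below) maps a timezone and unit to the dtype a
# DatetimeArray carries:
# >>> tz_to_dtype(None, "ns")
# dtype('<M8[ns]')
# >>> from datetime import timezone
# >>> tz_to_dtype(timezone.utc, "s")   # a DatetimeTZDtype
# datetime64[s, UTC]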
def tz_to_dtype(tz: tzinfo | None, unit: str='ns') -> np.dtype[np.datetime64] | DatetimeTZDtype: if tz is None: return np.dtype(f'M8[{unit}]') else: return DatetimeTZDtype(tz=tz, unit=unit) def _field_accessor(name: str, field: str, docstring: str | None=None): def f(self): values = self._local_timestamps() if field in self._bool_ops: result: np.ndarray if field.endswith(('start', 'end')): freq = self.freq month_kw = 12 if freq: kwds = freq.kwds month_kw = kwds.get('startingMonth', kwds.get('month', month_kw)) if freq is not None: freq_name = freq.name else: freq_name = None result = fields.get_start_end_field(values, field, freq_name, month_kw, reso=self._creso) else: result = fields.get_date_field(values, field, reso=self._creso) return result result = fields.get_date_field(values, field, reso=self._creso) result = self._maybe_mask_results(result, fill_value=None, convert='float64') return result f.__name__ = name f.__doc__ = docstring return property(f) class DatetimeArray(dtl.TimelikeOps, dtl.DatelikeOps): _typ = 'datetimearray' _internal_fill_value = np.datetime64('NaT', 'ns') _recognized_scalars = (datetime, np.datetime64) _is_recognized_dtype = lambda x: lib.is_np_dtype(x, 'M') or isinstance(x, DatetimeTZDtype) _infer_matches = ('datetime', 'datetime64', 'date') @property def _scalar_type(self) -> type[Timestamp]: return Timestamp _bool_ops: list[str] = ['is_month_start', 'is_month_end', 'is_quarter_start', 'is_quarter_end', 'is_year_start', 'is_year_end', 'is_leap_year'] _field_ops: list[str] = ['year', 'month', 'day', 'hour', 'minute', 'second', 'weekday', 'dayofweek', 'day_of_week', 'dayofyear', 'day_of_year', 'quarter', 'days_in_month', 'daysinmonth', 'microsecond', 'nanosecond'] _other_ops: list[str] = ['date', 'time', 'timetz'] _datetimelike_ops: list[str] = _field_ops + _bool_ops + _other_ops + ['unit', 'freq', 'tz'] _datetimelike_methods: list[str] = ['to_period', 'tz_localize', 'tz_convert', 'normalize', 'strftime', 'round', 'floor', 'ceil', 'month_name', 'day_name', 'as_unit'] __array_priority__ = 1000 _dtype: np.dtype[np.datetime64] | DatetimeTZDtype _freq: BaseOffset | None = None @classmethod def _from_scalars(cls, scalars, *, dtype: DtypeObj) -> Self: if lib.infer_dtype(scalars, skipna=True) not in ['datetime', 'datetime64']: raise ValueError return cls._from_sequence(scalars, dtype=dtype) @classmethod def _validate_dtype(cls, values, dtype): dtype = _validate_dt64_dtype(dtype) _validate_dt64_dtype(values.dtype) if isinstance(dtype, np.dtype): if values.dtype != dtype: raise ValueError('Values resolution does not match dtype.') else: vunit = np.datetime_data(values.dtype)[0] if vunit != dtype.unit: raise ValueError('Values resolution does not match dtype.') return dtype @classmethod def _simple_new(cls, values: npt.NDArray[np.datetime64], freq: BaseOffset | None=None, dtype: np.dtype[np.datetime64] | DatetimeTZDtype=DT64NS_DTYPE) -> Self: assert isinstance(values, np.ndarray) assert dtype.kind == 'M' if isinstance(dtype, np.dtype): assert dtype == values.dtype assert not is_unitless(dtype) else: assert dtype._creso == get_unit_from_dtype(values.dtype) result = super()._simple_new(values, dtype) result._freq = freq return result @classmethod def _from_sequence(cls, scalars, *, dtype=None, copy: bool=False) -> Self: return cls._from_sequence_not_strict(scalars, dtype=dtype, copy=copy) @classmethod def _from_sequence_not_strict(cls, data, *, dtype=None, copy: bool=False, tz=lib.no_default, freq: str | BaseOffset | lib.NoDefault | None=lib.no_default, dayfirst: bool=False, 
yearfirst: bool=False, ambiguous: TimeAmbiguous='raise') -> Self: explicit_tz_none = tz is None if tz is lib.no_default: tz = None else: tz = timezones.maybe_get_tz(tz) dtype = _validate_dt64_dtype(dtype) tz = _validate_tz_from_dtype(dtype, tz, explicit_tz_none) unit = None if dtype is not None: unit = dtl.dtype_to_unit(dtype) (data, copy) = dtl.ensure_arraylike_for_datetimelike(data, copy, cls_name='DatetimeArray') inferred_freq = None if isinstance(data, DatetimeArray): inferred_freq = data.freq (subarr, tz) = _sequence_to_dt64(data, copy=copy, tz=tz, dayfirst=dayfirst, yearfirst=yearfirst, ambiguous=ambiguous, out_unit=unit) _validate_tz_from_dtype(dtype, tz, explicit_tz_none) if tz is not None and explicit_tz_none: raise ValueError("Passed data is timezone-aware, incompatible with 'tz=None'. Use obj.tz_localize(None) instead.") data_unit = np.datetime_data(subarr.dtype)[0] data_dtype = tz_to_dtype(tz, data_unit) result = cls._simple_new(subarr, freq=inferred_freq, dtype=data_dtype) if unit is not None and unit != result.unit: result = result.as_unit(unit) validate_kwds = {'ambiguous': ambiguous} result._maybe_pin_freq(freq, validate_kwds) return result @classmethod def _generate_range(cls, start, end, periods: int | None, freq, tz=None, normalize: bool=False, ambiguous: TimeAmbiguous='raise', nonexistent: TimeNonexistent='raise', inclusive: IntervalClosedType='both', *, unit: str | None=None) -> Self: periods = dtl.validate_periods(periods) if freq is None and any((x is None for x in [periods, start, end])): raise ValueError('Must provide freq argument if no data is supplied') if com.count_not_none(start, end, periods, freq) != 3: raise ValueError('Of the four parameters: start, end, periods, and freq, exactly three must be specified') freq = to_offset(freq) if start is not None: start = Timestamp(start) if end is not None: end = Timestamp(end) if start is NaT or end is NaT: raise ValueError('Neither `start` nor `end` can be NaT') if unit is not None: if unit not in ['s', 'ms', 'us', 'ns']: raise ValueError("'unit' must be one of 's', 'ms', 'us', 'ns'") else: unit = 'ns' if start is not None: start = start.as_unit(unit, round_ok=False) if end is not None: end = end.as_unit(unit, round_ok=False) (left_inclusive, right_inclusive) = validate_inclusive(inclusive) (start, end) = _maybe_normalize_endpoints(start, end, normalize) tz = _infer_tz_from_endpoints(start, end, tz) if tz is not None: start = _maybe_localize_point(start, freq, tz, ambiguous, nonexistent) end = _maybe_localize_point(end, freq, tz, ambiguous, nonexistent) if freq is not None: if isinstance(freq, Day): if start is not None: start = start.tz_localize(None) if end is not None: end = end.tz_localize(None) if isinstance(freq, Tick): i8values = generate_regular_range(start, end, periods, freq, unit=unit) else: xdr = _generate_range(start=start, end=end, periods=periods, offset=freq, unit=unit) i8values = np.array([x._value for x in xdr], dtype=np.int64) endpoint_tz = start.tz if start is not None else end.tz if tz is not None and endpoint_tz is None: if not timezones.is_utc(tz): creso = abbrev_to_npy_unit(unit) i8values = tzconversion.tz_localize_to_utc(i8values, tz, ambiguous=ambiguous, nonexistent=nonexistent, creso=creso) if start is not None: start = start.tz_localize(tz, ambiguous, nonexistent) if end is not None: end = end.tz_localize(tz, ambiguous, nonexistent) else: periods = cast(int, periods) i8values = np.linspace(0, end._value - start._value, periods, dtype='int64') + start._value if i8values.dtype != 'i8': 
i8values = i8values.astype('i8') if start == end: if not left_inclusive and (not right_inclusive): i8values = i8values[1:-1] else: start_i8 = Timestamp(start)._value end_i8 = Timestamp(end)._value if not left_inclusive or not right_inclusive: if not left_inclusive and len(i8values) and (i8values[0] == start_i8): i8values = i8values[1:] if not right_inclusive and len(i8values) and (i8values[-1] == end_i8): i8values = i8values[:-1] dt64_values = i8values.view(f'datetime64[{unit}]') dtype = tz_to_dtype(tz, unit=unit) return cls._simple_new(dt64_values, freq=freq, dtype=dtype) def _unbox_scalar(self, value) -> np.datetime64: if not isinstance(value, self._scalar_type) and value is not NaT: raise ValueError("'value' should be a Timestamp.") self._check_compatible_with(value) if value is NaT: return np.datetime64(value._value, self.unit) else: return value.as_unit(self.unit, round_ok=False).asm8 def _scalar_from_string(self, value) -> Timestamp | NaTType: return Timestamp(value, tz=self.tz) def _check_compatible_with(self, other) -> None: if other is NaT: return self._assert_tzawareness_compat(other) def _box_func(self, x: np.datetime64) -> Timestamp | NaTType: value = x.view('i8') ts = Timestamp._from_value_and_reso(value, reso=self._creso, tz=self.tz) return ts @property def dtype(self) -> np.dtype[np.datetime64] | DatetimeTZDtype: return self._dtype @property def tz(self) -> tzinfo | None: return getattr(self.dtype, 'tz', None) @tz.setter def tz(self, value): raise AttributeError('Cannot directly set timezone. Use tz_localize() or tz_convert() as appropriate') @property def tzinfo(self) -> tzinfo | None: return self.tz @property def is_normalized(self) -> bool: return is_date_array_normalized(self.asi8, self.tz, reso=self._creso) @property def _resolution_obj(self) -> Resolution: return get_resolution(self.asi8, self.tz, reso=self._creso) def __array__(self, dtype=None, copy=None) -> np.ndarray: if dtype is None and self.tz: dtype = object return super().__array__(dtype=dtype, copy=copy) def __iter__(self) -> Iterator: if self.ndim > 1: for i in range(len(self)): yield self[i] else: data = self.asi8 length = len(self) chunksize = _ITER_CHUNKSIZE chunks = length // chunksize + 1 for i in range(chunks): start_i = i * chunksize end_i = min((i + 1) * chunksize, length) converted = ints_to_pydatetime(data[start_i:end_i], tz=self.tz, box='timestamp', reso=self._creso) yield from converted def astype(self, dtype, copy: bool=True): dtype = pandas_dtype(dtype) if dtype == self.dtype: if copy: return self.copy() return self elif isinstance(dtype, ExtensionDtype): if not isinstance(dtype, DatetimeTZDtype): return super().astype(dtype, copy=copy) elif self.tz is None: raise TypeError('Cannot use .astype to convert from timezone-naive dtype to timezone-aware dtype. Use obj.tz_localize instead or series.dt.tz_localize instead') else: np_dtype = np.dtype(dtype.str) res_values = astype_overflowsafe(self._ndarray, np_dtype, copy=copy) return type(self)._simple_new(res_values, dtype=dtype, freq=self.freq) elif self.tz is None and lib.is_np_dtype(dtype, 'M') and (not is_unitless(dtype)) and is_supported_dtype(dtype): res_values = astype_overflowsafe(self._ndarray, dtype, copy=True) return type(self)._simple_new(res_values, dtype=res_values.dtype) elif self.tz is not None and lib.is_np_dtype(dtype, 'M'): raise TypeError("Cannot use .astype to convert from timezone-aware dtype to timezone-naive dtype. 
Use obj.tz_localize(None) or obj.tz_convert('UTC').tz_localize(None) instead.") elif self.tz is None and lib.is_np_dtype(dtype, 'M') and (dtype != self.dtype) and is_unitless(dtype): raise TypeError("Casting to unit-less dtype 'datetime64' is not supported. Pass e.g. 'datetime64[ns]' instead.") elif isinstance(dtype, PeriodDtype): return self.to_period(freq=dtype.freq) return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy) def _format_native_types(self, *, na_rep: str | float='NaT', date_format=None, **kwargs) -> npt.NDArray[np.object_]: if date_format is None and self._is_dates_only: date_format = '%Y-%m-%d' return tslib.format_array_from_datetime(self.asi8, tz=self.tz, format=date_format, na_rep=na_rep, reso=self._creso) def _assert_tzawareness_compat(self, other) -> None: other_tz = getattr(other, 'tzinfo', None) other_dtype = getattr(other, 'dtype', None) if isinstance(other_dtype, DatetimeTZDtype): other_tz = other.dtype.tz if other is NaT: pass elif self.tz is None: if other_tz is not None: raise TypeError('Cannot compare tz-naive and tz-aware datetime-like objects.') elif other_tz is None: raise TypeError('Cannot compare tz-naive and tz-aware datetime-like objects') def _add_offset(self, offset: BaseOffset) -> Self: assert not isinstance(offset, Tick) if self.tz is not None: values = self.tz_localize(None) else: values = self try: res_values = offset._apply_array(values._ndarray) if res_values.dtype.kind == 'i': res_values = res_values.view(values.dtype) except NotImplementedError: if get_option('performance_warnings'): warnings.warn('Non-vectorized DateOffset being applied to Series or DatetimeIndex.', PerformanceWarning, stacklevel=find_stack_level()) res_values = self.astype('O') + offset result = type(self)._from_sequence(res_values).as_unit(self.unit) if not len(self): return result.tz_localize(self.tz) else: result = type(self)._simple_new(res_values, dtype=res_values.dtype) if offset.normalize: result = result.normalize() result._freq = None if self.tz is not None: result = result.tz_localize(self.tz) return result def _local_timestamps(self) -> npt.NDArray[np.int64]: if self.tz is None or timezones.is_utc(self.tz): return self.asi8 return tz_convert_from_utc(self.asi8, self.tz, reso=self._creso) def tz_convert(self, tz) -> Self: tz = timezones.maybe_get_tz(tz) if self.tz is None: raise TypeError('Cannot convert tz-naive timestamps, use tz_localize to localize') dtype = tz_to_dtype(tz, unit=self.unit) return self._simple_new(self._ndarray, dtype=dtype, freq=self.freq) @dtl.ravel_compat def tz_localize(self, tz, ambiguous: TimeAmbiguous='raise', nonexistent: TimeNonexistent='raise') -> Self: nonexistent_options = ('raise', 'NaT', 'shift_forward', 'shift_backward') if nonexistent not in nonexistent_options and (not isinstance(nonexistent, timedelta)): raise ValueError("The nonexistent argument must be one of 'raise', 'NaT', 'shift_forward', 'shift_backward' or a timedelta object") if self.tz is not None: if tz is None: new_dates = tz_convert_from_utc(self.asi8, self.tz, reso=self._creso) else: raise TypeError('Already tz-aware, use tz_convert to convert.') else: tz = timezones.maybe_get_tz(tz) new_dates = tzconversion.tz_localize_to_utc(self.asi8, tz, ambiguous=ambiguous, nonexistent=nonexistent, creso=self._creso) new_dates_dt64 = new_dates.view(f'M8[{self.unit}]') dtype = tz_to_dtype(tz, unit=self.unit) freq = None if timezones.is_utc(tz) or (len(self) == 1 and (not isna(new_dates_dt64[0]))): freq = self.freq elif tz is None and self.tz is None: freq = self.freq return 
self._simple_new(new_dates_dt64, dtype=dtype, freq=freq) def to_pydatetime(self) -> npt.NDArray[np.object_]: return ints_to_pydatetime(self.asi8, tz=self.tz, reso=self._creso) def normalize(self) -> Self: new_values = normalize_i8_timestamps(self.asi8, self.tz, reso=self._creso) dt64_values = new_values.view(self._ndarray.dtype) dta = type(self)._simple_new(dt64_values, dtype=dt64_values.dtype) dta = dta._with_freq('infer') if self.tz is not None: dta = dta.tz_localize(self.tz) return dta def to_period(self, freq=None) -> PeriodArray: from pandas.core.arrays import PeriodArray if self.tz is not None: warnings.warn('Converting to PeriodArray/Index representation will drop timezone information.', UserWarning, stacklevel=find_stack_level()) if freq is None: freq = self.freqstr or self.inferred_freq if isinstance(self.freq, BaseOffset) and hasattr(self.freq, '_period_dtype_code'): freq = PeriodDtype(self.freq)._freqstr if freq is None: raise ValueError('You must pass a freq argument as current index has none.') res = get_period_alias(freq) if res is None: res = freq freq = res return PeriodArray._from_datetime64(self._ndarray, freq, tz=self.tz) def month_name(self, locale=None) -> npt.NDArray[np.object_]: values = self._local_timestamps() result = fields.get_date_name_field(values, 'month_name', locale=locale, reso=self._creso) result = self._maybe_mask_results(result, fill_value=None) if using_string_dtype(): from pandas import StringDtype, array as pd_array return pd_array(result, dtype=StringDtype(na_value=np.nan)) return result def day_name(self, locale=None) -> npt.NDArray[np.object_]: values = self._local_timestamps() result = fields.get_date_name_field(values, 'day_name', locale=locale, reso=self._creso) result = self._maybe_mask_results(result, fill_value=None) if using_string_dtype(): from pandas import StringDtype, array as pd_array return pd_array(result, dtype=StringDtype(na_value=np.nan)) return result @property def time(self) -> npt.NDArray[np.object_]: timestamps = self._local_timestamps() return ints_to_pydatetime(timestamps, box='time', reso=self._creso) @property def timetz(self) -> npt.NDArray[np.object_]: return ints_to_pydatetime(self.asi8, self.tz, box='time', reso=self._creso) @property def date(self) -> npt.NDArray[np.object_]: timestamps = self._local_timestamps() return ints_to_pydatetime(timestamps, box='date', reso=self._creso) def isocalendar(self) -> DataFrame: from pandas import DataFrame values = self._local_timestamps() sarray = fields.build_isocalendar_sarray(values, reso=self._creso) iso_calendar_df = DataFrame(sarray, columns=['year', 'week', 'day'], dtype='UInt32') if self._hasna: iso_calendar_df.iloc[self._isnan] = None return iso_calendar_df year = _field_accessor('year', 'Y', '\n The year of the datetime.\n\n See Also\n --------\n DatetimeIndex.month: The month as January=1, December=12.\n DatetimeIndex.day: The day of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range("2000-01-01", periods=3, freq="YE")\n ... )\n >>> datetime_series\n 0 2000-12-31\n 1 2001-12-31\n 2 2002-12-31\n dtype: datetime64[ns]\n >>> datetime_series.dt.year\n 0 2000\n 1 2001\n 2 2002\n dtype: int32\n ') month = _field_accessor('month', 'M', '\n The month as January=1, December=12.\n\n See Also\n --------\n DatetimeIndex.year: The year of the datetime.\n DatetimeIndex.day: The day of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range("2000-01-01", periods=3, freq="ME")\n ... 
)\n >>> datetime_series\n 0 2000-01-31\n 1 2000-02-29\n 2 2000-03-31\n dtype: datetime64[ns]\n >>> datetime_series.dt.month\n 0 1\n 1 2\n 2 3\n dtype: int32\n ') day = _field_accessor('day', 'D', '\n The day of the datetime.\n\n See Also\n --------\n DatetimeIndex.year: The year of the datetime.\n DatetimeIndex.month: The month as January=1, December=12.\n DatetimeIndex.hour: The hours of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range("2000-01-01", periods=3, freq="D")\n ... )\n >>> datetime_series\n 0 2000-01-01\n 1 2000-01-02\n 2 2000-01-03\n dtype: datetime64[ns]\n >>> datetime_series.dt.day\n 0 1\n 1 2\n 2 3\n dtype: int32\n ') hour = _field_accessor('hour', 'h', '\n The hours of the datetime.\n\n See Also\n --------\n DatetimeIndex.day: The day of the datetime.\n DatetimeIndex.minute: The minutes of the datetime.\n DatetimeIndex.second: The seconds of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range("2000-01-01", periods=3, freq="h")\n ... )\n >>> datetime_series\n 0 2000-01-01 00:00:00\n 1 2000-01-01 01:00:00\n 2 2000-01-01 02:00:00\n dtype: datetime64[ns]\n >>> datetime_series.dt.hour\n 0 0\n 1 1\n 2 2\n dtype: int32\n ') minute = _field_accessor('minute', 'm', '\n The minutes of the datetime.\n\n See Also\n --------\n DatetimeIndex.hour: The hours of the datetime.\n DatetimeIndex.second: The seconds of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range("2000-01-01", periods=3, freq="min")\n ... )\n >>> datetime_series\n 0 2000-01-01 00:00:00\n 1 2000-01-01 00:01:00\n 2 2000-01-01 00:02:00\n dtype: datetime64[ns]\n >>> datetime_series.dt.minute\n 0 0\n 1 1\n 2 2\n dtype: int32\n ') second = _field_accessor('second', 's', '\n The seconds of the datetime.\n\n See Also\n --------\n DatetimeIndex.minute: The minutes of the datetime.\n DatetimeIndex.microsecond: The microseconds of the datetime.\n DatetimeIndex.nanosecond: The nanoseconds of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range("2000-01-01", periods=3, freq="s")\n ... )\n >>> datetime_series\n 0 2000-01-01 00:00:00\n 1 2000-01-01 00:00:01\n 2 2000-01-01 00:00:02\n dtype: datetime64[ns]\n >>> datetime_series.dt.second\n 0 0\n 1 1\n 2 2\n dtype: int32\n ') microsecond = _field_accessor('microsecond', 'us', '\n The microseconds of the datetime.\n\n See Also\n --------\n DatetimeIndex.second: The seconds of the datetime.\n DatetimeIndex.nanosecond: The nanoseconds of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range("2000-01-01", periods=3, freq="us")\n ... )\n >>> datetime_series\n 0 2000-01-01 00:00:00.000000\n 1 2000-01-01 00:00:00.000001\n 2 2000-01-01 00:00:00.000002\n dtype: datetime64[ns]\n >>> datetime_series.dt.microsecond\n 0 0\n 1 1\n 2 2\n dtype: int32\n ') nanosecond = _field_accessor('nanosecond', 'ns', '\n The nanoseconds of the datetime.\n\n See Also\n --------\n DatetimeIndex.second: The seconds of the datetime.\n DatetimeIndex.microsecond: The microseconds of the datetime.\n\n Examples\n --------\n >>> datetime_series = pd.Series(\n ... pd.date_range("2000-01-01", periods=3, freq="ns")\n ... 
)\n >>> datetime_series\n 0 2000-01-01 00:00:00.000000000\n 1 2000-01-01 00:00:00.000000001\n 2 2000-01-01 00:00:00.000000002\n dtype: datetime64[ns]\n >>> datetime_series.dt.nanosecond\n 0 0\n 1 1\n 2 2\n dtype: int32\n ') _dayofweek_doc = "\n The day of the week with Monday=0, Sunday=6.\n\n Return the day of the week. It is assumed the week starts on\n Monday, which is denoted by 0 and ends on Sunday which is denoted\n by 6. This method is available on both Series with datetime\n values (using the `dt` accessor) or DatetimeIndex.\n\n Returns\n -------\n Series or Index\n Containing integers indicating the day number.\n\n See Also\n --------\n Series.dt.dayofweek : Alias.\n Series.dt.weekday : Alias.\n Series.dt.day_name : Returns the name of the day of the week.\n\n Examples\n --------\n >>> s = pd.date_range('2016-12-31', '2017-01-08', freq='D').to_series()\n >>> s.dt.dayofweek\n 2016-12-31 5\n 2017-01-01 6\n 2017-01-02 0\n 2017-01-03 1\n 2017-01-04 2\n 2017-01-05 3\n 2017-01-06 4\n 2017-01-07 5\n 2017-01-08 6\n Freq: D, dtype: int32\n " day_of_week = _field_accessor('day_of_week', 'dow', _dayofweek_doc) dayofweek = day_of_week weekday = day_of_week day_of_year = _field_accessor('dayofyear', 'doy', '\n The ordinal day of the year.\n\n See Also\n --------\n DatetimeIndex.dayofweek : The day of the week with Monday=0, Sunday=6.\n DatetimeIndex.day : The day of the datetime.\n\n Examples\n --------\n For Series:\n\n >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])\n >>> s = pd.to_datetime(s)\n >>> s\n 0 2020-01-01 10:00:00+00:00\n 1 2020-02-01 11:00:00+00:00\n dtype: datetime64[s, UTC]\n >>> s.dt.dayofyear\n 0 1\n 1 32\n dtype: int32\n\n For DatetimeIndex:\n\n >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",\n ... "2/1/2020 11:00:00+00:00"])\n >>> idx.dayofyear\n Index([1, 32], dtype=\'int32\')\n ') dayofyear = day_of_year quarter = _field_accessor('quarter', 'q', '\n The quarter of the date.\n\n See Also\n --------\n DatetimeIndex.snap : Snap time stamps to nearest occurring frequency.\n DatetimeIndex.time : Returns numpy array of datetime.time objects.\n The time part of the Timestamps.\n\n Examples\n --------\n For Series:\n\n >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "4/1/2020 11:00:00+00:00"])\n >>> s = pd.to_datetime(s)\n >>> s\n 0 2020-01-01 10:00:00+00:00\n 1 2020-04-01 11:00:00+00:00\n dtype: datetime64[s, UTC]\n >>> s.dt.quarter\n 0 1\n 1 2\n dtype: int32\n\n For DatetimeIndex:\n\n >>> idx = pd.DatetimeIndex(["1/1/2020 10:00:00+00:00",\n ... 
"2/1/2020 11:00:00+00:00"])\n >>> idx.quarter\n Index([1, 1], dtype=\'int32\')\n ') days_in_month = _field_accessor('days_in_month', 'dim', '\n The number of days in the month.\n\n See Also\n --------\n Series.dt.day : Return the day of the month.\n Series.dt.is_month_end : Return a boolean indicating if the\n date is the last day of the month.\n Series.dt.is_month_start : Return a boolean indicating if the\n date is the first day of the month.\n Series.dt.month : Return the month as January=1 through December=12.\n\n Examples\n --------\n >>> s = pd.Series(["1/1/2020 10:00:00+00:00", "2/1/2020 11:00:00+00:00"])\n >>> s = pd.to_datetime(s)\n >>> s\n 0 2020-01-01 10:00:00+00:00\n 1 2020-02-01 11:00:00+00:00\n dtype: datetime64[s, UTC]\n >>> s.dt.daysinmonth\n 0 31\n 1 29\n dtype: int32\n ') daysinmonth = days_in_month _is_month_doc = '\n Indicates whether the date is the {first_or_last} day of the month.\n\n Returns\n -------\n Series or array\n For Series, returns a Series with boolean values.\n For DatetimeIndex, returns a boolean array.\n\n See Also\n --------\n is_month_start : Return a boolean indicating whether the date\n is the first day of the month.\n is_month_end : Return a boolean indicating whether the date\n is the last day of the month.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> s = pd.Series(pd.date_range("2018-02-27", periods=3))\n >>> s\n 0 2018-02-27\n 1 2018-02-28\n 2 2018-03-01\n dtype: datetime64[ns]\n >>> s.dt.is_month_start\n 0 False\n 1 False\n 2 True\n dtype: bool\n >>> s.dt.is_month_end\n 0 False\n 1 True\n 2 False\n dtype: bool\n\n >>> idx = pd.date_range("2018-02-27", periods=3)\n >>> idx.is_month_start\n array([False, False, True])\n >>> idx.is_month_end\n array([False, True, False])\n ' is_month_start = _field_accessor('is_month_start', 'is_month_start', _is_month_doc.format(first_or_last='first')) is_month_end = _field_accessor('is_month_end', 'is_month_end', _is_month_doc.format(first_or_last='last')) is_quarter_start = _field_accessor('is_quarter_start', 'is_quarter_start', '\n Indicator for whether the date is the first day of a quarter.\n\n Returns\n -------\n is_quarter_start : Series or DatetimeIndex\n The same type as the original data with boolean values. Series will\n have the same name and index. DatetimeIndex will have the same\n name.\n\n See Also\n --------\n quarter : Return the quarter of the date.\n is_quarter_end : Similar property for indicating the quarter end.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> df = pd.DataFrame({\'dates\': pd.date_range("2017-03-30",\n ... periods=4)})\n >>> df.assign(quarter=df.dates.dt.quarter,\n ... is_quarter_start=df.dates.dt.is_quarter_start)\n dates quarter is_quarter_start\n 0 2017-03-30 1 False\n 1 2017-03-31 1 False\n 2 2017-04-01 2 True\n 3 2017-04-02 2 False\n\n >>> idx = pd.date_range(\'2017-03-30\', periods=4)\n >>> idx\n DatetimeIndex([\'2017-03-30\', \'2017-03-31\', \'2017-04-01\', \'2017-04-02\'],\n dtype=\'datetime64[ns]\', freq=\'D\')\n\n >>> idx.is_quarter_start\n array([False, False, True, False])\n ') is_quarter_end = _field_accessor('is_quarter_end', 'is_quarter_end', '\n Indicator for whether the date is the last day of a quarter.\n\n Returns\n -------\n is_quarter_end : Series or DatetimeIndex\n The same type as the original data with boolean values. 
Series will\n have the same name and index. DatetimeIndex will have the same\n name.\n\n See Also\n --------\n quarter : Return the quarter of the date.\n is_quarter_start : Similar property indicating the quarter start.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> df = pd.DataFrame({\'dates\': pd.date_range("2017-03-30",\n ... periods=4)})\n >>> df.assign(quarter=df.dates.dt.quarter,\n ... is_quarter_end=df.dates.dt.is_quarter_end)\n dates quarter is_quarter_end\n 0 2017-03-30 1 False\n 1 2017-03-31 1 True\n 2 2017-04-01 2 False\n 3 2017-04-02 2 False\n\n >>> idx = pd.date_range(\'2017-03-30\', periods=4)\n >>> idx\n DatetimeIndex([\'2017-03-30\', \'2017-03-31\', \'2017-04-01\', \'2017-04-02\'],\n dtype=\'datetime64[ns]\', freq=\'D\')\n\n >>> idx.is_quarter_end\n array([False, True, False, False])\n ') is_year_start = _field_accessor('is_year_start', 'is_year_start', '\n Indicate whether the date is the first day of a year.\n\n Returns\n -------\n Series or DatetimeIndex\n The same type as the original data with boolean values. Series will\n have the same name and index. DatetimeIndex will have the same\n name.\n\n See Also\n --------\n is_year_end : Similar property indicating the last day of the year.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))\n >>> dates\n 0 2017-12-30\n 1 2017-12-31\n 2 2018-01-01\n dtype: datetime64[ns]\n\n >>> dates.dt.is_year_start\n 0 False\n 1 False\n 2 True\n dtype: bool\n\n >>> idx = pd.date_range("2017-12-30", periods=3)\n >>> idx\n DatetimeIndex([\'2017-12-30\', \'2017-12-31\', \'2018-01-01\'],\n dtype=\'datetime64[ns]\', freq=\'D\')\n\n >>> idx.is_year_start\n array([False, False, True])\n\n This method, when applied to Series with datetime values under\n the ``.dt`` accessor, will lose information about Business offsets.\n\n >>> dates = pd.Series(pd.date_range("2020-10-30", periods=4, freq="BYS"))\n >>> dates\n 0 2021-01-01\n 1 2022-01-03\n 2 2023-01-02\n 3 2024-01-01\n dtype: datetime64[ns]\n\n >>> dates.dt.is_year_start\n 0 True\n 1 False\n 2 False\n 3 True\n dtype: bool\n\n >>> idx = pd.date_range("2020-10-30", periods=4, freq="BYS")\n >>> idx\n DatetimeIndex([\'2021-01-01\', \'2022-01-03\', \'2023-01-02\', \'2024-01-01\'],\n dtype=\'datetime64[ns]\', freq=\'BYS-JAN\')\n\n >>> idx.is_year_start\n array([ True, True, True, True])\n ') is_year_end = _field_accessor('is_year_end', 'is_year_end', '\n Indicate whether the date is the last day of the year.\n\n Returns\n -------\n Series or DatetimeIndex\n The same type as the original data with boolean values. Series will\n have the same name and index. 
DatetimeIndex will have the same\n name.\n\n See Also\n --------\n is_year_start : Similar property indicating the start of the year.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> dates = pd.Series(pd.date_range("2017-12-30", periods=3))\n >>> dates\n 0 2017-12-30\n 1 2017-12-31\n 2 2018-01-01\n dtype: datetime64[ns]\n\n >>> dates.dt.is_year_end\n 0 False\n 1 True\n 2 False\n dtype: bool\n\n >>> idx = pd.date_range("2017-12-30", periods=3)\n >>> idx\n DatetimeIndex([\'2017-12-30\', \'2017-12-31\', \'2018-01-01\'],\n dtype=\'datetime64[ns]\', freq=\'D\')\n\n >>> idx.is_year_end\n array([False, True, False])\n ') is_leap_year = _field_accessor('is_leap_year', 'is_leap_year', '\n Boolean indicator if the date belongs to a leap year.\n\n A leap year is a year, which has 366 days (instead of 365) including\n 29th of February as an intercalary day.\n Leap years are years which are multiples of four with the exception\n of years divisible by 100 but not by 400.\n\n Returns\n -------\n Series or ndarray\n Booleans indicating if dates belong to a leap year.\n\n See Also\n --------\n DatetimeIndex.is_year_end : Indicate whether the date is the\n last day of the year.\n DatetimeIndex.is_year_start : Indicate whether the date is the first\n day of a year.\n\n Examples\n --------\n This method is available on Series with datetime values under\n the ``.dt`` accessor, and directly on DatetimeIndex.\n\n >>> idx = pd.date_range("2012-01-01", "2015-01-01", freq="YE")\n >>> idx\n DatetimeIndex([\'2012-12-31\', \'2013-12-31\', \'2014-12-31\'],\n dtype=\'datetime64[ns]\', freq=\'YE-DEC\')\n >>> idx.is_leap_year\n array([ True, False, False])\n\n >>> dates_series = pd.Series(idx)\n >>> dates_series\n 0 2012-12-31\n 1 2013-12-31\n 2 2014-12-31\n dtype: datetime64[ns]\n >>> dates_series.dt.is_leap_year\n 0 True\n 1 False\n 2 False\n dtype: bool\n ') def to_julian_date(self) -> npt.NDArray[np.float64]: year = np.asarray(self.year) month = np.asarray(self.month) day = np.asarray(self.day) testarr = month < 3 year[testarr] -= 1 month[testarr] += 12 return day + np.fix((153 * month - 457) / 5) + 365 * year + np.floor(year / 4) - np.floor(year / 100) + np.floor(year / 400) + 1721118.5 + (self.hour + self.minute / 60 + self.second / 3600 + self.microsecond / 3600 / 10 ** 6 + self.nanosecond / 3600 / 10 ** 9) / 24 def _reduce(self, name: str, *, skipna: bool=True, keepdims: bool=False, **kwargs): result = super()._reduce(name, skipna=skipna, keepdims=keepdims, **kwargs) if keepdims and isinstance(result, np.ndarray): if name == 'std': from pandas.core.arrays import TimedeltaArray return TimedeltaArray._from_sequence(result) else: return self._from_sequence(result, dtype=self.dtype) return result def std(self, axis=None, dtype=None, out=None, ddof: int=1, keepdims: bool=False, skipna: bool=True) -> Timedelta: from pandas.core.arrays import TimedeltaArray dtype_str = self._ndarray.dtype.name.replace('datetime64', 'timedelta64') dtype = np.dtype(dtype_str) tda = TimedeltaArray._simple_new(self._ndarray.view(dtype), dtype=dtype) return tda.std(axis=axis, out=out, ddof=ddof, keepdims=keepdims, skipna=skipna) def _sequence_to_dt64(data: ArrayLike, *, copy: bool=False, tz: tzinfo | None=None, dayfirst: bool=False, yearfirst: bool=False, ambiguous: TimeAmbiguous='raise', out_unit: str | None=None) -> tuple[np.ndarray, tzinfo | None]: (data, copy) = maybe_convert_dtype(data, copy, tz=tz) data_dtype = getattr(data, 'dtype', 
None) out_dtype = DT64NS_DTYPE if out_unit is not None: out_dtype = np.dtype(f'M8[{out_unit}]') if data_dtype == object or is_string_dtype(data_dtype): data = cast(np.ndarray, data) copy = False if lib.infer_dtype(data, skipna=False) == 'integer': data = data.astype(np.int64) elif tz is not None and ambiguous == 'raise': obj_data = np.asarray(data, dtype=object) result = tslib.array_to_datetime_with_tz(obj_data, tz=tz, dayfirst=dayfirst, yearfirst=yearfirst, creso=abbrev_to_npy_unit(out_unit)) return (result, tz) else: (converted, inferred_tz) = objects_to_datetime64(data, dayfirst=dayfirst, yearfirst=yearfirst, allow_object=False, out_unit=out_unit) copy = False if tz and inferred_tz: result = converted elif inferred_tz: tz = inferred_tz result = converted else: (result, _) = _construct_from_dt64_naive(converted, tz=tz, copy=copy, ambiguous=ambiguous) return (result, tz) data_dtype = data.dtype if isinstance(data_dtype, DatetimeTZDtype): data = cast(DatetimeArray, data) tz = _maybe_infer_tz(tz, data.tz) result = data._ndarray elif lib.is_np_dtype(data_dtype, 'M'): if isinstance(data, DatetimeArray): data = data._ndarray data = cast(np.ndarray, data) (result, copy) = _construct_from_dt64_naive(data, tz=tz, copy=copy, ambiguous=ambiguous) else: if data.dtype != INT64_DTYPE: data = data.astype(np.int64, copy=False) copy = False data = cast(np.ndarray, data) result = data.view(out_dtype) if copy: result = result.copy() assert isinstance(result, np.ndarray), type(result) assert result.dtype.kind == 'M' assert result.dtype != 'M8' assert is_supported_dtype(result.dtype) return (result, tz) def _construct_from_dt64_naive(data: np.ndarray, *, tz: tzinfo | None, copy: bool, ambiguous: TimeAmbiguous) -> tuple[np.ndarray, bool]: new_dtype = data.dtype if not is_supported_dtype(new_dtype): new_dtype = get_supported_dtype(new_dtype) data = astype_overflowsafe(data, dtype=new_dtype, copy=False) copy = False if data.dtype.byteorder == '>': data = data.astype(data.dtype.newbyteorder('<')) new_dtype = data.dtype copy = False if tz is not None: shape = data.shape if data.ndim > 1: data = data.ravel() data_unit = get_unit_from_dtype(new_dtype) data = tzconversion.tz_localize_to_utc(data.view('i8'), tz, ambiguous=ambiguous, creso=data_unit) data = data.view(new_dtype) data = data.reshape(shape) assert data.dtype == new_dtype, data.dtype result = data return (result, copy) def objects_to_datetime64(data: np.ndarray, dayfirst, yearfirst, utc: bool=False, errors: DateTimeErrorChoices='raise', allow_object: bool=False, out_unit: str | None=None) -> tuple[np.ndarray, tzinfo | None]: assert errors in ['raise', 'coerce'] data = np.asarray(data, dtype=np.object_) (result, tz_parsed) = tslib.array_to_datetime(data, errors=errors, utc=utc, dayfirst=dayfirst, yearfirst=yearfirst, creso=abbrev_to_npy_unit(out_unit)) if tz_parsed is not None: return (result, tz_parsed) elif result.dtype.kind == 'M': return (result, tz_parsed) elif result.dtype == object: if allow_object: return (result, tz_parsed) raise TypeError('DatetimeIndex has mixed timezones') else: raise TypeError(result) def maybe_convert_dtype(data, copy: bool, tz: tzinfo | None=None): if not hasattr(data, 'dtype'): return (data, copy) if is_float_dtype(data.dtype): data = data.astype(DT64NS_DTYPE).view('i8') copy = False elif lib.is_np_dtype(data.dtype, 'm') or is_bool_dtype(data.dtype): raise TypeError(f'dtype {data.dtype} cannot be converted to datetime64[ns]') elif isinstance(data.dtype, PeriodDtype): raise TypeError('Passing PeriodDtype data is invalid. 
Use `data.to_timestamp()` instead') elif isinstance(data.dtype, ExtensionDtype) and (not isinstance(data.dtype, DatetimeTZDtype)): data = np.array(data, dtype=np.object_) copy = False return (data, copy) def _maybe_infer_tz(tz: tzinfo | None, inferred_tz: tzinfo | None) -> tzinfo | None: if tz is None: tz = inferred_tz elif inferred_tz is None: pass elif not timezones.tz_compare(tz, inferred_tz): raise TypeError(f'data is already tz-aware {inferred_tz}, unable to set specified tz: {tz}') return tz def _validate_dt64_dtype(dtype): if dtype is not None: dtype = pandas_dtype(dtype) if dtype == np.dtype('M8'): msg = "Passing in 'datetime64' dtype with no precision is not allowed. Please pass in 'datetime64[ns]' instead." raise ValueError(msg) if isinstance(dtype, np.dtype) and (dtype.kind != 'M' or not is_supported_dtype(dtype)) or not isinstance(dtype, (np.dtype, DatetimeTZDtype)): raise ValueError(f"Unexpected value for 'dtype': '{dtype}'. Must be 'datetime64[s]', 'datetime64[ms]', 'datetime64[us]', 'datetime64[ns]' or DatetimeTZDtype'.") if getattr(dtype, 'tz', None): dtype = cast(DatetimeTZDtype, dtype) dtype = DatetimeTZDtype(unit=dtype.unit, tz=timezones.tz_standardize(dtype.tz)) return dtype def _validate_tz_from_dtype(dtype, tz: tzinfo | None, explicit_tz_none: bool=False) -> tzinfo | None: if dtype is not None: if isinstance(dtype, str): try: dtype = DatetimeTZDtype.construct_from_string(dtype) except TypeError: pass dtz = getattr(dtype, 'tz', None) if dtz is not None: if tz is not None and (not timezones.tz_compare(tz, dtz)): raise ValueError('cannot supply both a tz and a dtype with a tz') if explicit_tz_none: raise ValueError('Cannot pass both a timezone-aware dtype and tz=None') tz = dtz if tz is not None and lib.is_np_dtype(dtype, 'M'): if tz is not None and (not timezones.tz_compare(tz, dtz)): raise ValueError('cannot supply both a tz and a timezone-naive dtype (i.e. 
datetime64[ns])') return tz def _infer_tz_from_endpoints(start: Timestamp, end: Timestamp, tz: tzinfo | None) -> tzinfo | None: try: inferred_tz = timezones.infer_tzinfo(start, end) except AssertionError as err: raise TypeError('Start and end cannot both be tz-aware with different timezones') from err inferred_tz = timezones.maybe_get_tz(inferred_tz) tz = timezones.maybe_get_tz(tz) if tz is not None and inferred_tz is not None: if not timezones.tz_compare(inferred_tz, tz): raise AssertionError('Inferred time zone not equal to passed time zone') elif inferred_tz is not None: tz = inferred_tz return tz def _maybe_normalize_endpoints(start: _TimestampNoneT1, end: _TimestampNoneT2, normalize: bool) -> tuple[_TimestampNoneT1, _TimestampNoneT2]: if normalize: if start is not None: start = start.normalize() if end is not None: end = end.normalize() return (start, end) def _maybe_localize_point(ts: Timestamp | None, freq, tz, ambiguous, nonexistent) -> Timestamp | None: if ts is not None and ts.tzinfo is None: ambiguous = ambiguous if ambiguous != 'infer' else False localize_args = {'ambiguous': ambiguous, 'nonexistent': nonexistent, 'tz': None} if isinstance(freq, Tick) or freq is None: localize_args['tz'] = tz ts = ts.tz_localize(**localize_args) return ts def _generate_range(start: Timestamp | None, end: Timestamp | None, periods: int | None, offset: BaseOffset, *, unit: str) -> Generator[Timestamp, None, None]: offset = to_offset(offset) start = Timestamp(start) if start is not NaT: start = start.as_unit(unit) else: start = None end = Timestamp(end) if end is not NaT: end = end.as_unit(unit) else: end = None if start and (not offset.is_on_offset(start)): if offset.n >= 0: start = offset.rollforward(start) else: start = offset.rollback(start) if periods is None and end < start and (offset.n >= 0): end = None periods = 0 if end is None: end = start + (periods - 1) * offset if start is None: start = end - (periods - 1) * offset start = cast(Timestamp, start) end = cast(Timestamp, end) cur = start if offset.n >= 0: while cur <= end: yield cur if cur == end: break next_date = offset._apply(cur) next_date = next_date.as_unit(unit) if next_date <= cur: raise ValueError(f'Offset {offset} did not increment date') cur = next_date else: while cur >= end: yield cur if cur == end: break next_date = offset._apply(cur) next_date = next_date.as_unit(unit) if next_date >= cur: raise ValueError(f'Offset {offset} did not decrement date') cur = next_date # File: pandas-main/pandas/core/arrays/floating.py from __future__ import annotations from typing import ClassVar import numpy as np from pandas.core.dtypes.base import register_extension_dtype from pandas.core.dtypes.common import is_float_dtype from pandas.core.arrays.numeric import NumericArray, NumericDtype class FloatingDtype(NumericDtype): _internal_fill_value = np.nan _default_np_dtype = np.dtype(np.float64) _checker = is_float_dtype @classmethod def construct_array_type(cls) -> type[FloatingArray]: return FloatingArray @classmethod def _get_dtype_mapping(cls) -> dict[np.dtype, FloatingDtype]: return NUMPY_FLOAT_TO_DTYPE @classmethod def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray: return values.astype(dtype, copy=copy) class FloatingArray(NumericArray): _dtype_cls = FloatingDtype _dtype_docstring = '\nAn ExtensionDtype for {dtype} data.\n\nThis dtype uses ``pd.NA`` as missing value indicator.\n\nAttributes\n----------\nNone\n\nMethods\n-------\nNone\n\nSee Also\n--------\nCategoricalDtype : Type for categorical data with 
the categories and orderedness.\nIntegerDtype : An ExtensionDtype to hold a single size & kind of integer dtype.\nStringDtype : An ExtensionDtype for string data.\n\nExamples\n--------\nFor Float32Dtype:\n\n>>> ser = pd.Series([2.25, pd.NA], dtype=pd.Float32Dtype())\n>>> ser.dtype\nFloat32Dtype()\n\nFor Float64Dtype:\n\n>>> ser = pd.Series([2.25, pd.NA], dtype=pd.Float64Dtype())\n>>> ser.dtype\nFloat64Dtype()\n' @register_extension_dtype class Float32Dtype(FloatingDtype): type = np.float32 name: ClassVar[str] = 'Float32' __doc__ = _dtype_docstring.format(dtype='float32') @register_extension_dtype class Float64Dtype(FloatingDtype): type = np.float64 name: ClassVar[str] = 'Float64' __doc__ = _dtype_docstring.format(dtype='float64') NUMPY_FLOAT_TO_DTYPE: dict[np.dtype, FloatingDtype] = {np.dtype(np.float32): Float32Dtype(), np.dtype(np.float64): Float64Dtype()} # File: pandas-main/pandas/core/arrays/integer.py from __future__ import annotations from typing import ClassVar import numpy as np from pandas.core.dtypes.base import register_extension_dtype from pandas.core.dtypes.common import is_integer_dtype from pandas.core.arrays.numeric import NumericArray, NumericDtype class IntegerDtype(NumericDtype): _internal_fill_value = 1 _default_np_dtype = np.dtype(np.int64) _checker = is_integer_dtype @classmethod def construct_array_type(cls) -> type[IntegerArray]: return IntegerArray @classmethod def _get_dtype_mapping(cls) -> dict[np.dtype, IntegerDtype]: return NUMPY_INT_TO_DTYPE @classmethod def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray: try: return values.astype(dtype, casting='safe', copy=copy) except TypeError as err: casted = values.astype(dtype, copy=copy) if (casted == values).all(): return casted raise TypeError(f'cannot safely cast non-equivalent {values.dtype} to {np.dtype(dtype)}') from err class IntegerArray(NumericArray): _dtype_cls = IntegerDtype _dtype_docstring = '\nAn ExtensionDtype for {dtype} integer data.\n\nUses :attr:`pandas.NA` as its missing value, rather than :attr:`numpy.nan`.\n\nAttributes\n----------\nNone\n\nMethods\n-------\nNone\n\nSee Also\n--------\nInt8Dtype : 8-bit nullable integer type.\nInt16Dtype : 16-bit nullable integer type.\nInt32Dtype : 32-bit nullable integer type.\nInt64Dtype : 64-bit nullable integer type.\n\nExamples\n--------\nFor Int8Dtype:\n\n>>> ser = pd.Series([2, pd.NA], dtype=pd.Int8Dtype())\n>>> ser.dtype\nInt8Dtype()\n\nFor Int16Dtype:\n\n>>> ser = pd.Series([2, pd.NA], dtype=pd.Int16Dtype())\n>>> ser.dtype\nInt16Dtype()\n\nFor Int32Dtype:\n\n>>> ser = pd.Series([2, pd.NA], dtype=pd.Int32Dtype())\n>>> ser.dtype\nInt32Dtype()\n\nFor Int64Dtype:\n\n>>> ser = pd.Series([2, pd.NA], dtype=pd.Int64Dtype())\n>>> ser.dtype\nInt64Dtype()\n\nFor UInt8Dtype:\n\n>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt8Dtype())\n>>> ser.dtype\nUInt8Dtype()\n\nFor UInt16Dtype:\n\n>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt16Dtype())\n>>> ser.dtype\nUInt16Dtype()\n\nFor UInt32Dtype:\n\n>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt32Dtype())\n>>> ser.dtype\nUInt32Dtype()\n\nFor UInt64Dtype:\n\n>>> ser = pd.Series([2, pd.NA], dtype=pd.UInt64Dtype())\n>>> ser.dtype\nUInt64Dtype()\n' @register_extension_dtype class Int8Dtype(IntegerDtype): type = np.int8 name: ClassVar[str] = 'Int8' __doc__ = _dtype_docstring.format(dtype='int8') @register_extension_dtype class Int16Dtype(IntegerDtype): type = np.int16 name: ClassVar[str] = 'Int16' __doc__ = _dtype_docstring.format(dtype='int16') @register_extension_dtype class 
Int32Dtype(IntegerDtype): type = np.int32 name: ClassVar[str] = 'Int32' __doc__ = _dtype_docstring.format(dtype='int32') @register_extension_dtype class Int64Dtype(IntegerDtype): type = np.int64 name: ClassVar[str] = 'Int64' __doc__ = _dtype_docstring.format(dtype='int64') @register_extension_dtype class UInt8Dtype(IntegerDtype): type = np.uint8 name: ClassVar[str] = 'UInt8' __doc__ = _dtype_docstring.format(dtype='uint8') @register_extension_dtype class UInt16Dtype(IntegerDtype): type = np.uint16 name: ClassVar[str] = 'UInt16' __doc__ = _dtype_docstring.format(dtype='uint16') @register_extension_dtype class UInt32Dtype(IntegerDtype): type = np.uint32 name: ClassVar[str] = 'UInt32' __doc__ = _dtype_docstring.format(dtype='uint32') @register_extension_dtype class UInt64Dtype(IntegerDtype): type = np.uint64 name: ClassVar[str] = 'UInt64' __doc__ = _dtype_docstring.format(dtype='uint64') NUMPY_INT_TO_DTYPE: dict[np.dtype, IntegerDtype] = {np.dtype(np.int8): Int8Dtype(), np.dtype(np.int16): Int16Dtype(), np.dtype(np.int32): Int32Dtype(), np.dtype(np.int64): Int64Dtype(), np.dtype(np.uint8): UInt8Dtype(), np.dtype(np.uint16): UInt16Dtype(), np.dtype(np.uint32): UInt32Dtype(), np.dtype(np.uint64): UInt64Dtype()} # File: pandas-main/pandas/core/arrays/interval.py from __future__ import annotations import operator from operator import le, lt import textwrap from typing import TYPE_CHECKING, Literal, Union, overload import numpy as np from pandas._libs import lib from pandas._libs.interval import VALID_CLOSED, Interval, IntervalMixin, intervals_to_interval_bounds from pandas._libs.missing import NA from pandas._typing import ArrayLike, AxisInt, Dtype, IntervalClosedType, NpDtype, PositionalIndexer, ScalarIndexer, Self, SequenceIndexer, SortKind, TimeArrayLike, npt from pandas.compat.numpy import function as nv from pandas.errors import IntCastingNaNError from pandas.util._decorators import Appender from pandas.core.dtypes.cast import LossySetitemError, maybe_upcast_numeric_to_64bit from pandas.core.dtypes.common import is_float_dtype, is_integer_dtype, is_list_like, is_object_dtype, is_scalar, is_string_dtype, needs_i8_conversion, pandas_dtype from pandas.core.dtypes.dtypes import CategoricalDtype, IntervalDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCDatetimeIndex, ABCIntervalIndex, ABCPeriodIndex from pandas.core.dtypes.missing import is_valid_na_for_dtype, isna, notna from pandas.core.algorithms import isin, take, unique, value_counts_internal as value_counts from pandas.core.arrays import ArrowExtensionArray from pandas.core.arrays.base import ExtensionArray, _extension_array_shared_docs from pandas.core.arrays.datetimes import DatetimeArray from pandas.core.arrays.timedeltas import TimedeltaArray import pandas.core.common as com from pandas.core.construction import array as pd_array, ensure_wrapped_if_datetimelike, extract_array from pandas.core.indexers import check_array_indexer from pandas.core.ops import invalid_comparison, unpack_zerodim_and_defer if TYPE_CHECKING: from collections.abc import Callable, Iterator, Sequence from pandas import Index, Series IntervalSide = Union[TimeArrayLike, np.ndarray] IntervalOrNA = Union[Interval, float] _interval_shared_docs: dict[str, str] = {} _shared_docs_kwargs = {'klass': 'IntervalArray', 'qualname': 'arrays.IntervalArray', 'name': ''} _interval_shared_docs['class'] = "\n%(summary)s\n\nParameters\n----------\ndata : array-like (1-dimensional)\n Array-like (ndarray, :class:`DateTimeArray`, :class:`TimeDeltaArray`) containing\n 
Interval objects from which to build the %(klass)s.\nclosed : {'left', 'right', 'both', 'neither'}, default 'right'\n Whether the intervals are closed on the left-side, right-side, both or\n neither.\ndtype : dtype or None, default None\n If None, dtype will be inferred.\ncopy : bool, default False\n Copy the input data.\n%(name)sverify_integrity : bool, default True\n Verify that the %(klass)s is valid.\n\nAttributes\n----------\nleft\nright\nclosed\nmid\nlength\nis_empty\nis_non_overlapping_monotonic\n%(extra_attributes)s\nMethods\n-------\nfrom_arrays\nfrom_tuples\nfrom_breaks\ncontains\noverlaps\nset_closed\nto_tuples\n%(extra_methods)s\nSee Also\n--------\nIndex : The base pandas Index type.\nInterval : A bounded slice-like interval; the elements of an %(klass)s.\ninterval_range : Function to create a fixed frequency IntervalIndex.\ncut : Bin values into discrete Intervals.\nqcut : Bin values into equal-sized Intervals based on rank or sample quantiles.\n\nNotes\n-----\nSee the `user guide\n`__\nfor more.\n\n%(examples)s" @Appender(_interval_shared_docs['class'] % {'klass': 'IntervalArray', 'summary': 'Pandas array for interval data that are closed on the same side.', 'name': '', 'extra_attributes': '', 'extra_methods': '', 'examples': textwrap.dedent(' Examples\n --------\n A new ``IntervalArray`` can be constructed directly from an array-like of\n ``Interval`` objects:\n\n >>> pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])\n \n [(0, 1], (1, 5]]\n Length: 2, dtype: interval[int64, right]\n\n It may also be constructed using one of the constructor\n methods: :meth:`IntervalArray.from_arrays`,\n :meth:`IntervalArray.from_breaks`, and :meth:`IntervalArray.from_tuples`.\n ')}) class IntervalArray(IntervalMixin, ExtensionArray): can_hold_na = True _na_value = _fill_value = np.nan @property def ndim(self) -> Literal[1]: return 1 _left: IntervalSide _right: IntervalSide _dtype: IntervalDtype def __new__(cls, data, closed: IntervalClosedType | None=None, dtype: Dtype | None=None, copy: bool=False, verify_integrity: bool=True) -> Self: data = extract_array(data, extract_numpy=True) if isinstance(data, cls): left: IntervalSide = data._left right: IntervalSide = data._right closed = closed or data.closed dtype = IntervalDtype(left.dtype, closed=closed) else: if is_scalar(data): msg = f'{cls.__name__}(...) 
must be called with a collection of some kind, {data} was passed' raise TypeError(msg) data = _maybe_convert_platform_interval(data) (left, right, infer_closed) = intervals_to_interval_bounds(data, validate_closed=closed is None) if left.dtype == object: left = lib.maybe_convert_objects(left) right = lib.maybe_convert_objects(right) closed = closed or infer_closed (left, right, dtype) = cls._ensure_simple_new_inputs(left, right, closed=closed, copy=copy, dtype=dtype) if verify_integrity: cls._validate(left, right, dtype=dtype) return cls._simple_new(left, right, dtype=dtype) @classmethod def _simple_new(cls, left: IntervalSide, right: IntervalSide, dtype: IntervalDtype) -> Self: result = IntervalMixin.__new__(cls) result._left = left result._right = right result._dtype = dtype return result @classmethod def _ensure_simple_new_inputs(cls, left, right, closed: IntervalClosedType | None=None, copy: bool=False, dtype: Dtype | None=None) -> tuple[IntervalSide, IntervalSide, IntervalDtype]: from pandas.core.indexes.base import ensure_index left = ensure_index(left, copy=copy) left = maybe_upcast_numeric_to_64bit(left) right = ensure_index(right, copy=copy) right = maybe_upcast_numeric_to_64bit(right) if closed is None and isinstance(dtype, IntervalDtype): closed = dtype.closed closed = closed or 'right' if dtype is not None: dtype = pandas_dtype(dtype) if isinstance(dtype, IntervalDtype): if dtype.subtype is not None: left = left.astype(dtype.subtype) right = right.astype(dtype.subtype) else: msg = f'dtype must be an IntervalDtype, got {dtype}' raise TypeError(msg) if dtype.closed is None: dtype = IntervalDtype(dtype.subtype, closed) elif closed != dtype.closed: raise ValueError('closed keyword does not match dtype.closed') if is_float_dtype(left.dtype) and is_integer_dtype(right.dtype): right = right.astype(left.dtype) elif is_float_dtype(right.dtype) and is_integer_dtype(left.dtype): left = left.astype(right.dtype) if type(left) != type(right): msg = f'must not have differing left [{type(left).__name__}] and right [{type(right).__name__}] types' raise ValueError(msg) if isinstance(left.dtype, CategoricalDtype) or is_string_dtype(left.dtype): msg = 'category, object, and string subtypes are not supported for IntervalArray' raise TypeError(msg) if isinstance(left, ABCPeriodIndex): msg = 'Period dtypes are not supported, use a PeriodIndex instead' raise ValueError(msg) if isinstance(left, ABCDatetimeIndex) and str(left.tz) != str(right.tz): msg = f"left and right must have the same time zone, got '{left.tz}' and '{right.tz}'" raise ValueError(msg) elif needs_i8_conversion(left.dtype) and left.unit != right.unit: (left_arr, right_arr) = left._data._ensure_matching_resos(right._data) left = ensure_index(left_arr) right = ensure_index(right_arr) left = ensure_wrapped_if_datetimelike(left) left = extract_array(left, extract_numpy=True) right = ensure_wrapped_if_datetimelike(right) right = extract_array(right, extract_numpy=True) if isinstance(left, ArrowExtensionArray) or isinstance(right, ArrowExtensionArray): pass else: lbase = getattr(left, '_ndarray', left) lbase = getattr(lbase, '_data', lbase).base rbase = getattr(right, '_ndarray', right) rbase = getattr(rbase, '_data', rbase).base if lbase is not None and lbase is rbase: right = right.copy() dtype = IntervalDtype(left.dtype, closed=closed) return (left, right, dtype) @classmethod def _from_sequence(cls, scalars, *, dtype: Dtype | None=None, copy: bool=False) -> Self: return cls(scalars, dtype=dtype, copy=copy) @classmethod def 
_from_factorized(cls, values: np.ndarray, original: IntervalArray) -> Self: return cls._from_sequence(values, dtype=original.dtype) _interval_shared_docs['from_breaks'] = textwrap.dedent("\n Construct an %(klass)s from an array of splits.\n\n Parameters\n ----------\n breaks : array-like (1-dimensional)\n Left and right bounds for each interval.\n closed : {'left', 'right', 'both', 'neither'}, default 'right'\n Whether the intervals are closed on the left-side, right-side, both\n or neither. %(name)s\n copy : bool, default False\n Copy the data.\n dtype : dtype or None, default None\n If None, dtype will be inferred.\n\n Returns\n -------\n %(klass)s\n\n See Also\n --------\n interval_range : Function to create a fixed frequency IntervalIndex.\n %(klass)s.from_arrays : Construct from a left and right array.\n %(klass)s.from_tuples : Construct from a sequence of tuples.\n\n %(examples)s ") @classmethod @Appender(_interval_shared_docs['from_breaks'] % {'klass': 'IntervalArray', 'name': '', 'examples': textwrap.dedent(' Examples\n --------\n >>> pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3])\n \n [(0, 1], (1, 2], (2, 3]]\n Length: 3, dtype: interval[int64, right]\n ')}) def from_breaks(cls, breaks, closed: IntervalClosedType | None='right', copy: bool=False, dtype: Dtype | None=None) -> Self: breaks = _maybe_convert_platform_interval(breaks) return cls.from_arrays(breaks[:-1], breaks[1:], closed, copy=copy, dtype=dtype) _interval_shared_docs['from_arrays'] = textwrap.dedent("\n Construct from two arrays defining the left and right bounds.\n\n Parameters\n ----------\n left : array-like (1-dimensional)\n Left bounds for each interval.\n right : array-like (1-dimensional)\n Right bounds for each interval.\n closed : {'left', 'right', 'both', 'neither'}, default 'right'\n Whether the intervals are closed on the left-side, right-side, both\n or neither. %(name)s\n copy : bool, default False\n Copy the data.\n dtype : dtype, optional\n If None, dtype will be inferred.\n\n Returns\n -------\n %(klass)s\n\n Raises\n ------\n ValueError\n When a value is missing in only one of `left` or `right`.\n When a value in `left` is greater than the corresponding value\n in `right`.\n\n See Also\n --------\n interval_range : Function to create a fixed frequency IntervalIndex.\n %(klass)s.from_breaks : Construct an %(klass)s from an array of\n splits.\n %(klass)s.from_tuples : Construct an %(klass)s from an\n array-like of tuples.\n\n Notes\n -----\n Each element of `left` must be less than or equal to the `right`\n element at the same position. If an element is missing, it must be\n missing in both `left` and `right`. A TypeError is raised when\n using an unsupported type for `left` or `right`. 
At the moment,\n 'category', 'object', and 'string' subtypes are not supported.\n\n %(examples)s ") @classmethod @Appender(_interval_shared_docs['from_arrays'] % {'klass': 'IntervalArray', 'name': '', 'examples': textwrap.dedent(' Examples\n --------\n >>> pd.arrays.IntervalArray.from_arrays([0, 1, 2], [1, 2, 3])\n \n [(0, 1], (1, 2], (2, 3]]\n Length: 3, dtype: interval[int64, right]\n ')}) def from_arrays(cls, left, right, closed: IntervalClosedType | None='right', copy: bool=False, dtype: Dtype | None=None) -> Self: left = _maybe_convert_platform_interval(left) right = _maybe_convert_platform_interval(right) (left, right, dtype) = cls._ensure_simple_new_inputs(left, right, closed=closed, copy=copy, dtype=dtype) cls._validate(left, right, dtype=dtype) return cls._simple_new(left, right, dtype=dtype) _interval_shared_docs['from_tuples'] = textwrap.dedent("\n Construct an %(klass)s from an array-like of tuples.\n\n Parameters\n ----------\n data : array-like (1-dimensional)\n Array of tuples.\n closed : {'left', 'right', 'both', 'neither'}, default 'right'\n Whether the intervals are closed on the left-side, right-side, both\n or neither. %(name)s\n copy : bool, default False\n By-default copy the data, this is compat only and ignored.\n dtype : dtype or None, default None\n If None, dtype will be inferred.\n\n Returns\n -------\n %(klass)s\n\n See Also\n --------\n interval_range : Function to create a fixed frequency IntervalIndex.\n %(klass)s.from_arrays : Construct an %(klass)s from a left and\n right array.\n %(klass)s.from_breaks : Construct an %(klass)s from an array of\n splits.\n\n %(examples)s ") @classmethod @Appender(_interval_shared_docs['from_tuples'] % {'klass': 'IntervalArray', 'name': '', 'examples': textwrap.dedent(' Examples\n --------\n >>> pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2)])\n \n [(0, 1], (1, 2]]\n Length: 2, dtype: interval[int64, right]\n ')}) def from_tuples(cls, data, closed: IntervalClosedType | None='right', copy: bool=False, dtype: Dtype | None=None) -> Self: if len(data): (left, right) = ([], []) else: left = right = data for d in data: if not isinstance(d, tuple) and isna(d): lhs = rhs = np.nan else: name = cls.__name__ try: (lhs, rhs) = d except ValueError as err: msg = f'{name}.from_tuples requires tuples of length 2, got {d}' raise ValueError(msg) from err except TypeError as err: msg = f'{name}.from_tuples received an invalid item, {d}' raise TypeError(msg) from err left.append(lhs) right.append(rhs) return cls.from_arrays(left, right, closed, copy=False, dtype=dtype) @classmethod def _validate(cls, left, right, dtype: IntervalDtype) -> None: if not isinstance(dtype, IntervalDtype): msg = f'invalid dtype: {dtype}' raise ValueError(msg) if len(left) != len(right): msg = 'left and right must have the same length' raise ValueError(msg) left_mask = notna(left) right_mask = notna(right) if not (left_mask == right_mask).all(): msg = 'missing values must be missing in the same location both left and right sides' raise ValueError(msg) if not (left[left_mask] <= right[left_mask]).all(): msg = 'left side of interval must be <= right side' raise ValueError(msg) def _shallow_copy(self, left, right) -> Self: dtype = IntervalDtype(left.dtype, closed=self.closed) (left, right, dtype) = self._ensure_simple_new_inputs(left, right, dtype=dtype) return self._simple_new(left, right, dtype=dtype) @property def dtype(self) -> IntervalDtype: return self._dtype @property def nbytes(self) -> int: return self.left.nbytes + self.right.nbytes @property def 
size(self) -> int: return self.left.size def __iter__(self) -> Iterator: return iter(np.asarray(self)) def __len__(self) -> int: return len(self._left) @overload def __getitem__(self, key: ScalarIndexer) -> IntervalOrNA: ... @overload def __getitem__(self, key: SequenceIndexer) -> Self: ... def __getitem__(self, key: PositionalIndexer) -> Self | IntervalOrNA: key = check_array_indexer(self, key) left = self._left[key] right = self._right[key] if not isinstance(left, (np.ndarray, ExtensionArray)): if is_scalar(left) and isna(left): return self._fill_value return Interval(left, right, self.closed) if np.ndim(left) > 1: raise ValueError('multi-dimensional indexing not allowed') return self._simple_new(left, right, dtype=self.dtype) def __setitem__(self, key, value) -> None: (value_left, value_right) = self._validate_setitem_value(value) key = check_array_indexer(self, key) self._left[key] = value_left self._right[key] = value_right def _cmp_method(self, other, op): if is_list_like(other): if len(self) != len(other): raise ValueError('Lengths must match to compare') other = pd_array(other) elif not isinstance(other, Interval): if other is NA: from pandas.core.arrays import BooleanArray arr = np.empty(self.shape, dtype=bool) mask = np.ones(self.shape, dtype=bool) return BooleanArray(arr, mask) return invalid_comparison(self, other, op) if isinstance(other, Interval): other_dtype = pandas_dtype('interval') elif not isinstance(other.dtype, CategoricalDtype): other_dtype = other.dtype else: other_dtype = other.categories.dtype if isinstance(other_dtype, IntervalDtype): if self.closed != other.categories.closed: return invalid_comparison(self, other, op) other = other.categories._values.take(other.codes, allow_fill=True, fill_value=other.categories._na_value) if isinstance(other_dtype, IntervalDtype): if self.closed != other.closed: return invalid_comparison(self, other, op) elif not isinstance(other, Interval): other = type(self)(other) if op is operator.eq: return (self._left == other.left) & (self._right == other.right) elif op is operator.ne: return (self._left != other.left) | (self._right != other.right) elif op is operator.gt: return (self._left > other.left) | (self._left == other.left) & (self._right > other.right) elif op is operator.ge: return (self == other) | (self > other) elif op is operator.lt: return (self._left < other.left) | (self._left == other.left) & (self._right < other.right) else: return (self == other) | (self < other) if not is_object_dtype(other_dtype): return invalid_comparison(self, other, op) result = np.zeros(len(self), dtype=bool) for (i, obj) in enumerate(other): try: result[i] = op(self[i], obj) except TypeError: if obj is NA: result = result.astype(object) result[i] = NA else: raise return result @unpack_zerodim_and_defer('__eq__') def __eq__(self, other): return self._cmp_method(other, operator.eq) @unpack_zerodim_and_defer('__ne__') def __ne__(self, other): return self._cmp_method(other, operator.ne) @unpack_zerodim_and_defer('__gt__') def __gt__(self, other): return self._cmp_method(other, operator.gt) @unpack_zerodim_and_defer('__ge__') def __ge__(self, other): return self._cmp_method(other, operator.ge) @unpack_zerodim_and_defer('__lt__') def __lt__(self, other): return self._cmp_method(other, operator.lt) @unpack_zerodim_and_defer('__le__') def __le__(self, other): return self._cmp_method(other, operator.le) def argsort(self, *, ascending: bool=True, kind: SortKind='quicksort', na_position: str='last', **kwargs) -> np.ndarray: ascending = 
nv.validate_argsort_with_ascending(ascending, (), kwargs) if ascending and kind == 'quicksort' and (na_position == 'last'): return np.lexsort((self.right, self.left)) return super().argsort(ascending=ascending, kind=kind, na_position=na_position, **kwargs) def min(self, *, axis: AxisInt | None=None, skipna: bool=True) -> IntervalOrNA: nv.validate_minmax_axis(axis, self.ndim) if not len(self): return self._na_value mask = self.isna() if mask.any(): if not skipna: return self._na_value obj = self[~mask] else: obj = self indexer = obj.argsort()[0] return obj[indexer] def max(self, *, axis: AxisInt | None=None, skipna: bool=True) -> IntervalOrNA: nv.validate_minmax_axis(axis, self.ndim) if not len(self): return self._na_value mask = self.isna() if mask.any(): if not skipna: return self._na_value obj = self[~mask] else: obj = self indexer = obj.argsort()[-1] return obj[indexer] def fillna(self, value, limit: int | None=None, copy: bool=True) -> Self: if copy is False: raise NotImplementedError if limit is not None: raise ValueError('limit must be None') (value_left, value_right) = self._validate_scalar(value) left = self.left.fillna(value=value_left) right = self.right.fillna(value=value_right) return self._shallow_copy(left, right) def astype(self, dtype, copy: bool=True): from pandas import Index if dtype is not None: dtype = pandas_dtype(dtype) if isinstance(dtype, IntervalDtype): if dtype == self.dtype: return self.copy() if copy else self if is_float_dtype(self.dtype.subtype) and needs_i8_conversion(dtype.subtype): msg = f'Cannot convert {self.dtype} to {dtype}; subtypes are incompatible' raise TypeError(msg) try: new_left = Index(self._left, copy=False).astype(dtype.subtype) new_right = Index(self._right, copy=False).astype(dtype.subtype) except IntCastingNaNError: raise except (TypeError, ValueError) as err: msg = f'Cannot convert {self.dtype} to {dtype}; subtypes are incompatible' raise TypeError(msg) from err return self._shallow_copy(new_left, new_right) else: try: return super().astype(dtype, copy=copy) except (TypeError, ValueError) as err: msg = f'Cannot cast {type(self).__name__} to dtype {dtype}' raise TypeError(msg) from err def equals(self, other) -> bool: if type(self) != type(other): return False return bool(self.closed == other.closed and self.left.equals(other.left) and self.right.equals(other.right)) @classmethod def _concat_same_type(cls, to_concat: Sequence[IntervalArray]) -> Self: closed_set = {interval.closed for interval in to_concat} if len(closed_set) != 1: raise ValueError('Intervals must all be closed on the same side.') closed = closed_set.pop() left: IntervalSide = np.concatenate([interval.left for interval in to_concat]) right: IntervalSide = np.concatenate([interval.right for interval in to_concat]) (left, right, dtype) = cls._ensure_simple_new_inputs(left, right, closed=closed) return cls._simple_new(left, right, dtype=dtype) def copy(self) -> Self: left = self._left.copy() right = self._right.copy() dtype = self.dtype return self._simple_new(left, right, dtype=dtype) def isna(self) -> np.ndarray: return isna(self._left) def shift(self, periods: int=1, fill_value: object=None) -> IntervalArray: if not len(self) or periods == 0: return self.copy() self._validate_scalar(fill_value) empty_len = min(abs(periods), len(self)) if isna(fill_value): from pandas import Index fill_value = Index(self._left, copy=False)._na_value empty = IntervalArray.from_breaks([fill_value] * (empty_len + 1)) else: empty = self._from_sequence([fill_value] * empty_len, dtype=self.dtype) 
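# --- Editorial note (not part of the pandas source): at this point `empty` holds the
# fill block of length min(abs(periods), len(self)); for an NA fill it is built via
# IntervalArray.from_breaks, otherwise via _from_sequence with the array's own dtype.
# The branch just below prepends that block for a positive shift (dropping the last
# `periods` elements) or appends it for a negative shift, then stitches the two pieces
# together with _concat_same_type. A minimal usage sketch using only public API
# (illustrative only, repr output omitted):
#   arr = pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3])
#   arr.shift(1)   # left-pads with one NA interval and drops the last interval
#   arr.shift(-1)  # drops the first interval and right-pads with one NA interval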
if periods > 0: a = empty b = self[:-periods] else: a = self[abs(periods):] b = empty return self._concat_same_type([a, b]) def take(self, indices, *, allow_fill: bool=False, fill_value=None, axis=None, **kwargs) -> Self: nv.validate_take((), kwargs) fill_left = fill_right = fill_value if allow_fill: (fill_left, fill_right) = self._validate_scalar(fill_value) left_take = take(self._left, indices, allow_fill=allow_fill, fill_value=fill_left) right_take = take(self._right, indices, allow_fill=allow_fill, fill_value=fill_right) return self._shallow_copy(left_take, right_take) def _validate_listlike(self, value): try: array = IntervalArray(value) self._check_closed_matches(array, name='value') (value_left, value_right) = (array.left, array.right) except TypeError as err: msg = f"'value' should be an interval type, got {type(value)} instead." raise TypeError(msg) from err try: self.left._validate_fill_value(value_left) except (LossySetitemError, TypeError) as err: msg = f"'value' should be a compatible interval type, got {type(value)} instead." raise TypeError(msg) from err return (value_left, value_right) def _validate_scalar(self, value): if isinstance(value, Interval): self._check_closed_matches(value, name='value') (left, right) = (value.left, value.right) elif is_valid_na_for_dtype(value, self.left.dtype): left = right = self.left._na_value else: raise TypeError('can only insert Interval objects and NA into an IntervalArray') return (left, right) def _validate_setitem_value(self, value): if is_valid_na_for_dtype(value, self.left.dtype): value = self.left._na_value if is_integer_dtype(self.dtype.subtype): raise TypeError('Cannot set float NaN to integer-backed IntervalArray') (value_left, value_right) = (value, value) elif isinstance(value, Interval): self._check_closed_matches(value, name='value') (value_left, value_right) = (value.left, value.right) self.left._validate_fill_value(value_left) self.left._validate_fill_value(value_right) else: return self._validate_listlike(value) return (value_left, value_right) def value_counts(self, dropna: bool=True) -> Series: result = value_counts(np.asarray(self), dropna=dropna) result.index = result.index.astype(self.dtype) return result def _formatter(self, boxed: bool=False) -> Callable[[object], str]: return str @property def left(self) -> Index: from pandas import Index return Index(self._left, copy=False) @property def right(self) -> Index: from pandas import Index return Index(self._right, copy=False) @property def length(self) -> Index: return self.right - self.left @property def mid(self) -> Index: try: return 0.5 * (self.left + self.right) except TypeError: return self.left + 0.5 * self.length _interval_shared_docs['overlaps'] = textwrap.dedent("\n Check elementwise if an Interval overlaps the values in the %(klass)s.\n\n Two intervals overlap if they share a common point, including closed\n endpoints. 
Intervals that only have an open endpoint in common do not\n overlap.\n\n Parameters\n ----------\n other : %(klass)s\n Interval to check against for an overlap.\n\n Returns\n -------\n ndarray\n Boolean array positionally indicating where an overlap occurs.\n\n See Also\n --------\n Interval.overlaps : Check whether two Interval objects overlap.\n\n Examples\n --------\n %(examples)s\n >>> intervals.overlaps(pd.Interval(0.5, 1.5))\n array([ True, True, False])\n\n Intervals that share closed endpoints overlap:\n\n >>> intervals.overlaps(pd.Interval(1, 3, closed='left'))\n array([ True, True, True])\n\n Intervals that only have an open endpoint in common do not overlap:\n\n >>> intervals.overlaps(pd.Interval(1, 2, closed='right'))\n array([False, True, False])\n ") @Appender(_interval_shared_docs['overlaps'] % {'klass': 'IntervalArray', 'examples': textwrap.dedent(' >>> data = [(0, 1), (1, 3), (2, 4)]\n >>> intervals = pd.arrays.IntervalArray.from_tuples(data)\n >>> intervals\n \n [(0, 1], (1, 3], (2, 4]]\n Length: 3, dtype: interval[int64, right]\n ')}) def overlaps(self, other): if isinstance(other, (IntervalArray, ABCIntervalIndex)): raise NotImplementedError if not isinstance(other, Interval): msg = f'`other` must be Interval-like, got {type(other).__name__}' raise TypeError(msg) op1 = le if self.closed_left and other.closed_right else lt op2 = le if other.closed_left and self.closed_right else lt return op1(self.left, other.right) & op2(other.left, self.right) @property def closed(self) -> IntervalClosedType: return self.dtype.closed _interval_shared_docs['set_closed'] = textwrap.dedent("\n Return an identical %(klass)s closed on the specified side.\n\n Parameters\n ----------\n closed : {'left', 'right', 'both', 'neither'}\n Whether the intervals are closed on the left-side, right-side, both\n or neither.\n\n Returns\n -------\n %(klass)s\n\n %(examples)s ") def set_closed(self, closed: IntervalClosedType) -> Self: if closed not in VALID_CLOSED: msg = f"invalid option for 'closed': {closed}" raise ValueError(msg) (left, right) = (self._left, self._right) dtype = IntervalDtype(left.dtype, closed=closed) return self._simple_new(left, right, dtype=dtype) _interval_shared_docs['is_non_overlapping_monotonic'] = "\n Return a boolean whether the %(klass)s is non-overlapping and monotonic.\n\n Non-overlapping means (no Intervals share points), and monotonic means\n either monotonic increasing or monotonic decreasing.\n\n Examples\n --------\n For arrays:\n\n >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1), pd.Interval(1, 5)])\n >>> interv_arr\n \n [(0, 1], (1, 5]]\n Length: 2, dtype: interval[int64, right]\n >>> interv_arr.is_non_overlapping_monotonic\n True\n\n >>> interv_arr = pd.arrays.IntervalArray([pd.Interval(0, 1),\n ... 
pd.Interval(-1, 0.1)])\n >>> interv_arr\n \n [(0.0, 1.0], (-1.0, 0.1]]\n Length: 2, dtype: interval[float64, right]\n >>> interv_arr.is_non_overlapping_monotonic\n False\n\n For Interval Index:\n\n >>> interv_idx = pd.interval_range(start=0, end=2)\n >>> interv_idx\n IntervalIndex([(0, 1], (1, 2]], dtype='interval[int64, right]')\n >>> interv_idx.is_non_overlapping_monotonic\n True\n\n >>> interv_idx = pd.interval_range(start=0, end=2, closed='both')\n >>> interv_idx\n IntervalIndex([[0, 1], [1, 2]], dtype='interval[int64, both]')\n >>> interv_idx.is_non_overlapping_monotonic\n False\n " @property def is_non_overlapping_monotonic(self) -> bool: if self.closed == 'both': return bool((self._right[:-1] < self._left[1:]).all() or (self._left[:-1] > self._right[1:]).all()) return bool((self._right[:-1] <= self._left[1:]).all() or (self._left[:-1] >= self._right[1:]).all()) def __array__(self, dtype: NpDtype | None=None, copy: bool | None=None) -> np.ndarray: left = self._left right = self._right mask = self.isna() closed = self.closed result = np.empty(len(left), dtype=object) for (i, left_value) in enumerate(left): if mask[i]: result[i] = np.nan else: result[i] = Interval(left_value, right[i], closed) return result def __arrow_array__(self, type=None): import pyarrow from pandas.core.arrays.arrow.extension_types import ArrowIntervalType try: subtype = pyarrow.from_numpy_dtype(self.dtype.subtype) except TypeError as err: raise TypeError(f"Conversion to arrow with subtype '{self.dtype.subtype}' is not supported") from err interval_type = ArrowIntervalType(subtype, self.closed) storage_array = pyarrow.StructArray.from_arrays([pyarrow.array(self._left, type=subtype, from_pandas=True), pyarrow.array(self._right, type=subtype, from_pandas=True)], names=['left', 'right']) mask = self.isna() if mask.any(): null_bitmap = pyarrow.array(~mask).buffers()[1] storage_array = pyarrow.StructArray.from_buffers(storage_array.type, len(storage_array), [null_bitmap], children=[storage_array.field(0), storage_array.field(1)]) if type is not None: if type.equals(interval_type.storage_type): return storage_array elif isinstance(type, ArrowIntervalType): if not type.equals(interval_type): raise TypeError(f"Not supported to convert IntervalArray to type with different 'subtype' ({self.dtype.subtype} vs {type.subtype}) and 'closed' ({self.closed} vs {type.closed}) attributes") else: raise TypeError(f"Not supported to convert IntervalArray to '{type}' type") return pyarrow.ExtensionArray.from_storage(interval_type, storage_array) _interval_shared_docs['to_tuples'] = textwrap.dedent('\n Return an %(return_type)s of tuples of the form (left, right).\n\n Parameters\n ----------\n na_tuple : bool, default True\n If ``True``, return ``NA`` as a tuple ``(nan, nan)``. 
If ``False``,\n just return ``NA`` as ``nan``.\n\n Returns\n -------\n tuples: %(return_type)s\n %(examples)s ') def to_tuples(self, na_tuple: bool=True) -> np.ndarray: tuples = com.asarray_tuplesafe(zip(self._left, self._right)) if not na_tuple: tuples = np.where(~self.isna(), tuples, np.nan) return tuples def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: (value_left, value_right) = self._validate_setitem_value(value) if isinstance(self._left, np.ndarray): np.putmask(self._left, mask, value_left) assert isinstance(self._right, np.ndarray) np.putmask(self._right, mask, value_right) else: self._left._putmask(mask, value_left) assert not isinstance(self._right, np.ndarray) self._right._putmask(mask, value_right) def insert(self, loc: int, item: Interval) -> Self: (left_insert, right_insert) = self._validate_scalar(item) new_left = self.left.insert(loc, left_insert) new_right = self.right.insert(loc, right_insert) return self._shallow_copy(new_left, new_right) def delete(self, loc) -> Self: new_left: np.ndarray | DatetimeArray | TimedeltaArray new_right: np.ndarray | DatetimeArray | TimedeltaArray if isinstance(self._left, np.ndarray): new_left = np.delete(self._left, loc) assert isinstance(self._right, np.ndarray) new_right = np.delete(self._right, loc) else: new_left = self._left.delete(loc) assert not isinstance(self._right, np.ndarray) new_right = self._right.delete(loc) return self._shallow_copy(left=new_left, right=new_right) @Appender(_extension_array_shared_docs['repeat'] % _shared_docs_kwargs) def repeat(self, repeats: int | Sequence[int], axis: AxisInt | None=None) -> Self: nv.validate_repeat((), {'axis': axis}) left_repeat = self.left.repeat(repeats) right_repeat = self.right.repeat(repeats) return self._shallow_copy(left=left_repeat, right=right_repeat) _interval_shared_docs['contains'] = textwrap.dedent('\n Check elementwise if the Intervals contain the value.\n\n Return a boolean mask whether the value is contained in the Intervals\n of the %(klass)s.\n\n Parameters\n ----------\n other : scalar\n The value to check whether it is contained in the Intervals.\n\n Returns\n -------\n boolean array\n\n See Also\n --------\n Interval.contains : Check whether Interval object contains value.\n %(klass)s.overlaps : Check if an Interval overlaps the values in the\n %(klass)s.\n\n Examples\n --------\n %(examples)s\n >>> intervals.contains(0.5)\n array([ True, False, False])\n ') def contains(self, other): if isinstance(other, Interval): raise NotImplementedError('contains not implemented for two intervals') return (self._left < other if self.open_left else self._left <= other) & (other < self._right if self.open_right else other <= self._right) def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]: if isinstance(values, IntervalArray): if self.closed != values.closed: return np.zeros(self.shape, dtype=bool) if self.dtype == values.dtype: left = self._combined.view('complex128') right = values._combined.view('complex128') return np.isin(left, right).ravel() elif needs_i8_conversion(self.left.dtype) ^ needs_i8_conversion(values.left.dtype): return np.zeros(self.shape, dtype=bool) return isin(self.astype(object), values.astype(object)) @property def _combined(self) -> IntervalSide: left = self.left._values.reshape(-1, 1) right = self.right._values.reshape(-1, 1) if needs_i8_conversion(left.dtype): comb = left._concat_same_type([left, right], axis=1) else: comb = np.concatenate([left, right], axis=1) return comb def _from_combined(self, combined: np.ndarray) -> 
IntervalArray: nc = combined.view('i8').reshape(-1, 2) dtype = self._left.dtype if needs_i8_conversion(dtype): assert isinstance(self._left, (DatetimeArray, TimedeltaArray)) new_left: DatetimeArray | TimedeltaArray | np.ndarray = type(self._left)._from_sequence(nc[:, 0], dtype=dtype) assert isinstance(self._right, (DatetimeArray, TimedeltaArray)) new_right: DatetimeArray | TimedeltaArray | np.ndarray = type(self._right)._from_sequence(nc[:, 1], dtype=dtype) else: assert isinstance(dtype, np.dtype) new_left = nc[:, 0].view(dtype) new_right = nc[:, 1].view(dtype) return self._shallow_copy(left=new_left, right=new_right) def unique(self) -> IntervalArray: nc = unique(self._combined.view('complex128')[:, 0]) nc = nc[:, None] return self._from_combined(nc) def _maybe_convert_platform_interval(values) -> ArrayLike: if isinstance(values, (list, tuple)) and len(values) == 0: return np.array([], dtype=np.int64) elif not is_list_like(values) or isinstance(values, ABCDataFrame): return values elif isinstance(getattr(values, 'dtype', None), CategoricalDtype): values = np.asarray(values) elif not hasattr(values, 'dtype') and (not isinstance(values, (list, tuple, range))): return values else: values = extract_array(values, extract_numpy=True) if not hasattr(values, 'dtype'): values = np.asarray(values) if values.dtype.kind in 'iu' and values.dtype != np.int64: values = values.astype(np.int64) return values # File: pandas-main/pandas/core/arrays/masked.py from __future__ import annotations from typing import TYPE_CHECKING, Any, Literal, cast, overload import warnings import numpy as np from pandas._libs import lib, missing as libmissing from pandas._libs.tslibs import is_supported_dtype from pandas.compat import IS64, is_platform_windows from pandas.errors import AbstractMethodError from pandas.util._decorators import doc from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.common import is_bool, is_integer_dtype, is_list_like, is_scalar, is_string_dtype, pandas_dtype from pandas.core.dtypes.dtypes import BaseMaskedDtype from pandas.core.dtypes.missing import array_equivalent, is_valid_na_for_dtype, isna, notna from pandas.core import algorithms as algos, arraylike, missing, nanops, ops from pandas.core.algorithms import factorize_array, isin, map_array, mode, take from pandas.core.array_algos import masked_accumulations, masked_reductions from pandas.core.array_algos.quantile import quantile_with_mask from pandas.core.arraylike import OpsMixin from pandas.core.arrays._utils import to_numpy_dtype_inference from pandas.core.arrays.base import ExtensionArray from pandas.core.construction import array as pd_array, ensure_wrapped_if_datetimelike, extract_array from pandas.core.indexers import check_array_indexer from pandas.core.ops import invalid_comparison from pandas.core.util.hashing import hash_array if TYPE_CHECKING: from collections.abc import Callable from collections.abc import Iterator, Sequence from pandas import Series from pandas.core.arrays import BooleanArray from pandas._typing import NumpySorter, NumpyValueArrayLike, ArrayLike, AstypeArg, AxisInt, DtypeObj, FillnaOptions, InterpolateOptions, NpDtype, PositionalIndexer, Scalar, ScalarIndexer, Self, SequenceIndexer, Shape, npt from pandas._libs.missing import NAType from pandas.core.arrays import FloatingArray from pandas.compat.numpy import function as nv class BaseMaskedArray(OpsMixin, ExtensionArray): _data: np.ndarray _mask: npt.NDArray[np.bool_] @classmethod def _simple_new(cls, values: np.ndarray, mask: 
npt.NDArray[np.bool_]) -> Self: result = BaseMaskedArray.__new__(cls) result._data = values result._mask = mask return result def __init__(self, values: np.ndarray, mask: npt.NDArray[np.bool_], copy: bool=False) -> None: if not (isinstance(mask, np.ndarray) and mask.dtype == np.bool_): raise TypeError("mask should be boolean numpy array. Use the 'pd.array' function instead") if values.shape != mask.shape: raise ValueError('values.shape must match mask.shape') if copy: values = values.copy() mask = mask.copy() self._data = values self._mask = mask @classmethod def _from_sequence(cls, scalars, *, dtype=None, copy: bool=False) -> Self: (values, mask) = cls._coerce_to_array(scalars, dtype=dtype, copy=copy) return cls(values, mask) @classmethod @doc(ExtensionArray._empty) def _empty(cls, shape: Shape, dtype: ExtensionDtype) -> Self: dtype = cast(BaseMaskedDtype, dtype) values: np.ndarray = np.empty(shape, dtype=dtype.type) values.fill(dtype._internal_fill_value) mask = np.ones(shape, dtype=bool) result = cls(values, mask) if not isinstance(result, cls) or dtype != result.dtype: raise NotImplementedError(f"Default 'empty' implementation is invalid for dtype='{dtype}'") return result def _formatter(self, boxed: bool=False) -> Callable[[Any], str | None]: return str @property def dtype(self) -> BaseMaskedDtype: raise AbstractMethodError(self) @overload def __getitem__(self, item: ScalarIndexer) -> Any: ... @overload def __getitem__(self, item: SequenceIndexer) -> Self: ... def __getitem__(self, item: PositionalIndexer) -> Self | Any: item = check_array_indexer(self, item) newmask = self._mask[item] if is_bool(newmask): if newmask: return self.dtype.na_value return self._data[item] return self._simple_new(self._data[item], newmask) def _pad_or_backfill(self, *, method: FillnaOptions, limit: int | None=None, limit_area: Literal['inside', 'outside'] | None=None, copy: bool=True) -> Self: mask = self._mask if mask.any(): func = missing.get_fill_func(method, ndim=self.ndim) npvalues = self._data.T new_mask = mask.T if copy: npvalues = npvalues.copy() new_mask = new_mask.copy() elif limit_area is not None: mask = mask.copy() func(npvalues, limit=limit, mask=new_mask) if limit_area is not None and (not mask.all()): mask = mask.T neg_mask = ~mask first = neg_mask.argmax() last = len(neg_mask) - neg_mask[::-1].argmax() - 1 if limit_area == 'inside': new_mask[:first] |= mask[:first] new_mask[last + 1:] |= mask[last + 1:] elif limit_area == 'outside': new_mask[first + 1:last] |= mask[first + 1:last] if copy: return self._simple_new(npvalues.T, new_mask.T) else: return self elif copy: new_values = self.copy() else: new_values = self return new_values @doc(ExtensionArray.fillna) def fillna(self, value, limit: int | None=None, copy: bool=True) -> Self: mask = self._mask if limit is not None and limit < len(self): modify = mask.cumsum() > limit if modify.any(): mask = mask.copy() mask[modify] = False value = missing.check_value_size(value, mask, len(self)) if mask.any(): if copy: new_values = self.copy() else: new_values = self[:] new_values[mask] = value elif copy: new_values = self.copy() else: new_values = self[:] return new_values @classmethod def _coerce_to_array(cls, values, *, dtype: DtypeObj, copy: bool=False) -> tuple[np.ndarray, np.ndarray]: raise AbstractMethodError(cls) def _validate_setitem_value(self, value): kind = self.dtype.kind if kind == 'b': if lib.is_bool(value): return value elif kind == 'f': if lib.is_integer(value) or lib.is_float(value): return value elif lib.is_integer(value) or 
(lib.is_float(value) and value.is_integer()): return value raise TypeError(f"Invalid value '{value!s}' for dtype {self.dtype}") def __setitem__(self, key, value) -> None: key = check_array_indexer(self, key) if is_scalar(value): if is_valid_na_for_dtype(value, self.dtype): self._mask[key] = True else: value = self._validate_setitem_value(value) self._data[key] = value self._mask[key] = False return (value, mask) = self._coerce_to_array(value, dtype=self.dtype) self._data[key] = value self._mask[key] = mask def __contains__(self, key) -> bool: if isna(key) and key is not self.dtype.na_value: if self._data.dtype.kind == 'f' and lib.is_float(key): return bool((np.isnan(self._data) & ~self._mask).any()) return bool(super().__contains__(key)) def __iter__(self) -> Iterator: if self.ndim == 1: if not self._hasna: for val in self._data: yield val else: na_value = self.dtype.na_value for (isna_, val) in zip(self._mask, self._data): if isna_: yield na_value else: yield val else: for i in range(len(self)): yield self[i] def __len__(self) -> int: return len(self._data) @property def shape(self) -> Shape: return self._data.shape @property def ndim(self) -> int: return self._data.ndim def swapaxes(self, axis1, axis2) -> Self: data = self._data.swapaxes(axis1, axis2) mask = self._mask.swapaxes(axis1, axis2) return self._simple_new(data, mask) def delete(self, loc, axis: AxisInt=0) -> Self: data = np.delete(self._data, loc, axis=axis) mask = np.delete(self._mask, loc, axis=axis) return self._simple_new(data, mask) def reshape(self, *args, **kwargs) -> Self: data = self._data.reshape(*args, **kwargs) mask = self._mask.reshape(*args, **kwargs) return self._simple_new(data, mask) def ravel(self, *args, **kwargs) -> Self: data = self._data.ravel(*args, **kwargs) mask = self._mask.ravel(*args, **kwargs) return type(self)(data, mask) @property def T(self) -> Self: return self._simple_new(self._data.T, self._mask.T) def round(self, decimals: int=0, *args, **kwargs): if self.dtype.kind == 'b': return self nv.validate_round(args, kwargs) values = np.round(self._data, decimals=decimals, **kwargs) return self._maybe_mask_result(values, self._mask.copy()) def __invert__(self) -> Self: return self._simple_new(~self._data, self._mask.copy()) def __neg__(self) -> Self: return self._simple_new(-self._data, self._mask.copy()) def __pos__(self) -> Self: return self.copy() def __abs__(self) -> Self: return self._simple_new(abs(self._data), self._mask.copy()) def _values_for_json(self) -> np.ndarray: return np.asarray(self, dtype=object) def to_numpy(self, dtype: npt.DTypeLike | None=None, copy: bool=False, na_value: object=lib.no_default) -> np.ndarray: hasna = self._hasna (dtype, na_value) = to_numpy_dtype_inference(self, dtype, na_value, hasna) if dtype is None: dtype = object if hasna: if dtype != object and (not is_string_dtype(dtype)) and (na_value is libmissing.NA): raise ValueError(f"cannot convert to '{dtype}'-dtype NumPy array with missing values. 
Specify an appropriate 'na_value' for this dtype.") with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=RuntimeWarning) data = self._data.astype(dtype) data[self._mask] = na_value else: with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=RuntimeWarning) data = self._data.astype(dtype, copy=copy) return data @doc(ExtensionArray.tolist) def tolist(self) -> list: if self.ndim > 1: return [x.tolist() for x in self] dtype = None if self._hasna else self._data.dtype return self.to_numpy(dtype=dtype, na_value=libmissing.NA).tolist() @overload def astype(self, dtype: npt.DTypeLike, copy: bool=...) -> np.ndarray: ... @overload def astype(self, dtype: ExtensionDtype, copy: bool=...) -> ExtensionArray: ... @overload def astype(self, dtype: AstypeArg, copy: bool=...) -> ArrayLike: ... def astype(self, dtype: AstypeArg, copy: bool=True) -> ArrayLike: dtype = pandas_dtype(dtype) if dtype == self.dtype: if copy: return self.copy() return self if isinstance(dtype, BaseMaskedDtype): with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=RuntimeWarning) data = self._data.astype(dtype.numpy_dtype, copy=copy) mask = self._mask if data is self._data else self._mask.copy() cls = dtype.construct_array_type() return cls(data, mask, copy=False) if isinstance(dtype, ExtensionDtype): eacls = dtype.construct_array_type() return eacls._from_sequence(self, dtype=dtype, copy=copy) na_value: float | np.datetime64 | lib.NoDefault if dtype.kind == 'f': na_value = np.nan elif dtype.kind == 'M': na_value = np.datetime64('NaT') else: na_value = lib.no_default if dtype.kind in 'iu' and self._hasna: raise ValueError('cannot convert NA to integer') if dtype.kind == 'b' and self._hasna: raise ValueError('cannot convert float NaN to bool') data = self.to_numpy(dtype=dtype, na_value=na_value, copy=copy) return data __array_priority__ = 1000 def __array__(self, dtype: NpDtype | None=None, copy: bool | None=None) -> np.ndarray: return self.to_numpy(dtype=dtype) _HANDLED_TYPES: tuple[type, ...] 
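# --- illustrative sketch, not part of the upstream source --------------------
# BaseMaskedArray keeps values in ``_data`` and missingness in a parallel boolean
# ``_mask``; ``__array_ufunc__`` below unwraps the data, applies the ufunc, and
# re-wraps the result with the combined mask. Observable behaviour through the
# public API (assumes only numpy and pandas are importable):
#
#     import numpy as np
#     import pandas as pd
#
#     arr = pd.array([1, 2, None], dtype="Int64")      # IntegerArray, a BaseMaskedArray subclass
#     np.add(arr, 1)                                    # masked result: [2, 3, <NA>]
#     arr.to_numpy(dtype="float64", na_value=np.nan)    # array([ 1.,  2., nan])
#     arr > 1                                           # BooleanArray: [False, True, <NA>]
# ------------------------------------------------------------------------------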
def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): out = kwargs.get('out', ()) for x in inputs + out: if not isinstance(x, self._HANDLED_TYPES + (BaseMaskedArray,)): return NotImplemented result = arraylike.maybe_dispatch_ufunc_to_dunder_op(self, ufunc, method, *inputs, **kwargs) if result is not NotImplemented: return result if 'out' in kwargs: return arraylike.dispatch_ufunc_with_out(self, ufunc, method, *inputs, **kwargs) if method == 'reduce': result = arraylike.dispatch_reduction_ufunc(self, ufunc, method, *inputs, **kwargs) if result is not NotImplemented: return result mask = np.zeros(len(self), dtype=bool) inputs2 = [] for x in inputs: if isinstance(x, BaseMaskedArray): mask |= x._mask inputs2.append(x._data) else: inputs2.append(x) def reconstruct(x: np.ndarray): from pandas.core.arrays import BooleanArray, FloatingArray, IntegerArray if x.dtype.kind == 'b': m = mask.copy() return BooleanArray(x, m) elif x.dtype.kind in 'iu': m = mask.copy() return IntegerArray(x, m) elif x.dtype.kind == 'f': m = mask.copy() if x.dtype == np.float16: x = x.astype(np.float32) return FloatingArray(x, m) else: x[mask] = np.nan return x result = getattr(ufunc, method)(*inputs2, **kwargs) if ufunc.nout > 1: return tuple((reconstruct(x) for x in result)) elif method == 'reduce': if self._mask.any(): return self._na_value return result else: return reconstruct(result) def __arrow_array__(self, type=None): import pyarrow as pa return pa.array(self._data, mask=self._mask, type=type) @property def _hasna(self) -> bool: return self._mask.any() def _propagate_mask(self, mask: npt.NDArray[np.bool_] | None, other) -> npt.NDArray[np.bool_]: if mask is None: mask = self._mask.copy() if other is libmissing.NA: mask = mask | True elif is_list_like(other) and len(other) == len(mask): mask = mask | isna(other) else: mask = self._mask | mask return mask def _arith_method(self, other, op): op_name = op.__name__ omask = None if not hasattr(other, 'dtype') and is_list_like(other) and (len(other) == len(self)): other = pd_array(other) other = extract_array(other, extract_numpy=True) if isinstance(other, BaseMaskedArray): (other, omask) = (other._data, other._mask) elif is_list_like(other): if not isinstance(other, ExtensionArray): other = np.asarray(other) if other.ndim > 1: raise NotImplementedError('can only perform ops with 1-d structures') other = ops.maybe_prepare_scalar_for_op(other, (len(self),)) pd_op = ops.get_array_op(op) other = ensure_wrapped_if_datetimelike(other) if op_name in {'pow', 'rpow'} and isinstance(other, np.bool_): other = bool(other) mask = self._propagate_mask(omask, other) if other is libmissing.NA: result = np.ones_like(self._data) if self.dtype.kind == 'b': if op_name in {'floordiv', 'rfloordiv', 'pow', 'rpow', 'truediv', 'rtruediv'}: raise NotImplementedError(f"operator '{op_name}' not implemented for bool dtypes") if op_name in {'mod', 'rmod'}: dtype = 'int8' else: dtype = 'bool' result = result.astype(dtype) elif 'truediv' in op_name and self.dtype.kind != 'f': result = result.astype(np.float64) else: if self.dtype.kind in 'iu' and op_name in ['floordiv', 'mod']: pd_op = op with np.errstate(all='ignore'): result = pd_op(self._data, other) if op_name == 'pow': mask = np.where((self._data == 1) & ~self._mask, False, mask) if omask is not None: mask = np.where((other == 0) & ~omask, False, mask) elif other is not libmissing.NA: mask = np.where(other == 0, False, mask) elif op_name == 'rpow': if omask is not None: mask = np.where((other == 1) & ~omask, False, mask) elif 
other is not libmissing.NA: mask = np.where(other == 1, False, mask) mask = np.where((self._data == 0) & ~self._mask, False, mask) return self._maybe_mask_result(result, mask) _logical_method = _arith_method def _cmp_method(self, other, op) -> BooleanArray: from pandas.core.arrays import BooleanArray mask = None if isinstance(other, BaseMaskedArray): (other, mask) = (other._data, other._mask) elif is_list_like(other): other = np.asarray(other) if other.ndim > 1: raise NotImplementedError('can only perform ops with 1-d structures') if len(self) != len(other): raise ValueError('Lengths must match to compare') if other is libmissing.NA: result = np.zeros(self._data.shape, dtype='bool') mask = np.ones(self._data.shape, dtype='bool') else: with warnings.catch_warnings(): warnings.filterwarnings('ignore', 'elementwise', FutureWarning) warnings.filterwarnings('ignore', 'elementwise', DeprecationWarning) method = getattr(self._data, f'__{op.__name__}__') result = method(other) if result is NotImplemented: result = invalid_comparison(self._data, other, op) mask = self._propagate_mask(mask, other) return BooleanArray(result, mask, copy=False) def _maybe_mask_result(self, result: np.ndarray | tuple[np.ndarray, np.ndarray], mask: np.ndarray): if isinstance(result, tuple): (div, mod) = result return (self._maybe_mask_result(div, mask), self._maybe_mask_result(mod, mask)) if result.dtype.kind == 'f': from pandas.core.arrays import FloatingArray return FloatingArray(result, mask, copy=False) elif result.dtype.kind == 'b': from pandas.core.arrays import BooleanArray return BooleanArray(result, mask, copy=False) elif lib.is_np_dtype(result.dtype, 'm') and is_supported_dtype(result.dtype): from pandas.core.arrays import TimedeltaArray result[mask] = result.dtype.type('NaT') if not isinstance(result, TimedeltaArray): return TimedeltaArray._simple_new(result, dtype=result.dtype) return result elif result.dtype.kind in 'iu': from pandas.core.arrays import IntegerArray return IntegerArray(result, mask, copy=False) else: result[mask] = np.nan return result def isna(self) -> np.ndarray: return self._mask.copy() @property def _na_value(self): return self.dtype.na_value @property def nbytes(self) -> int: return self._data.nbytes + self._mask.nbytes @classmethod def _concat_same_type(cls, to_concat: Sequence[Self], axis: AxisInt=0) -> Self: data = np.concatenate([x._data for x in to_concat], axis=axis) mask = np.concatenate([x._mask for x in to_concat], axis=axis) return cls(data, mask) def _hash_pandas_object(self, *, encoding: str, hash_key: str, categorize: bool) -> npt.NDArray[np.uint64]: hashed_array = hash_array(self._data, encoding=encoding, hash_key=hash_key, categorize=categorize) hashed_array[self.isna()] = hash(self.dtype.na_value) return hashed_array def take(self, indexer, *, allow_fill: bool=False, fill_value: Scalar | None=None, axis: AxisInt=0) -> Self: data_fill_value = self.dtype._internal_fill_value if isna(fill_value) else fill_value result = take(self._data, indexer, fill_value=data_fill_value, allow_fill=allow_fill, axis=axis) mask = take(self._mask, indexer, fill_value=True, allow_fill=allow_fill, axis=axis) if allow_fill and notna(fill_value): fill_mask = np.asarray(indexer) == -1 result[fill_mask] = fill_value mask = mask ^ fill_mask return self._simple_new(result, mask) def isin(self, values: ArrayLike) -> BooleanArray: from pandas.core.arrays import BooleanArray values_arr = np.asarray(values) result = isin(self._data, values_arr) if self._hasna: values_have_NA = values_arr.dtype == object 
and any((val is self.dtype.na_value for val in values_arr)) result[self._mask] = values_have_NA mask = np.zeros(self._data.shape, dtype=bool) return BooleanArray(result, mask, copy=False) def copy(self) -> Self: data = self._data.copy() mask = self._mask.copy() return self._simple_new(data, mask) @doc(ExtensionArray.duplicated) def duplicated(self, keep: Literal['first', 'last', False]='first') -> npt.NDArray[np.bool_]: values = self._data mask = self._mask return algos.duplicated(values, keep=keep, mask=mask) def unique(self) -> Self: (uniques, mask) = algos.unique_with_mask(self._data, self._mask) return self._simple_new(uniques, mask) @doc(ExtensionArray.searchsorted) def searchsorted(self, value: NumpyValueArrayLike | ExtensionArray, side: Literal['left', 'right']='left', sorter: NumpySorter | None=None) -> npt.NDArray[np.intp] | np.intp: if self._hasna: raise ValueError('searchsorted requires array to be sorted, which is impossible with NAs present.') if isinstance(value, ExtensionArray): value = value.astype(object) return self._data.searchsorted(value, side=side, sorter=sorter) @doc(ExtensionArray.factorize) def factorize(self, use_na_sentinel: bool=True) -> tuple[np.ndarray, ExtensionArray]: arr = self._data mask = self._mask (codes, uniques) = factorize_array(arr, use_na_sentinel=True, mask=mask) assert uniques.dtype == self.dtype.numpy_dtype, (uniques.dtype, self.dtype) has_na = mask.any() if use_na_sentinel or not has_na: size = len(uniques) else: size = len(uniques) + 1 uniques_mask = np.zeros(size, dtype=bool) if not use_na_sentinel and has_na: na_index = mask.argmax() if na_index == 0: na_code = np.intp(0) else: na_code = codes[:na_index].max() + 1 codes[codes >= na_code] += 1 codes[codes == -1] = na_code uniques = np.insert(uniques, na_code, 0) uniques_mask[na_code] = True uniques_ea = self._simple_new(uniques, uniques_mask) return (codes, uniques_ea) @doc(ExtensionArray._values_for_argsort) def _values_for_argsort(self) -> np.ndarray: return self._data def value_counts(self, dropna: bool=True) -> Series: from pandas import Index, Series from pandas.arrays import IntegerArray (keys, value_counts, na_counter) = algos.value_counts_arraylike(self._data, dropna=dropna, mask=self._mask) mask_index = np.zeros((len(value_counts),), dtype=np.bool_) mask = mask_index.copy() if na_counter > 0: mask_index[-1] = True arr = IntegerArray(value_counts, mask) index = Index(self.dtype.construct_array_type()(keys, mask_index)) return Series(arr, index=index, name='count', copy=False) def _mode(self, dropna: bool=True) -> Self: if dropna: result = mode(self._data, dropna=dropna, mask=self._mask) res_mask = np.zeros(result.shape, dtype=np.bool_) else: (result, res_mask) = mode(self._data, dropna=dropna, mask=self._mask) result = type(self)(result, res_mask) return result[result.argsort()] @doc(ExtensionArray.equals) def equals(self, other) -> bool: if type(self) != type(other): return False if other.dtype != self.dtype: return False if not np.array_equal(self._mask, other._mask): return False left = self._data[~self._mask] right = other._data[~other._mask] return array_equivalent(left, right, strict_nan=True, dtype_equal=True) def _quantile(self, qs: npt.NDArray[np.float64], interpolation: str) -> BaseMaskedArray: res = quantile_with_mask(self._data, mask=self._mask, fill_value=np.nan, qs=qs, interpolation=interpolation) if self._hasna: if self.ndim == 2: raise NotImplementedError if self.isna().all(): out_mask = np.ones(res.shape, dtype=bool) if is_integer_dtype(self.dtype): res = 
np.zeros(res.shape, dtype=self.dtype.numpy_dtype) else: out_mask = np.zeros(res.shape, dtype=bool) else: out_mask = np.zeros(res.shape, dtype=bool) return self._maybe_mask_result(res, mask=out_mask) def _reduce(self, name: str, *, skipna: bool=True, keepdims: bool=False, **kwargs): if name in {'any', 'all', 'min', 'max', 'sum', 'prod', 'mean', 'var', 'std'}: result = getattr(self, name)(skipna=skipna, **kwargs) else: data = self._data mask = self._mask op = getattr(nanops, f'nan{name}') axis = kwargs.pop('axis', None) result = op(data, axis=axis, skipna=skipna, mask=mask, **kwargs) if keepdims: if isna(result): return self._wrap_na_result(name=name, axis=0, mask_size=(1,)) else: result = result.reshape(1) mask = np.zeros(1, dtype=bool) return self._maybe_mask_result(result, mask) if isna(result): return libmissing.NA else: return result def _wrap_reduction_result(self, name: str, result, *, skipna, axis): if isinstance(result, np.ndarray): if skipna: mask = self._mask.all(axis=axis) else: mask = self._mask.any(axis=axis) return self._maybe_mask_result(result, mask) return result def _wrap_na_result(self, *, name, axis, mask_size): mask = np.ones(mask_size, dtype=bool) float_dtyp = 'float32' if self.dtype == 'Float32' else 'float64' if name in ['mean', 'median', 'var', 'std', 'skew', 'kurt', 'sem']: np_dtype = float_dtyp elif name in ['min', 'max'] or self.dtype.itemsize == 8: np_dtype = self.dtype.numpy_dtype.name else: is_windows_or_32bit = is_platform_windows() or not IS64 int_dtyp = 'int32' if is_windows_or_32bit else 'int64' uint_dtyp = 'uint32' if is_windows_or_32bit else 'uint64' np_dtype = {'b': int_dtyp, 'i': int_dtyp, 'u': uint_dtyp, 'f': float_dtyp}[self.dtype.kind] value = np.array([1], dtype=np_dtype) return self._maybe_mask_result(value, mask=mask) def _wrap_min_count_reduction_result(self, name: str, result, *, skipna, min_count, axis): if min_count == 0 and isinstance(result, np.ndarray): return self._maybe_mask_result(result, np.zeros(result.shape, dtype=bool)) return self._wrap_reduction_result(name, result, skipna=skipna, axis=axis) def sum(self, *, skipna: bool=True, min_count: int=0, axis: AxisInt | None=0, **kwargs): nv.validate_sum((), kwargs) result = masked_reductions.sum(self._data, self._mask, skipna=skipna, min_count=min_count, axis=axis) return self._wrap_min_count_reduction_result('sum', result, skipna=skipna, min_count=min_count, axis=axis) def prod(self, *, skipna: bool=True, min_count: int=0, axis: AxisInt | None=0, **kwargs): nv.validate_prod((), kwargs) result = masked_reductions.prod(self._data, self._mask, skipna=skipna, min_count=min_count, axis=axis) return self._wrap_min_count_reduction_result('prod', result, skipna=skipna, min_count=min_count, axis=axis) def mean(self, *, skipna: bool=True, axis: AxisInt | None=0, **kwargs): nv.validate_mean((), kwargs) result = masked_reductions.mean(self._data, self._mask, skipna=skipna, axis=axis) return self._wrap_reduction_result('mean', result, skipna=skipna, axis=axis) def var(self, *, skipna: bool=True, axis: AxisInt | None=0, ddof: int=1, **kwargs): nv.validate_stat_ddof_func((), kwargs, fname='var') result = masked_reductions.var(self._data, self._mask, skipna=skipna, axis=axis, ddof=ddof) return self._wrap_reduction_result('var', result, skipna=skipna, axis=axis) def std(self, *, skipna: bool=True, axis: AxisInt | None=0, ddof: int=1, **kwargs): nv.validate_stat_ddof_func((), kwargs, fname='std') result = masked_reductions.std(self._data, self._mask, skipna=skipna, axis=axis, ddof=ddof) return 
self._wrap_reduction_result('std', result, skipna=skipna, axis=axis) def min(self, *, skipna: bool=True, axis: AxisInt | None=0, **kwargs): nv.validate_min((), kwargs) result = masked_reductions.min(self._data, self._mask, skipna=skipna, axis=axis) return self._wrap_reduction_result('min', result, skipna=skipna, axis=axis) def max(self, *, skipna: bool=True, axis: AxisInt | None=0, **kwargs): nv.validate_max((), kwargs) result = masked_reductions.max(self._data, self._mask, skipna=skipna, axis=axis) return self._wrap_reduction_result('max', result, skipna=skipna, axis=axis) def map(self, mapper, na_action: Literal['ignore'] | None=None): return map_array(self.to_numpy(), mapper, na_action=na_action) @overload def any(self, *, skipna: Literal[True]=..., axis: AxisInt | None=..., **kwargs) -> np.bool_: ... @overload def any(self, *, skipna: bool, axis: AxisInt | None=..., **kwargs) -> np.bool_ | NAType: ... def any(self, *, skipna: bool=True, axis: AxisInt | None=0, **kwargs) -> np.bool_ | NAType: nv.validate_any((), kwargs) values = self._data.copy() np.putmask(values, self._mask, self.dtype._falsey_value) result = values.any() if skipna: return result elif result or len(self) == 0 or (not self._mask.any()): return result else: return self.dtype.na_value @overload def all(self, *, skipna: Literal[True]=..., axis: AxisInt | None=..., **kwargs) -> np.bool_: ... @overload def all(self, *, skipna: bool, axis: AxisInt | None=..., **kwargs) -> np.bool_ | NAType: ... def all(self, *, skipna: bool=True, axis: AxisInt | None=0, **kwargs) -> np.bool_ | NAType: nv.validate_all((), kwargs) values = self._data.copy() np.putmask(values, self._mask, self.dtype._truthy_value) result = values.all(axis=axis) if skipna: return result elif not result or len(self) == 0 or (not self._mask.any()): return result else: return self.dtype.na_value def interpolate(self, *, method: InterpolateOptions, axis: int, index, limit, limit_direction, limit_area, copy: bool, **kwargs) -> FloatingArray: if self.dtype.kind == 'f': if copy: data = self._data.copy() mask = self._mask.copy() else: data = self._data mask = self._mask elif self.dtype.kind in 'iu': copy = True data = self._data.astype('f8') mask = self._mask.copy() else: raise NotImplementedError(f'interpolate is not implemented for dtype={self.dtype}') missing.interpolate_2d_inplace(data, method=method, axis=0, index=index, limit=limit, limit_direction=limit_direction, limit_area=limit_area, mask=mask, **kwargs) if not copy: return self if self.dtype.kind == 'f': return type(self)._simple_new(data, mask) else: from pandas.core.arrays import FloatingArray return FloatingArray._simple_new(data, mask) def _accumulate(self, name: str, *, skipna: bool=True, **kwargs) -> BaseMaskedArray: data = self._data mask = self._mask op = getattr(masked_accumulations, name) (data, mask) = op(data, mask, skipna=skipna, **kwargs) return self._simple_new(data, mask) def _groupby_op(self, *, how: str, has_dropped_na: bool, min_count: int, ngroups: int, ids: npt.NDArray[np.intp], **kwargs): from pandas.core.groupby.ops import WrappedCythonOp kind = WrappedCythonOp.get_kind_from_how(how) op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na) mask = self._mask if op.kind != 'aggregate': result_mask = mask.copy() else: result_mask = np.zeros(ngroups, dtype=bool) if how == 'rank' and kwargs.get('na_option') in ['top', 'bottom']: result_mask[:] = False res_values = op._cython_op_ndim_compat(self._data, min_count=min_count, ngroups=ngroups, comp_ids=ids, mask=mask, 
result_mask=result_mask, **kwargs) if op.how == 'ohlc': arity = op._cython_arity.get(op.how, 1) result_mask = np.tile(result_mask, (arity, 1)).T if op.how in ['idxmin', 'idxmax']: return res_values else: return self._maybe_mask_result(res_values, result_mask) def transpose_homogeneous_masked_arrays(masked_arrays: Sequence[BaseMaskedArray]) -> list[BaseMaskedArray]: masked_arrays = list(masked_arrays) dtype = masked_arrays[0].dtype values = [arr._data.reshape(1, -1) for arr in masked_arrays] transposed_values = np.concatenate(values, axis=0, out=np.empty((len(masked_arrays), len(masked_arrays[0])), order='F', dtype=dtype.numpy_dtype)) masks = [arr._mask.reshape(1, -1) for arr in masked_arrays] transposed_masks = np.concatenate(masks, axis=0, out=np.empty_like(transposed_values, dtype=bool)) arr_type = dtype.construct_array_type() transposed_arrays: list[BaseMaskedArray] = [] for i in range(transposed_values.shape[1]): transposed_arr = arr_type(transposed_values[:, i], mask=transposed_masks[:, i]) transposed_arrays.append(transposed_arr) return transposed_arrays # File: pandas-main/pandas/core/arrays/numeric.py from __future__ import annotations import numbers from typing import TYPE_CHECKING, Any import numpy as np from pandas._libs import lib, missing as libmissing from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly from pandas.core.dtypes.common import is_integer_dtype, is_string_dtype, pandas_dtype from pandas.core.arrays.masked import BaseMaskedArray, BaseMaskedDtype if TYPE_CHECKING: from collections.abc import Callable, Mapping import pyarrow from pandas._typing import DtypeObj, Self, npt from pandas.core.dtypes.dtypes import ExtensionDtype class NumericDtype(BaseMaskedDtype): _default_np_dtype: np.dtype _checker: Callable[[Any], bool] def __repr__(self) -> str: return f'{self.name}Dtype()' @cache_readonly def is_signed_integer(self) -> bool: return self.kind == 'i' @cache_readonly def is_unsigned_integer(self) -> bool: return self.kind == 'u' @property def _is_numeric(self) -> bool: return True def __from_arrow__(self, array: pyarrow.Array | pyarrow.ChunkedArray) -> BaseMaskedArray: import pyarrow from pandas.core.arrays.arrow._arrow_utils import pyarrow_array_to_numpy_and_mask array_class = self.construct_array_type() pyarrow_type = pyarrow.from_numpy_dtype(self.type) if not array.type.equals(pyarrow_type) and (not pyarrow.types.is_null(array.type)): rt_dtype = pandas_dtype(array.type.to_pandas_dtype()) if rt_dtype.kind not in 'iuf': raise TypeError(f'Expected array of {self} type, got {array.type} instead') array = array.cast(pyarrow_type) if isinstance(array, pyarrow.ChunkedArray): if array.num_chunks == 0: array = pyarrow.array([], type=array.type) else: array = array.combine_chunks() (data, mask) = pyarrow_array_to_numpy_and_mask(array, dtype=self.numpy_dtype) return array_class(data.copy(), ~mask, copy=False) @classmethod def _get_dtype_mapping(cls) -> Mapping[np.dtype, NumericDtype]: raise AbstractMethodError(cls) @classmethod def _standardize_dtype(cls, dtype: NumericDtype | str | np.dtype) -> NumericDtype: if isinstance(dtype, str) and dtype.startswith(('Int', 'UInt', 'Float')): dtype = dtype.lower() if not isinstance(dtype, NumericDtype): mapping = cls._get_dtype_mapping() try: dtype = mapping[np.dtype(dtype)] except KeyError as err: raise ValueError(f'invalid dtype specified {dtype}') from err return dtype @classmethod def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray: raise 
AbstractMethodError(cls) def _coerce_to_data_and_mask(values, dtype, copy: bool, dtype_cls: type[NumericDtype], default_dtype: np.dtype): checker = dtype_cls._checker mask = None inferred_type = None if dtype is None and hasattr(values, 'dtype'): if checker(values.dtype): dtype = values.dtype if dtype is not None: dtype = dtype_cls._standardize_dtype(dtype) cls = dtype_cls.construct_array_type() if isinstance(values, cls): (values, mask) = (values._data, values._mask) if dtype is not None: values = values.astype(dtype.numpy_dtype, copy=False) if copy: values = values.copy() mask = mask.copy() return (values, mask, dtype, inferred_type) original = values if not copy: values = np.asarray(values) else: values = np.array(values, copy=copy) inferred_type = None if values.dtype == object or is_string_dtype(values.dtype): inferred_type = lib.infer_dtype(values, skipna=True) if inferred_type == 'boolean' and dtype is None: name = dtype_cls.__name__.strip('_') raise TypeError(f'{values.dtype} cannot be converted to {name}') elif values.dtype.kind == 'b' and checker(dtype): if not copy: values = np.asarray(values, dtype=default_dtype) else: values = np.array(values, dtype=default_dtype, copy=copy) elif values.dtype.kind not in 'iuf': name = dtype_cls.__name__.strip('_') raise TypeError(f'{values.dtype} cannot be converted to {name}') if values.ndim != 1: raise TypeError('values must be a 1D list-like') if mask is None: if values.dtype.kind in 'iu': mask = np.zeros(len(values), dtype=np.bool_) else: mask = libmissing.is_numeric_na(values) else: assert len(mask) == len(values) if mask.ndim != 1: raise TypeError('mask must be a 1D list-like') if dtype is None: dtype = default_dtype else: dtype = dtype.numpy_dtype if is_integer_dtype(dtype) and values.dtype.kind == 'f' and (len(values) > 0): if mask.all(): values = np.ones(values.shape, dtype=dtype) else: idx = np.nanargmax(values) if int(values[idx]) != original[idx]: inferred_type = lib.infer_dtype(original, skipna=True) if inferred_type not in ['floating', 'mixed-integer-float'] and (not mask.any()): values = np.asarray(original, dtype=dtype) else: values = np.asarray(original, dtype='object') if mask.any(): values = values.copy() values[mask] = dtype_cls._internal_fill_value if inferred_type in ('string', 'unicode'): values = values.astype(dtype, copy=copy) else: values = dtype_cls._safe_cast(values, dtype, copy=False) return (values, mask, dtype, inferred_type) class NumericArray(BaseMaskedArray): _dtype_cls: type[NumericDtype] def __init__(self, values: np.ndarray, mask: npt.NDArray[np.bool_], copy: bool=False) -> None: checker = self._dtype_cls._checker if not (isinstance(values, np.ndarray) and checker(values.dtype)): descr = 'floating' if self._dtype_cls.kind == 'f' else 'integer' raise TypeError(f"values should be {descr} numpy array. 
Use the 'pd.array' function instead") if values.dtype == np.float16: raise TypeError('FloatingArray does not support np.float16 dtype.') super().__init__(values, mask, copy=copy) @cache_readonly def dtype(self) -> NumericDtype: mapping = self._dtype_cls._get_dtype_mapping() return mapping[self._data.dtype] @classmethod def _coerce_to_array(cls, value, *, dtype: DtypeObj, copy: bool=False) -> tuple[np.ndarray, np.ndarray]: dtype_cls = cls._dtype_cls default_dtype = dtype_cls._default_np_dtype (values, mask, _, _) = _coerce_to_data_and_mask(value, dtype, copy, dtype_cls, default_dtype) return (values, mask) @classmethod def _from_sequence_of_strings(cls, strings, *, dtype: ExtensionDtype, copy: bool=False) -> Self: from pandas.core.tools.numeric import to_numeric scalars = to_numeric(strings, errors='raise', dtype_backend='numpy_nullable') return cls._from_sequence(scalars, dtype=dtype, copy=copy) _HANDLED_TYPES = (np.ndarray, numbers.Number) # File: pandas-main/pandas/core/arrays/numpy_.py from __future__ import annotations from typing import TYPE_CHECKING, Literal import numpy as np from pandas._libs import lib from pandas._libs.tslibs import is_supported_dtype from pandas.compat.numpy import function as nv from pandas.core.dtypes.astype import astype_array from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike from pandas.core.dtypes.common import pandas_dtype from pandas.core.dtypes.dtypes import NumpyEADtype from pandas.core.dtypes.missing import isna from pandas.core import arraylike, missing, nanops, ops from pandas.core.arraylike import OpsMixin from pandas.core.arrays._mixins import NDArrayBackedExtensionArray from pandas.core.construction import ensure_wrapped_if_datetimelike from pandas.core.strings.object_array import ObjectStringArrayMixin if TYPE_CHECKING: from pandas._typing import AxisInt, Dtype, FillnaOptions, InterpolateOptions, NpDtype, Scalar, Self, npt from pandas import Index class NumpyExtensionArray(OpsMixin, NDArrayBackedExtensionArray, ObjectStringArrayMixin): _typ = 'npy_extension' __array_priority__ = 1000 _ndarray: np.ndarray _dtype: NumpyEADtype _internal_fill_value = np.nan def __init__(self, values: np.ndarray | NumpyExtensionArray, copy: bool=False) -> None: if isinstance(values, type(self)): values = values._ndarray if not isinstance(values, np.ndarray): raise ValueError(f"'values' must be a NumPy array, not {type(values).__name__}") if values.ndim == 0: raise ValueError('NumpyExtensionArray must be 1-dimensional.') if copy: values = values.copy() dtype = NumpyEADtype(values.dtype) super().__init__(values, dtype) @classmethod def _from_sequence(cls, scalars, *, dtype: Dtype | None=None, copy: bool=False) -> NumpyExtensionArray: if isinstance(dtype, NumpyEADtype): dtype = dtype._dtype result = np.asarray(scalars, dtype=dtype) if result.ndim > 1 and (not hasattr(scalars, 'dtype')) and (dtype is None or dtype == object): result = construct_1d_object_array_from_listlike(scalars) if copy and result is scalars: result = result.copy() return cls(result) @property def dtype(self) -> NumpyEADtype: return self._dtype def __array__(self, dtype: NpDtype | None=None, copy: bool | None=None) -> np.ndarray: return np.asarray(self._ndarray, dtype=dtype) def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): out = kwargs.get('out', ()) result = arraylike.maybe_dispatch_ufunc_to_dunder_op(self, ufunc, method, *inputs, **kwargs) if result is not NotImplemented: return result if 'out' in kwargs: return 
arraylike.dispatch_ufunc_with_out(self, ufunc, method, *inputs, **kwargs) if method == 'reduce': result = arraylike.dispatch_reduction_ufunc(self, ufunc, method, *inputs, **kwargs) if result is not NotImplemented: return result inputs = tuple((x._ndarray if isinstance(x, NumpyExtensionArray) else x for x in inputs)) if out: kwargs['out'] = tuple((x._ndarray if isinstance(x, NumpyExtensionArray) else x for x in out)) result = getattr(ufunc, method)(*inputs, **kwargs) if ufunc.nout > 1: return tuple((type(self)(x) for x in result)) elif method == 'at': return None elif method == 'reduce': if isinstance(result, np.ndarray): return type(self)(result) return result else: return type(self)(result) def astype(self, dtype, copy: bool=True): dtype = pandas_dtype(dtype) if dtype == self.dtype: if copy: return self.copy() return self result = astype_array(self._ndarray, dtype=dtype, copy=copy) return result def isna(self) -> np.ndarray: return isna(self._ndarray) def _validate_scalar(self, fill_value): if fill_value is None: fill_value = self.dtype.na_value return fill_value def _values_for_factorize(self) -> tuple[np.ndarray, float | None]: if self.dtype.kind in 'iub': fv = None else: fv = np.nan return (self._ndarray, fv) def _pad_or_backfill(self, *, method: FillnaOptions, limit: int | None=None, limit_area: Literal['inside', 'outside'] | None=None, copy: bool=True) -> Self: if copy: out_data = self._ndarray.copy() else: out_data = self._ndarray meth = missing.clean_fill_method(method) missing.pad_or_backfill_inplace(out_data.T, method=meth, axis=0, limit=limit, limit_area=limit_area) if not copy: return self return type(self)._simple_new(out_data, dtype=self.dtype) def interpolate(self, *, method: InterpolateOptions, axis: int, index: Index, limit, limit_direction, limit_area, copy: bool, **kwargs) -> Self: if not copy: out_data = self._ndarray else: out_data = self._ndarray.copy() missing.interpolate_2d_inplace(out_data, method=method, axis=axis, index=index, limit=limit, limit_direction=limit_direction, limit_area=limit_area, **kwargs) if not copy: return self return type(self)._simple_new(out_data, dtype=self.dtype) def any(self, *, axis: AxisInt | None=None, out=None, keepdims: bool=False, skipna: bool=True): nv.validate_any((), {'out': out, 'keepdims': keepdims}) result = nanops.nanany(self._ndarray, axis=axis, skipna=skipna) return self._wrap_reduction_result(axis, result) def all(self, *, axis: AxisInt | None=None, out=None, keepdims: bool=False, skipna: bool=True): nv.validate_all((), {'out': out, 'keepdims': keepdims}) result = nanops.nanall(self._ndarray, axis=axis, skipna=skipna) return self._wrap_reduction_result(axis, result) def min(self, *, axis: AxisInt | None=None, skipna: bool=True, **kwargs) -> Scalar: nv.validate_min((), kwargs) result = nanops.nanmin(values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna) return self._wrap_reduction_result(axis, result) def max(self, *, axis: AxisInt | None=None, skipna: bool=True, **kwargs) -> Scalar: nv.validate_max((), kwargs) result = nanops.nanmax(values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna) return self._wrap_reduction_result(axis, result) def sum(self, *, axis: AxisInt | None=None, skipna: bool=True, min_count: int=0, **kwargs) -> Scalar: nv.validate_sum((), kwargs) result = nanops.nansum(self._ndarray, axis=axis, skipna=skipna, min_count=min_count) return self._wrap_reduction_result(axis, result) def prod(self, *, axis: AxisInt | None=None, skipna: bool=True, min_count: int=0, **kwargs) -> Scalar: 
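# (illustrative comment, not part of the upstream source) Each reduction in this class
# follows the same pattern: validate numpy-compat keyword arguments via ``nv``, delegate
# to the matching ``nanops`` routine on ``self._ndarray``, and wrap the result with
# ``_wrap_reduction_result``.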
nv.validate_prod((), kwargs) result = nanops.nanprod(self._ndarray, axis=axis, skipna=skipna, min_count=min_count) return self._wrap_reduction_result(axis, result) def mean(self, *, axis: AxisInt | None=None, dtype: NpDtype | None=None, out=None, keepdims: bool=False, skipna: bool=True): nv.validate_mean((), {'dtype': dtype, 'out': out, 'keepdims': keepdims}) result = nanops.nanmean(self._ndarray, axis=axis, skipna=skipna) return self._wrap_reduction_result(axis, result) def median(self, *, axis: AxisInt | None=None, out=None, overwrite_input: bool=False, keepdims: bool=False, skipna: bool=True): nv.validate_median((), {'out': out, 'overwrite_input': overwrite_input, 'keepdims': keepdims}) result = nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna) return self._wrap_reduction_result(axis, result) def std(self, *, axis: AxisInt | None=None, dtype: NpDtype | None=None, out=None, ddof: int=1, keepdims: bool=False, skipna: bool=True): nv.validate_stat_ddof_func((), {'dtype': dtype, 'out': out, 'keepdims': keepdims}, fname='std') result = nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof) return self._wrap_reduction_result(axis, result) def var(self, *, axis: AxisInt | None=None, dtype: NpDtype | None=None, out=None, ddof: int=1, keepdims: bool=False, skipna: bool=True): nv.validate_stat_ddof_func((), {'dtype': dtype, 'out': out, 'keepdims': keepdims}, fname='var') result = nanops.nanvar(self._ndarray, axis=axis, skipna=skipna, ddof=ddof) return self._wrap_reduction_result(axis, result) def sem(self, *, axis: AxisInt | None=None, dtype: NpDtype | None=None, out=None, ddof: int=1, keepdims: bool=False, skipna: bool=True): nv.validate_stat_ddof_func((), {'dtype': dtype, 'out': out, 'keepdims': keepdims}, fname='sem') result = nanops.nansem(self._ndarray, axis=axis, skipna=skipna, ddof=ddof) return self._wrap_reduction_result(axis, result) def kurt(self, *, axis: AxisInt | None=None, dtype: NpDtype | None=None, out=None, keepdims: bool=False, skipna: bool=True): nv.validate_stat_ddof_func((), {'dtype': dtype, 'out': out, 'keepdims': keepdims}, fname='kurt') result = nanops.nankurt(self._ndarray, axis=axis, skipna=skipna) return self._wrap_reduction_result(axis, result) def skew(self, *, axis: AxisInt | None=None, dtype: NpDtype | None=None, out=None, keepdims: bool=False, skipna: bool=True): nv.validate_stat_ddof_func((), {'dtype': dtype, 'out': out, 'keepdims': keepdims}, fname='skew') result = nanops.nanskew(self._ndarray, axis=axis, skipna=skipna) return self._wrap_reduction_result(axis, result) def to_numpy(self, dtype: npt.DTypeLike | None=None, copy: bool=False, na_value: object=lib.no_default) -> np.ndarray: mask = self.isna() if na_value is not lib.no_default and mask.any(): result = self._ndarray.copy() result[mask] = na_value else: result = self._ndarray result = np.asarray(result, dtype=dtype) if copy and result is self._ndarray: result = result.copy() return result def __invert__(self) -> NumpyExtensionArray: return type(self)(~self._ndarray) def __neg__(self) -> NumpyExtensionArray: return type(self)(-self._ndarray) def __pos__(self) -> NumpyExtensionArray: return type(self)(+self._ndarray) def __abs__(self) -> NumpyExtensionArray: return type(self)(abs(self._ndarray)) def _cmp_method(self, other, op): if isinstance(other, NumpyExtensionArray): other = other._ndarray other = ops.maybe_prepare_scalar_for_op(other, (len(self),)) pd_op = ops.get_array_op(op) other = ensure_wrapped_if_datetimelike(other) result = pd_op(self._ndarray, other) if op is divmod or op is 
ops.rdivmod: (a, b) = result if isinstance(a, np.ndarray): return (self._wrap_ndarray_result(a), self._wrap_ndarray_result(b)) return (a, b) if isinstance(result, np.ndarray): return self._wrap_ndarray_result(result) return result _arith_method = _cmp_method def _wrap_ndarray_result(self, result: np.ndarray): if result.dtype.kind == 'm' and is_supported_dtype(result.dtype): from pandas.core.arrays import TimedeltaArray return TimedeltaArray._simple_new(result, dtype=result.dtype) return type(self)(result) # File: pandas-main/pandas/core/arrays/period.py from __future__ import annotations from datetime import timedelta import operator from typing import TYPE_CHECKING, Any, Literal, TypeVar, cast, overload import warnings import numpy as np from pandas._libs import algos as libalgos, lib from pandas._libs.arrays import NDArrayBacked from pandas._libs.tslibs import BaseOffset, NaT, NaTType, Timedelta, add_overflowsafe, astype_overflowsafe, dt64arr_to_periodarr as c_dt64arr_to_periodarr, get_unit_from_dtype, iNaT, parsing, period as libperiod, to_offset from pandas._libs.tslibs.dtypes import FreqGroup, PeriodDtypeBase from pandas._libs.tslibs.fields import isleapyear_arr from pandas._libs.tslibs.offsets import Tick, delta_to_tick from pandas._libs.tslibs.period import DIFFERENT_FREQ, IncompatibleFrequency, Period, get_period_field_arr, period_asfreq_arr from pandas.util._decorators import cache_readonly, doc from pandas.core.dtypes.common import ensure_object, pandas_dtype from pandas.core.dtypes.dtypes import DatetimeTZDtype, PeriodDtype from pandas.core.dtypes.generic import ABCIndex, ABCPeriodIndex, ABCSeries, ABCTimedeltaArray from pandas.core.dtypes.missing import isna from pandas.core.arrays import datetimelike as dtl import pandas.core.common as com if TYPE_CHECKING: from collections.abc import Callable, Sequence from pandas._typing import AnyArrayLike, Dtype, FillnaOptions, NpDtype, NumpySorter, NumpyValueArrayLike, Self, npt from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.arrays import DatetimeArray, TimedeltaArray from pandas.core.arrays.base import ExtensionArray BaseOffsetT = TypeVar('BaseOffsetT', bound=BaseOffset) _shared_doc_kwargs = {'klass': 'PeriodArray'} def _field_accessor(name: str, docstring: str | None=None): def f(self): base = self.dtype._dtype_code result = get_period_field_arr(name, self.asi8, base) return result f.__name__ = name f.__doc__ = docstring return property(f) class PeriodArray(dtl.DatelikeOps, libperiod.PeriodMixin): __array_priority__ = 1000 _typ = 'periodarray' _internal_fill_value = np.int64(iNaT) _recognized_scalars = (Period,) _is_recognized_dtype = lambda x: isinstance(x, PeriodDtype) _infer_matches = ('period',) @property def _scalar_type(self) -> type[Period]: return Period _other_ops: list[str] = [] _bool_ops: list[str] = ['is_leap_year'] _object_ops: list[str] = ['start_time', 'end_time', 'freq'] _field_ops: list[str] = ['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear', 'weekday', 'week', 'dayofweek', 'day_of_week', 'dayofyear', 'day_of_year', 'quarter', 'qyear', 'days_in_month', 'daysinmonth'] _datetimelike_ops: list[str] = _field_ops + _object_ops + _bool_ops _datetimelike_methods: list[str] = ['strftime', 'to_timestamp', 'asfreq'] _dtype: PeriodDtype def __init__(self, values, dtype: Dtype | None=None, copy: bool=False) -> None: if dtype is not None: dtype = pandas_dtype(dtype) if not isinstance(dtype, PeriodDtype): raise ValueError(f'Invalid dtype {dtype} for PeriodArray') if isinstance(values, 
ABCSeries): values = values._values if not isinstance(values, type(self)): raise TypeError('Incorrect dtype') elif isinstance(values, ABCPeriodIndex): values = values._values if isinstance(values, type(self)): if dtype is not None and dtype != values.dtype: raise raise_on_incompatible(values, dtype.freq) (values, dtype) = (values._ndarray, values.dtype) if not copy: values = np.asarray(values, dtype='int64') else: values = np.array(values, dtype='int64', copy=copy) if dtype is None: raise ValueError('dtype is not specified and cannot be inferred') dtype = cast(PeriodDtype, dtype) NDArrayBacked.__init__(self, values, dtype) @classmethod def _simple_new(cls, values: npt.NDArray[np.int64], dtype: PeriodDtype) -> Self: assertion_msg = 'Should be numpy array of type i8' assert isinstance(values, np.ndarray) and values.dtype == 'i8', assertion_msg return cls(values, dtype=dtype) @classmethod def _from_sequence(cls, scalars, *, dtype: Dtype | None=None, copy: bool=False) -> Self: if dtype is not None: dtype = pandas_dtype(dtype) if dtype and isinstance(dtype, PeriodDtype): freq = dtype.freq else: freq = None if isinstance(scalars, cls): validate_dtype_freq(scalars.dtype, freq) if copy: scalars = scalars.copy() return scalars periods = np.asarray(scalars, dtype=object) freq = freq or libperiod.extract_freq(periods) ordinals = libperiod.extract_ordinals(periods, freq) dtype = PeriodDtype(freq) return cls(ordinals, dtype=dtype) @classmethod def _from_sequence_of_strings(cls, strings, *, dtype: ExtensionDtype, copy: bool=False) -> Self: return cls._from_sequence(strings, dtype=dtype, copy=copy) @classmethod def _from_datetime64(cls, data, freq, tz=None) -> Self: if isinstance(freq, BaseOffset): freq = PeriodDtype(freq)._freqstr (data, freq) = dt64arr_to_periodarr(data, freq, tz) dtype = PeriodDtype(freq) return cls(data, dtype=dtype) @classmethod def _generate_range(cls, start, end, periods, freq): periods = dtl.validate_periods(periods) if freq is not None: freq = Period._maybe_convert_freq(freq) if start is not None or end is not None: (subarr, freq) = _get_ordinal_range(start, end, periods, freq) else: raise ValueError('Not enough parameters to construct Period range') return (subarr, freq) @classmethod def _from_fields(cls, *, fields: dict, freq) -> Self: (subarr, freq) = _range_from_fields(freq=freq, **fields) dtype = PeriodDtype(freq) return cls._simple_new(subarr, dtype=dtype) def _unbox_scalar(self, value: Period | NaTType) -> np.int64: if value is NaT: return np.int64(value._value) elif isinstance(value, self._scalar_type): self._check_compatible_with(value) return np.int64(value.ordinal) else: raise ValueError(f"'value' should be a Period. 
Got '{value}' instead.") def _scalar_from_string(self, value: str) -> Period: return Period(value, freq=self.freq) def _check_compatible_with(self, other: Period | NaTType | PeriodArray) -> None: if other is NaT: return self._require_matching_freq(other.freq) @cache_readonly def dtype(self) -> PeriodDtype: return self._dtype @property def freq(self) -> BaseOffset: return self.dtype.freq @property def freqstr(self) -> str: return PeriodDtype(self.freq)._freqstr def __array__(self, dtype: NpDtype | None=None, copy: bool | None=None) -> np.ndarray: if dtype == 'i8': return self.asi8 elif dtype == bool: return ~self._isnan return np.array(list(self), dtype=object) def __arrow_array__(self, type=None): import pyarrow from pandas.core.arrays.arrow.extension_types import ArrowPeriodType if type is not None: if pyarrow.types.is_integer(type): return pyarrow.array(self._ndarray, mask=self.isna(), type=type) elif isinstance(type, ArrowPeriodType): if self.freqstr != type.freq: raise TypeError(f"Not supported to convert PeriodArray to array with different 'freq' ({self.freqstr} vs {type.freq})") else: raise TypeError(f"Not supported to convert PeriodArray to '{type}' type") period_type = ArrowPeriodType(self.freqstr) storage_array = pyarrow.array(self._ndarray, mask=self.isna(), type='int64') return pyarrow.ExtensionArray.from_storage(period_type, storage_array) year = _field_accessor('year', '\n The year of the period.\n\n See Also\n --------\n PeriodIndex.day_of_year : The ordinal day of the year.\n PeriodIndex.dayofyear : The ordinal day of the year.\n PeriodIndex.is_leap_year : Logical indicating if the date belongs to a\n leap year.\n PeriodIndex.weekofyear : The week ordinal of the year.\n PeriodIndex.year : The year of the period.\n\n Examples\n --------\n >>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y")\n >>> idx.year\n Index([2023, 2024, 2025], dtype=\'int64\')\n ') month = _field_accessor('month', '\n The month as January=1, December=12.\n\n See Also\n --------\n PeriodIndex.days_in_month : The number of days in the month.\n PeriodIndex.daysinmonth : The number of days in the month.\n\n Examples\n --------\n >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")\n >>> idx.month\n Index([1, 2, 3], dtype=\'int64\')\n ') day = _field_accessor('day', "\n The days of the period.\n\n See Also\n --------\n PeriodIndex.day_of_week : The day of the week with Monday=0, Sunday=6.\n PeriodIndex.day_of_year : The ordinal day of the year.\n PeriodIndex.dayofweek : The day of the week with Monday=0, Sunday=6.\n PeriodIndex.dayofyear : The ordinal day of the year.\n PeriodIndex.days_in_month : The number of days in the month.\n PeriodIndex.daysinmonth : The number of days in the month.\n PeriodIndex.weekday : The day of the week with Monday=0, Sunday=6.\n\n Examples\n --------\n >>> idx = pd.PeriodIndex(['2020-01-31', '2020-02-28'], freq='D')\n >>> idx.day\n Index([31, 28], dtype='int64')\n ") hour = _field_accessor('hour', '\n The hour of the period.\n\n See Also\n --------\n PeriodIndex.minute : The minute of the period.\n PeriodIndex.second : The second of the period.\n PeriodIndex.to_timestamp : Cast to DatetimeArray/Index.\n\n Examples\n --------\n >>> idx = pd.PeriodIndex(["2023-01-01 10:00", "2023-01-01 11:00"], freq=\'h\')\n >>> idx.hour\n Index([10, 11], dtype=\'int64\')\n ') minute = _field_accessor('minute', '\n The minute of the period.\n\n See Also\n --------\n PeriodIndex.hour : The hour of the period.\n PeriodIndex.second : The second of the period.\n 
PeriodIndex.to_timestamp : Cast to DatetimeArray/Index.\n\n Examples\n --------\n >>> idx = pd.PeriodIndex(["2023-01-01 10:30:00",\n ... "2023-01-01 11:50:00"], freq=\'min\')\n >>> idx.minute\n Index([30, 50], dtype=\'int64\')\n ') second = _field_accessor('second', '\n The second of the period.\n\n See Also\n --------\n PeriodIndex.hour : The hour of the period.\n PeriodIndex.minute : The minute of the period.\n PeriodIndex.to_timestamp : Cast to DatetimeArray/Index.\n\n Examples\n --------\n >>> idx = pd.PeriodIndex(["2023-01-01 10:00:30",\n ... "2023-01-01 10:00:31"], freq=\'s\')\n >>> idx.second\n Index([30, 31], dtype=\'int64\')\n ') weekofyear = _field_accessor('week', '\n The week ordinal of the year.\n\n See Also\n --------\n PeriodIndex.day_of_week : The day of the week with Monday=0, Sunday=6.\n PeriodIndex.dayofweek : The day of the week with Monday=0, Sunday=6.\n PeriodIndex.week : The week ordinal of the year.\n PeriodIndex.weekday : The day of the week with Monday=0, Sunday=6.\n PeriodIndex.year : The year of the period.\n\n Examples\n --------\n >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")\n >>> idx.week # It can be written `weekofyear`\n Index([5, 9, 13], dtype=\'int64\')\n ') week = weekofyear day_of_week = _field_accessor('day_of_week', '\n The day of the week with Monday=0, Sunday=6.\n\n See Also\n --------\n PeriodIndex.day : The days of the period.\n PeriodIndex.day_of_week : The day of the week with Monday=0, Sunday=6.\n PeriodIndex.day_of_year : The ordinal day of the year.\n PeriodIndex.dayofweek : The day of the week with Monday=0, Sunday=6.\n PeriodIndex.dayofyear : The ordinal day of the year.\n PeriodIndex.week : The week ordinal of the year.\n PeriodIndex.weekday : The day of the week with Monday=0, Sunday=6.\n PeriodIndex.weekofyear : The week ordinal of the year.\n\n Examples\n --------\n >>> idx = pd.PeriodIndex(["2023-01-01", "2023-01-02", "2023-01-03"], freq="D")\n >>> idx.weekday\n Index([6, 0, 1], dtype=\'int64\')\n ') dayofweek = day_of_week weekday = dayofweek dayofyear = day_of_year = _field_accessor('day_of_year', '\n The ordinal day of the year.\n\n See Also\n --------\n PeriodIndex.day : The days of the period.\n PeriodIndex.day_of_week : The day of the week with Monday=0, Sunday=6.\n PeriodIndex.day_of_year : The ordinal day of the year.\n PeriodIndex.dayofweek : The day of the week with Monday=0, Sunday=6.\n PeriodIndex.dayofyear : The ordinal day of the year.\n PeriodIndex.weekday : The day of the week with Monday=0, Sunday=6.\n PeriodIndex.weekofyear : The week ordinal of the year.\n PeriodIndex.year : The year of the period.\n\n Examples\n --------\n >>> idx = pd.PeriodIndex(["2023-01-10", "2023-02-01", "2023-03-01"], freq="D")\n >>> idx.dayofyear\n Index([10, 32, 60], dtype=\'int64\')\n\n >>> idx = pd.PeriodIndex(["2023", "2024", "2025"], freq="Y")\n >>> idx\n PeriodIndex([\'2023\', \'2024\', \'2025\'], dtype=\'period[Y-DEC]\')\n >>> idx.dayofyear\n Index([365, 366, 365], dtype=\'int64\')\n ') quarter = _field_accessor('quarter', '\n The quarter of the date.\n\n See Also\n --------\n PeriodIndex.qyear : Fiscal year the Period lies in according to its\n starting-quarter.\n\n Examples\n --------\n >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")\n >>> idx.quarter\n Index([1, 1, 1], dtype=\'int64\')\n ') qyear = _field_accessor('qyear', "\n Fiscal year the Period lies in according to its starting-quarter.\n\n The `year` and the `qyear` of the period will be the same if the fiscal\n and calendar years 
are the same. When they are not, the fiscal year\n can be different from the calendar year of the period.\n\n Returns\n -------\n int\n The fiscal year of the period.\n\n See Also\n --------\n PeriodIndex.quarter : The quarter of the date.\n PeriodIndex.year : The year of the period.\n\n Examples\n --------\n If the natural and fiscal year are the same, `qyear` and `year` will\n be the same.\n\n >>> per = pd.Period('2018Q1', freq='Q')\n >>> per.qyear\n 2018\n >>> per.year\n 2018\n\n If the fiscal year starts in April (`Q-MAR`), the first quarter of\n 2018 will start in April 2017. `year` will then be 2017, but `qyear`\n will be the fiscal year, 2018.\n\n >>> per = pd.Period('2018Q1', freq='Q-MAR')\n >>> per.start_time\n Timestamp('2017-04-01 00:00:00')\n >>> per.qyear\n 2018\n >>> per.year\n 2017\n ") days_in_month = _field_accessor('days_in_month', '\n The number of days in the month.\n\n See Also\n --------\n PeriodIndex.day : The days of the period.\n PeriodIndex.days_in_month : The number of days in the month.\n PeriodIndex.daysinmonth : The number of days in the month.\n PeriodIndex.month : The month as January=1, December=12.\n\n Examples\n --------\n For Series:\n\n >>> period = pd.period_range(\'2020-1-1 00:00\', \'2020-3-1 00:00\', freq=\'M\')\n >>> s = pd.Series(period)\n >>> s\n 0 2020-01\n 1 2020-02\n 2 2020-03\n dtype: period[M]\n >>> s.dt.days_in_month\n 0 31\n 1 29\n 2 31\n dtype: int64\n\n For PeriodIndex:\n\n >>> idx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")\n >>> idx.days_in_month # It can be also entered as `daysinmonth`\n Index([31, 28, 31], dtype=\'int64\')\n ') daysinmonth = days_in_month @property def is_leap_year(self) -> npt.NDArray[np.bool_]: return isleapyear_arr(np.asarray(self.year)) def to_timestamp(self, freq=None, how: str='start') -> DatetimeArray: from pandas.core.arrays import DatetimeArray how = libperiod.validate_end_alias(how) end = how == 'E' if end: if freq == 'B' or self.freq == 'B': adjust = Timedelta(1, 'D') - Timedelta(1, 'ns') return self.to_timestamp(how='start') + adjust else: adjust = Timedelta(1, 'ns') return (self + self.freq).to_timestamp(how='start') - adjust if freq is None: freq_code = self._dtype._get_to_timestamp_base() dtype = PeriodDtypeBase(freq_code, 1) freq = dtype._freqstr base = freq_code else: freq = Period._maybe_convert_freq(freq) base = freq._period_dtype_code new_parr = self.asfreq(freq, how=how) new_data = libperiod.periodarr_to_dt64arr(new_parr.asi8, base) dta = DatetimeArray._from_sequence(new_data) if self.freq.name == 'B': diffs = libalgos.unique_deltas(self.asi8) if len(diffs) == 1: diff = diffs[0] if diff == self.dtype._n: dta._freq = self.freq elif diff == 1: dta._freq = self.freq.base return dta else: return dta._with_freq('infer') def _box_func(self, x) -> Period | NaTType: return Period._from_ordinal(ordinal=x, freq=self.freq) @doc(**_shared_doc_kwargs, other='PeriodIndex', other_name='PeriodIndex') def asfreq(self, freq=None, how: str='E') -> Self: how = libperiod.validate_end_alias(how) if isinstance(freq, BaseOffset) and hasattr(freq, '_period_dtype_code'): freq = PeriodDtype(freq)._freqstr freq = Period._maybe_convert_freq(freq) base1 = self._dtype._dtype_code base2 = freq._period_dtype_code asi8 = self.asi8 end = how == 'E' if end: ordinal = asi8 + self.dtype._n - 1 else: ordinal = asi8 new_data = period_asfreq_arr(ordinal, base1, base2, end) if self._hasna: new_data[self._isnan] = iNaT dtype = PeriodDtype(freq) return type(self)(new_data, dtype=dtype) def _formatter(self, boxed: 
bool=False) -> Callable[[object], str]: if boxed: return str return "'{}'".format def _format_native_types(self, *, na_rep: str | float='NaT', date_format=None, **kwargs) -> npt.NDArray[np.object_]: return libperiod.period_array_strftime(self.asi8, self.dtype._dtype_code, na_rep, date_format) def astype(self, dtype, copy: bool=True): dtype = pandas_dtype(dtype) if dtype == self._dtype: if not copy: return self else: return self.copy() if isinstance(dtype, PeriodDtype): return self.asfreq(dtype.freq) if lib.is_np_dtype(dtype, 'M') or isinstance(dtype, DatetimeTZDtype): tz = getattr(dtype, 'tz', None) unit = dtl.dtype_to_unit(dtype) return self.to_timestamp().tz_localize(tz).as_unit(unit) return super().astype(dtype, copy=copy) def searchsorted(self, value: NumpyValueArrayLike | ExtensionArray, side: Literal['left', 'right']='left', sorter: NumpySorter | None=None) -> npt.NDArray[np.intp] | np.intp: npvalue = self._validate_setitem_value(value).view('M8[ns]') m8arr = self._ndarray.view('M8[ns]') return m8arr.searchsorted(npvalue, side=side, sorter=sorter) def _pad_or_backfill(self, *, method: FillnaOptions, limit: int | None=None, limit_area: Literal['inside', 'outside'] | None=None, copy: bool=True) -> Self: dta = self.view('M8[ns]') result = dta._pad_or_backfill(method=method, limit=limit, limit_area=limit_area, copy=copy) if copy: return cast('Self', result.view(self.dtype)) else: return self def _addsub_int_array_or_scalar(self, other: np.ndarray | int, op: Callable[[Any, Any], Any]) -> Self: assert op in [operator.add, operator.sub] if op is operator.sub: other = -other res_values = add_overflowsafe(self.asi8, np.asarray(other, dtype='i8')) return type(self)(res_values, dtype=self.dtype) def _add_offset(self, other: BaseOffset): assert not isinstance(other, Tick) self._require_matching_freq(other, base=True) return self._addsub_int_array_or_scalar(other.n, operator.add) def _add_timedeltalike_scalar(self, other): if not isinstance(self.freq, Tick): raise raise_on_incompatible(self, other) if isna(other): return super()._add_timedeltalike_scalar(other) td = np.asarray(Timedelta(other).asm8) return self._add_timedelta_arraylike(td) def _add_timedelta_arraylike(self, other: TimedeltaArray | npt.NDArray[np.timedelta64]) -> Self: if not self.dtype._is_tick_like(): raise TypeError(f'Cannot add or subtract timedelta64[ns] dtype from {self.dtype}') dtype = np.dtype(f'm8[{self.dtype._td64_unit}]') try: delta = astype_overflowsafe(np.asarray(other), dtype=dtype, copy=False, round_ok=False) except ValueError as err: raise IncompatibleFrequency("Cannot add/subtract timedelta-like from PeriodArray that is not an integer multiple of the PeriodArray's freq.") from err res_values = add_overflowsafe(self.asi8, np.asarray(delta.view('i8'))) return type(self)(res_values, dtype=self.dtype) def _check_timedeltalike_freq_compat(self, other): assert self.dtype._is_tick_like() dtype = np.dtype(f'm8[{self.dtype._td64_unit}]') if isinstance(other, (timedelta, np.timedelta64, Tick)): td = np.asarray(Timedelta(other).asm8) else: td = np.asarray(other) try: delta = astype_overflowsafe(td, dtype=dtype, copy=False, round_ok=False) except ValueError as err: raise raise_on_incompatible(self, other) from err delta = delta.view('i8') return lib.item_from_zerodim(delta) def _reduce(self, name: str, *, skipna: bool=True, keepdims: bool=False, **kwargs): result = super()._reduce(name, skipna=skipna, keepdims=keepdims, **kwargs) if keepdims and isinstance(result, np.ndarray): return self._from_sequence(result, 
dtype=self.dtype) return result def raise_on_incompatible(left, right) -> IncompatibleFrequency: if isinstance(right, (np.ndarray, ABCTimedeltaArray)) or right is None: other_freq = None elif isinstance(right, BaseOffset): with warnings.catch_warnings(): warnings.filterwarnings('ignore', 'PeriodDtype\\[B\\] is deprecated', category=FutureWarning) other_freq = PeriodDtype(right)._freqstr elif isinstance(right, (ABCPeriodIndex, PeriodArray, Period)): other_freq = right.freqstr else: other_freq = delta_to_tick(Timedelta(right)).freqstr own_freq = PeriodDtype(left.freq)._freqstr msg = DIFFERENT_FREQ.format(cls=type(left).__name__, own_freq=own_freq, other_freq=other_freq) return IncompatibleFrequency(msg) def period_array(data: Sequence[Period | str | None] | AnyArrayLike, freq: str | Tick | BaseOffset | None=None, copy: bool=False) -> PeriodArray: data_dtype = getattr(data, 'dtype', None) if lib.is_np_dtype(data_dtype, 'M'): return PeriodArray._from_datetime64(data, freq) if isinstance(data_dtype, PeriodDtype): out = PeriodArray(data) if freq is not None: if freq == data_dtype.freq: return out return out.asfreq(freq) return out if not isinstance(data, (np.ndarray, list, tuple, ABCSeries)): data = list(data) arrdata = np.asarray(data) dtype: PeriodDtype | None if freq: dtype = PeriodDtype(freq) else: dtype = None if arrdata.dtype.kind == 'f' and len(arrdata) > 0: raise TypeError('PeriodIndex does not allow floating point in construction') if arrdata.dtype.kind in 'iu': arr = arrdata.astype(np.int64, copy=False) ordinals = libperiod.from_ordinals(arr, freq) return PeriodArray(ordinals, dtype=dtype) data = ensure_object(arrdata) if freq is None: freq = libperiod.extract_freq(data) dtype = PeriodDtype(freq) return PeriodArray._from_sequence(data, dtype=dtype) @overload def validate_dtype_freq(dtype, freq: BaseOffsetT) -> BaseOffsetT: ... @overload def validate_dtype_freq(dtype, freq: timedelta | str | None) -> BaseOffset: ... 
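# A minimal usage sketch for the period constructors defined in this module (period_array,
# validate_dtype_freq), assuming a standard pandas install; the reprs shown are abbreviated
# and indicative rather than verified output.
#
# >>> import pandas as pd
# >>> from pandas.core.arrays.period import period_array, validate_dtype_freq
# >>> arr = period_array(["2023-01", "2023-02", None], freq="M")   # None becomes NaT (iNaT ordinal)
# >>> arr.dtype
# period[M]
# >>> arr.asfreq("Y")                                # re-anchors the int64 ordinals at yearly freq
# <PeriodArray> ['2023', '2023', NaT] dtype: period[Y-DEC]
# >>> validate_dtype_freq(pd.PeriodDtype("M"), "M")  # consistent dtype/freq -> returns the offset
# <MonthEnd>
# >>> validate_dtype_freq(pd.PeriodDtype("M"), "D")  # mismatch raises IncompatibleFrequency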
def validate_dtype_freq(dtype, freq: BaseOffsetT | BaseOffset | timedelta | str | None) -> BaseOffsetT: if freq is not None: freq = to_offset(freq, is_period=True) if dtype is not None: dtype = pandas_dtype(dtype) if not isinstance(dtype, PeriodDtype): raise ValueError('dtype must be PeriodDtype') if freq is None: freq = dtype.freq elif freq != dtype.freq: raise IncompatibleFrequency('specified freq and dtype are different') return freq def dt64arr_to_periodarr(data, freq, tz=None) -> tuple[npt.NDArray[np.int64], BaseOffset]: if not isinstance(data.dtype, np.dtype) or data.dtype.kind != 'M': raise ValueError(f'Wrong dtype: {data.dtype}') if freq is None: if isinstance(data, ABCIndex): (data, freq) = (data._values, data.freq) elif isinstance(data, ABCSeries): (data, freq) = (data._values, data.dt.freq) elif isinstance(data, (ABCIndex, ABCSeries)): data = data._values reso = get_unit_from_dtype(data.dtype) freq = Period._maybe_convert_freq(freq) base = freq._period_dtype_code return (c_dt64arr_to_periodarr(data.view('i8'), base, tz, reso=reso), freq) def _get_ordinal_range(start, end, periods, freq, mult: int=1): if com.count_not_none(start, end, periods) != 2: raise ValueError('Of the three parameters: start, end, and periods, exactly two must be specified') if freq is not None: freq = to_offset(freq, is_period=True) mult = freq.n if start is not None: start = Period(start, freq) if end is not None: end = Period(end, freq) is_start_per = isinstance(start, Period) is_end_per = isinstance(end, Period) if is_start_per and is_end_per and (start.freq != end.freq): raise ValueError('start and end must have same freq') if start is NaT or end is NaT: raise ValueError('start and end must not be NaT') if freq is None: if is_start_per: freq = start.freq elif is_end_per: freq = end.freq else: raise ValueError('Could not infer freq from start/end') mult = freq.n if periods is not None: periods = periods * mult if start is None: data = np.arange(end.ordinal - periods + mult, end.ordinal + 1, mult, dtype=np.int64) else: data = np.arange(start.ordinal, start.ordinal + periods, mult, dtype=np.int64) else: data = np.arange(start.ordinal, end.ordinal + 1, mult, dtype=np.int64) return (data, freq) def _range_from_fields(year=None, month=None, quarter=None, day=None, hour=None, minute=None, second=None, freq=None) -> tuple[np.ndarray, BaseOffset]: if hour is None: hour = 0 if minute is None: minute = 0 if second is None: second = 0 if day is None: day = 1 ordinals = [] if quarter is not None: if freq is None: freq = to_offset('Q', is_period=True) base = FreqGroup.FR_QTR.value else: freq = to_offset(freq, is_period=True) base = libperiod.freq_to_dtype_code(freq) if base != FreqGroup.FR_QTR.value: raise AssertionError('base must equal FR_QTR') freqstr = freq.freqstr (year, quarter) = _make_field_arrays(year, quarter) for (y, q) in zip(year, quarter): (calendar_year, calendar_month) = parsing.quarter_to_myear(y, q, freqstr) val = libperiod.period_ordinal(calendar_year, calendar_month, 1, 1, 1, 1, 0, 0, base) ordinals.append(val) else: freq = to_offset(freq, is_period=True) base = libperiod.freq_to_dtype_code(freq) arrays = _make_field_arrays(year, month, day, hour, minute, second) for (y, mth, d, h, mn, s) in zip(*arrays): ordinals.append(libperiod.period_ordinal(y, mth, d, h, mn, s, 0, 0, base)) return (np.array(ordinals, dtype=np.int64), freq) def _make_field_arrays(*fields) -> list[np.ndarray]: length = None for x in fields: if isinstance(x, (list, np.ndarray, ABCSeries)): if length is not None and len(x) != 
length: raise ValueError('Mismatched Period array lengths') if length is None: length = len(x) return [np.asarray(x) if isinstance(x, (np.ndarray, list, ABCSeries)) else np.repeat(x, length) for x in fields] # File: pandas-main/pandas/core/arrays/sparse/__init__.py from pandas.core.arrays.sparse.accessor import SparseAccessor, SparseFrameAccessor from pandas.core.arrays.sparse.array import BlockIndex, IntIndex, SparseArray, make_sparse_index __all__ = ['BlockIndex', 'IntIndex', 'make_sparse_index', 'SparseAccessor', 'SparseArray', 'SparseFrameAccessor'] # File: pandas-main/pandas/core/arrays/sparse/accessor.py """""" from __future__ import annotations from typing import TYPE_CHECKING import numpy as np from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.cast import find_common_type from pandas.core.dtypes.dtypes import SparseDtype from pandas.core.accessor import PandasDelegate, delegate_names from pandas.core.arrays.sparse.array import SparseArray if TYPE_CHECKING: from scipy.sparse import coo_matrix, spmatrix from pandas import DataFrame, Series class BaseAccessor: _validation_msg = "Can only use the '.sparse' accessor with Sparse data." def __init__(self, data=None) -> None: self._parent = data self._validate(data) def _validate(self, data) -> None: raise NotImplementedError @delegate_names(SparseArray, ['npoints', 'density', 'fill_value', 'sp_values'], typ='property') class SparseAccessor(BaseAccessor, PandasDelegate): def _validate(self, data) -> None: if not isinstance(data.dtype, SparseDtype): raise AttributeError(self._validation_msg) def _delegate_property_get(self, name: str, *args, **kwargs): return getattr(self._parent.array, name) def _delegate_method(self, name: str, *args, **kwargs): if name == 'from_coo': return self.from_coo(*args, **kwargs) elif name == 'to_coo': return self.to_coo(*args, **kwargs) else: raise ValueError @classmethod def from_coo(cls, A, dense_index: bool=False) -> Series: from pandas import Series from pandas.core.arrays.sparse.scipy_sparse import coo_to_sparse_series result = coo_to_sparse_series(A, dense_index=dense_index) result = Series(result.array, index=result.index, copy=False) return result def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels: bool=False) -> tuple[coo_matrix, list, list]: from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo (A, rows, columns) = sparse_series_to_coo(self._parent, row_levels, column_levels, sort_labels=sort_labels) return (A, rows, columns) def to_dense(self) -> Series: from pandas import Series return Series(self._parent.array.to_dense(), index=self._parent.index, name=self._parent.name, copy=False) class SparseFrameAccessor(BaseAccessor, PandasDelegate): def _validate(self, data) -> None: dtypes = data.dtypes if not all((isinstance(t, SparseDtype) for t in dtypes)): raise AttributeError(self._validation_msg) @classmethod def from_spmatrix(cls, data, index=None, columns=None) -> DataFrame: from pandas._libs.sparse import IntIndex from pandas import DataFrame data = data.tocsc() (index, columns) = cls._prep_index(data, index, columns) (n_rows, n_columns) = data.shape data.sort_indices() indices = data.indices indptr = data.indptr array_data = data.data dtype = SparseDtype(array_data.dtype) arrays = [] for i in range(n_columns): sl = slice(indptr[i], indptr[i + 1]) idx = IntIndex(n_rows, indices[sl], check_integrity=False) arr = SparseArray._simple_new(array_data[sl], idx, dtype) arrays.append(arr) return DataFrame._from_arrays(arrays, 
columns=columns, index=index, verify_integrity=False) def to_dense(self) -> DataFrame: from pandas import DataFrame data = {k: v.array.to_dense() for (k, v) in self._parent.items()} return DataFrame(data, index=self._parent.index, columns=self._parent.columns) def to_coo(self) -> spmatrix: import_optional_dependency('scipy') from scipy.sparse import coo_matrix dtype = find_common_type(self._parent.dtypes.to_list()) if isinstance(dtype, SparseDtype): dtype = dtype.subtype (cols, rows, data) = ([], [], []) for (col, (_, ser)) in enumerate(self._parent.items()): sp_arr = ser.array row = sp_arr.sp_index.indices cols.append(np.repeat(col, len(row))) rows.append(row) data.append(sp_arr.sp_values.astype(dtype, copy=False)) cols = np.concatenate(cols) rows = np.concatenate(rows) data = np.concatenate(data) return coo_matrix((data, (rows, cols)), shape=self._parent.shape) @property def density(self) -> float: tmp = np.mean([column.array.density for (_, column) in self._parent.items()]) return tmp @staticmethod def _prep_index(data, index, columns): from pandas.core.indexes.api import default_index, ensure_index (N, K) = data.shape if index is None: index = default_index(N) else: index = ensure_index(index) if columns is None: columns = default_index(K) else: columns = ensure_index(columns) if len(columns) != K: raise ValueError(f'Column length mismatch: {len(columns)} vs. {K}') if len(index) != N: raise ValueError(f'Index length mismatch: {len(index)} vs. {N}') return (index, columns) # File: pandas-main/pandas/core/arrays/sparse/array.py """""" from __future__ import annotations from collections import abc import numbers import operator from typing import TYPE_CHECKING, Any, Literal, cast, overload import warnings import numpy as np from pandas._config.config import get_option from pandas._libs import lib import pandas._libs.sparse as splib from pandas._libs.sparse import BlockIndex, IntIndex, SparseIndex from pandas._libs.tslibs import NaT from pandas.compat.numpy import function as nv from pandas.errors import PerformanceWarning from pandas.util._decorators import doc from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_bool_kwarg, validate_insert_loc from pandas.core.dtypes.astype import astype_array from pandas.core.dtypes.cast import find_common_type, maybe_box_datetimelike from pandas.core.dtypes.common import is_bool_dtype, is_integer, is_list_like, is_object_dtype, is_scalar, is_string_dtype, pandas_dtype from pandas.core.dtypes.dtypes import DatetimeTZDtype, SparseDtype from pandas.core.dtypes.generic import ABCIndex, ABCSeries from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna from pandas.core import arraylike import pandas.core.algorithms as algos from pandas.core.arraylike import OpsMixin from pandas.core.arrays import ExtensionArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.construction import ensure_wrapped_if_datetimelike, extract_array, sanitize_array from pandas.core.indexers import check_array_indexer, unpack_tuple_and_ellipses from pandas.core.nanops import check_below_min_count from pandas.io.formats import printing if TYPE_CHECKING: from collections.abc import Callable, Sequence from enum import Enum class ellipsis(Enum): Ellipsis = '...' 
Ellipsis = ellipsis.Ellipsis from scipy.sparse import spmatrix from pandas._typing import NumpySorter SparseIndexKind = Literal['integer', 'block'] from pandas._typing import ArrayLike, AstypeArg, Axis, AxisInt, Dtype, NpDtype, PositionalIndexer, Scalar, ScalarIndexer, Self, SequenceIndexer, npt from pandas import Series else: ellipsis = type(Ellipsis) _sparray_doc_kwargs = {'klass': 'SparseArray'} def _get_fill(arr: SparseArray) -> np.ndarray: try: return np.asarray(arr.fill_value, dtype=arr.dtype.subtype) except ValueError: return np.asarray(arr.fill_value) def _sparse_array_op(left: SparseArray, right: SparseArray, op: Callable, name: str) -> SparseArray: if name.startswith('__'): name = name[2:-2] ltype = left.dtype.subtype rtype = right.dtype.subtype if ltype != rtype: subtype = find_common_type([ltype, rtype]) ltype = SparseDtype(subtype, left.fill_value) rtype = SparseDtype(subtype, right.fill_value) left = left.astype(ltype, copy=False) right = right.astype(rtype, copy=False) dtype = ltype.subtype else: dtype = ltype result_dtype = None if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0: with np.errstate(all='ignore'): result = op(left.to_dense(), right.to_dense()) fill = op(_get_fill(left), _get_fill(right)) if left.sp_index.ngaps == 0: index = left.sp_index else: index = right.sp_index elif left.sp_index.equals(right.sp_index): with np.errstate(all='ignore'): result = op(left.sp_values, right.sp_values) fill = op(_get_fill(left), _get_fill(right)) index = left.sp_index else: if name[0] == 'r': (left, right) = (right, left) name = name[1:] if name in ('and', 'or', 'xor') and dtype == 'bool': opname = f'sparse_{name}_uint8' left_sp_values = left.sp_values.view(np.uint8) right_sp_values = right.sp_values.view(np.uint8) result_dtype = bool else: opname = f'sparse_{name}_{dtype}' left_sp_values = left.sp_values right_sp_values = right.sp_values if name in ['floordiv', 'mod'] and (right == 0).any() and (left.dtype.kind in 'iu'): opname = f'sparse_{name}_float64' left_sp_values = left_sp_values.astype('float64') right_sp_values = right_sp_values.astype('float64') sparse_op = getattr(splib, opname) with np.errstate(all='ignore'): (result, index, fill) = sparse_op(left_sp_values, left.sp_index, left.fill_value, right_sp_values, right.sp_index, right.fill_value) if name == 'divmod': return (_wrap_result(name, result[0], index, fill[0], dtype=result_dtype), _wrap_result(name, result[1], index, fill[1], dtype=result_dtype)) if result_dtype is None: result_dtype = result.dtype return _wrap_result(name, result, index, fill, dtype=result_dtype) def _wrap_result(name: str, data, sparse_index, fill_value, dtype: Dtype | None=None) -> SparseArray: if name.startswith('__'): name = name[2:-2] if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'): dtype = bool fill_value = lib.item_from_zerodim(fill_value) if is_bool_dtype(dtype): fill_value = bool(fill_value) return SparseArray(data, sparse_index=sparse_index, fill_value=fill_value, dtype=dtype) class SparseArray(OpsMixin, PandasObject, ExtensionArray): _subtyp = 'sparse_array' _hidden_attrs = PandasObject._hidden_attrs | frozenset([]) _sparse_index: SparseIndex _sparse_values: np.ndarray _dtype: SparseDtype def __init__(self, data, sparse_index=None, fill_value=None, kind: SparseIndexKind='integer', dtype: Dtype | None=None, copy: bool=False) -> None: if fill_value is None and isinstance(dtype, SparseDtype): fill_value = dtype.fill_value if isinstance(data, type(self)): if sparse_index is None: sparse_index = data.sp_index if fill_value is None: 
fill_value = data.fill_value if dtype is None: dtype = data.dtype data = data.sp_values if isinstance(dtype, str): try: dtype = SparseDtype.construct_from_string(dtype) except TypeError: dtype = pandas_dtype(dtype) if isinstance(dtype, SparseDtype): if fill_value is None: fill_value = dtype.fill_value dtype = dtype.subtype if is_scalar(data): raise TypeError(f'Cannot construct {type(self).__name__} from scalar data. Pass a sequence instead.') if dtype is not None: dtype = pandas_dtype(dtype) if data is None: data = np.array([], dtype=dtype) try: data = sanitize_array(data, index=None) except ValueError: if dtype is None: dtype = np.dtype(object) data = np.atleast_1d(np.asarray(data, dtype=dtype)) else: raise if copy: data = data.copy() if fill_value is None: fill_value_dtype = data.dtype if dtype is None else dtype if fill_value_dtype is None: fill_value = np.nan else: fill_value = na_value_for_dtype(fill_value_dtype) if isinstance(data, type(self)) and sparse_index is None: sparse_index = data._sparse_index sparse_values = np.asarray(data.sp_values, dtype=dtype) elif sparse_index is None: data = extract_array(data, extract_numpy=True) if not isinstance(data, np.ndarray): if isinstance(data.dtype, DatetimeTZDtype): warnings.warn(f'Creating SparseArray from {data.dtype} data loses timezone information. Cast to object before sparse to retain timezone information.', UserWarning, stacklevel=find_stack_level()) data = np.asarray(data, dtype='datetime64[ns]') if fill_value is NaT: fill_value = np.datetime64('NaT', 'ns') data = np.asarray(data) (sparse_values, sparse_index, fill_value) = _make_sparse(data, kind=kind, fill_value=fill_value, dtype=dtype) else: sparse_values = np.asarray(data, dtype=dtype) if len(sparse_values) != sparse_index.npoints: raise AssertionError(f'Non array-like type {type(sparse_values)} must have the same length as the index') self._sparse_index = sparse_index self._sparse_values = sparse_values self._dtype = SparseDtype(sparse_values.dtype, fill_value) @classmethod def _simple_new(cls, sparse_array: np.ndarray, sparse_index: SparseIndex, dtype: SparseDtype) -> Self: new = object.__new__(cls) new._sparse_index = sparse_index new._sparse_values = sparse_array new._dtype = dtype return new @classmethod def from_spmatrix(cls, data: spmatrix) -> Self: (length, ncol) = data.shape if ncol != 1: raise ValueError(f"'data' must have a single column, not '{ncol}'") data = data.tocsc() data.sort_indices() arr = data.data idx = data.indices zero = np.array(0, dtype=arr.dtype).item() dtype = SparseDtype(arr.dtype, zero) index = IntIndex(length, idx) return cls._simple_new(arr, index, dtype) def __array__(self, dtype: NpDtype | None=None, copy: bool | None=None) -> np.ndarray: fill_value = self.fill_value if self.sp_index.ngaps == 0: return self.sp_values if dtype is None: if self.sp_values.dtype.kind == 'M': if fill_value is NaT: fill_value = np.datetime64('NaT') try: dtype = np.result_type(self.sp_values.dtype, type(fill_value)) except TypeError: dtype = object out = np.full(self.shape, fill_value, dtype=dtype) out[self.sp_index.indices] = self.sp_values return out def __setitem__(self, key, value) -> None: msg = 'SparseArray does not support item assignment via setitem' raise TypeError(msg) @classmethod def _from_sequence(cls, scalars, *, dtype: Dtype | None=None, copy: bool=False) -> Self: return cls(scalars, dtype=dtype) @classmethod def _from_factorized(cls, values, original) -> Self: return cls(values, dtype=original.dtype) @property def sp_index(self) -> SparseIndex: return 
self._sparse_index @property def sp_values(self) -> np.ndarray: return self._sparse_values @property def dtype(self) -> SparseDtype: return self._dtype @property def fill_value(self): return self.dtype.fill_value @fill_value.setter def fill_value(self, value) -> None: self._dtype = SparseDtype(self.dtype.subtype, value) @property def kind(self) -> SparseIndexKind: if isinstance(self.sp_index, IntIndex): return 'integer' else: return 'block' @property def _valid_sp_values(self) -> np.ndarray: sp_vals = self.sp_values mask = notna(sp_vals) return sp_vals[mask] def __len__(self) -> int: return self.sp_index.length @property def _null_fill_value(self) -> bool: return self._dtype._is_na_fill_value @property def nbytes(self) -> int: return self.sp_values.nbytes + self.sp_index.nbytes @property def density(self) -> float: return self.sp_index.npoints / self.sp_index.length @property def npoints(self) -> int: return self.sp_index.npoints def isna(self) -> Self: dtype = SparseDtype(bool, self._null_fill_value) if self._null_fill_value: return type(self)._simple_new(isna(self.sp_values), self.sp_index, dtype) mask = np.full(len(self), False, dtype=np.bool_) mask[self.sp_index.indices] = isna(self.sp_values) return type(self)(mask, fill_value=False, dtype=dtype) def fillna(self, value, limit: int | None=None, copy: bool=True) -> Self: if limit is not None: raise ValueError('limit must be None') new_values = np.where(isna(self.sp_values), value, self.sp_values) if self._null_fill_value: new_dtype = SparseDtype(self.dtype.subtype, fill_value=value) else: new_dtype = self.dtype return self._simple_new(new_values, self._sparse_index, new_dtype) def shift(self, periods: int=1, fill_value=None) -> Self: if not len(self) or periods == 0: return self.copy() if isna(fill_value): fill_value = self.dtype.na_value subtype = np.result_type(fill_value, self.dtype.subtype) if subtype != self.dtype.subtype: arr = self.astype(SparseDtype(subtype, self.fill_value)) else: arr = self empty = self._from_sequence([fill_value] * min(abs(periods), len(self)), dtype=arr.dtype) if periods > 0: a = empty b = arr[:-periods] else: a = arr[abs(periods):] b = empty return arr._concat_same_type([a, b]) def _first_fill_value_loc(self): if len(self) == 0 or self.sp_index.npoints == len(self): return -1 indices = self.sp_index.indices if not len(indices) or indices[0] > 0: return 0 diff = np.r_[np.diff(indices), 2] return indices[(diff > 1).argmax()] + 1 @doc(ExtensionArray.duplicated) def duplicated(self, keep: Literal['first', 'last', False]='first') -> npt.NDArray[np.bool_]: values = np.asarray(self) mask = np.asarray(self.isna()) return algos.duplicated(values, keep=keep, mask=mask) def unique(self) -> Self: uniques = algos.unique(self.sp_values) if len(self.sp_values) != len(self): fill_loc = self._first_fill_value_loc() insert_loc = len(algos.unique(self.sp_values[:fill_loc])) uniques = np.insert(uniques, insert_loc, self.fill_value) return type(self)._from_sequence(uniques, dtype=self.dtype) def _values_for_factorize(self): return (np.asarray(self), self.fill_value) def factorize(self, use_na_sentinel: bool=True) -> tuple[np.ndarray, SparseArray]: (codes, uniques) = algos.factorize(np.asarray(self), use_na_sentinel=use_na_sentinel) uniques_sp = SparseArray(uniques, dtype=self.dtype) return (codes, uniques_sp) def value_counts(self, dropna: bool=True) -> Series: from pandas import Index, Series (keys, counts, _) = algos.value_counts_arraylike(self.sp_values, dropna=dropna) fcounts = self.sp_index.ngaps if fcounts > 0 and (not 
self._null_fill_value or not dropna): mask = isna(keys) if self._null_fill_value else keys == self.fill_value if mask.any(): counts[mask] += fcounts else: keys = np.insert(keys, 0, self.fill_value) counts = np.insert(counts, 0, fcounts) if not isinstance(keys, ABCIndex): index = Index(keys) else: index = keys return Series(counts, index=index, copy=False) @overload def __getitem__(self, key: ScalarIndexer) -> Any: ... @overload def __getitem__(self, key: SequenceIndexer | tuple[int | ellipsis, ...]) -> Self: ... def __getitem__(self, key: PositionalIndexer | tuple[int | ellipsis, ...]) -> Self | Any: if isinstance(key, tuple): key = unpack_tuple_and_ellipses(key) if key is Ellipsis: raise ValueError('Cannot slice with Ellipsis') if is_integer(key): return self._get_val_at(key) elif isinstance(key, tuple): data_slice = self.to_dense()[key] elif isinstance(key, slice): if key.step is None or key.step == 1: start = 0 if key.start is None else key.start if start < 0: start += len(self) end = len(self) if key.stop is None else key.stop if end < 0: end += len(self) indices = self.sp_index.indices keep_inds = np.flatnonzero((indices >= start) & (indices < end)) sp_vals = self.sp_values[keep_inds] sp_index = indices[keep_inds].copy() if start > 0: sp_index -= start new_len = len(range(len(self))[key]) new_sp_index = make_sparse_index(new_len, sp_index, self.kind) return type(self)._simple_new(sp_vals, new_sp_index, self.dtype) else: indices = np.arange(len(self), dtype=np.int32)[key] return self.take(indices) elif not is_list_like(key): raise IndexError('only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and integer or boolean arrays are valid indices') else: if isinstance(key, SparseArray): if is_bool_dtype(key): if isna(key.fill_value): return self.take(key.sp_index.indices[key.sp_values]) if not key.fill_value: return self.take(key.sp_index.indices) n = len(self) mask = np.full(n, True, dtype=np.bool_) mask[key.sp_index.indices] = False return self.take(np.arange(n)[mask]) else: key = np.asarray(key) key = check_array_indexer(self, key) if com.is_bool_indexer(key): key = cast(np.ndarray, key) return self.take(np.arange(len(key), dtype=np.int32)[key]) elif hasattr(key, '__len__'): return self.take(key) else: raise ValueError(f"Cannot slice with '{key}'") return type(self)(data_slice, kind=self.kind) def _get_val_at(self, loc): loc = validate_insert_loc(loc, len(self)) sp_loc = self.sp_index.lookup(loc) if sp_loc == -1: return self.fill_value else: val = self.sp_values[sp_loc] val = maybe_box_datetimelike(val, self.sp_values.dtype) return val def take(self, indices, *, allow_fill: bool=False, fill_value=None) -> Self: if is_scalar(indices): raise ValueError(f"'indices' must be an array, not a scalar '{indices}'.") indices = np.asarray(indices, dtype=np.int32) dtype = None if indices.size == 0: result = np.array([], dtype='object') dtype = self.dtype elif allow_fill: result = self._take_with_fill(indices, fill_value=fill_value) else: return self._take_without_fill(indices) return type(self)(result, fill_value=self.fill_value, kind=self.kind, dtype=dtype) def _take_with_fill(self, indices, fill_value=None) -> np.ndarray: if fill_value is None: fill_value = self.dtype.na_value if indices.min() < -1: raise ValueError("Invalid value in 'indices'. 
Must be between -1 and the length of the array.") if indices.max() >= len(self): raise IndexError("out of bounds value in 'indices'.") if len(self) == 0: if (indices == -1).all(): dtype = np.result_type(self.sp_values, type(fill_value)) taken = np.empty_like(indices, dtype=dtype) taken.fill(fill_value) return taken else: raise IndexError('cannot do a non-empty take from an empty axes.') sp_indexer = self.sp_index.lookup_array(indices) new_fill_indices = indices == -1 old_fill_indices = (sp_indexer == -1) & ~new_fill_indices if self.sp_index.npoints == 0 and old_fill_indices.all(): taken = np.full(sp_indexer.shape, fill_value=self.fill_value, dtype=self.dtype.subtype) elif self.sp_index.npoints == 0: _dtype = np.result_type(self.dtype.subtype, type(fill_value)) taken = np.full(sp_indexer.shape, fill_value=fill_value, dtype=_dtype) taken[old_fill_indices] = self.fill_value else: taken = self.sp_values.take(sp_indexer) m0 = sp_indexer[old_fill_indices] < 0 m1 = sp_indexer[new_fill_indices] < 0 result_type = taken.dtype if m0.any(): result_type = np.result_type(result_type, type(self.fill_value)) taken = taken.astype(result_type) taken[old_fill_indices] = self.fill_value if m1.any(): result_type = np.result_type(result_type, type(fill_value)) taken = taken.astype(result_type) taken[new_fill_indices] = fill_value return taken def _take_without_fill(self, indices) -> Self: to_shift = indices < 0 n = len(self) if indices.max() >= n or indices.min() < -n: if n == 0: raise IndexError('cannot do a non-empty take from an empty axes.') raise IndexError("out of bounds value in 'indices'.") if to_shift.any(): indices = indices.copy() indices[to_shift] += n sp_indexer = self.sp_index.lookup_array(indices) value_mask = sp_indexer != -1 new_sp_values = self.sp_values[sp_indexer[value_mask]] value_indices = np.flatnonzero(value_mask).astype(np.int32, copy=False) new_sp_index = make_sparse_index(len(indices), value_indices, kind=self.kind) return type(self)._simple_new(new_sp_values, new_sp_index, dtype=self.dtype) def searchsorted(self, v: ArrayLike | object, side: Literal['left', 'right']='left', sorter: NumpySorter | None=None) -> npt.NDArray[np.intp] | np.intp: if get_option('performance_warnings'): msg = 'searchsorted requires high memory usage.' 
warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level()) v = np.asarray(v) return np.asarray(self, dtype=self.dtype.subtype).searchsorted(v, side, sorter) def copy(self) -> Self: values = self.sp_values.copy() return self._simple_new(values, self.sp_index, self.dtype) @classmethod def _concat_same_type(cls, to_concat: Sequence[Self]) -> Self: fill_value = to_concat[0].fill_value values = [] length = 0 if to_concat: sp_kind = to_concat[0].kind else: sp_kind = 'integer' sp_index: SparseIndex if sp_kind == 'integer': indices = [] for arr in to_concat: int_idx = arr.sp_index.indices.copy() int_idx += length length += arr.sp_index.length values.append(arr.sp_values) indices.append(int_idx) data = np.concatenate(values) indices_arr = np.concatenate(indices) sp_index = IntIndex(length, indices_arr) else: blengths = [] blocs = [] for arr in to_concat: block_idx = arr.sp_index.to_block_index() values.append(arr.sp_values) blocs.append(block_idx.blocs.copy() + length) blengths.append(block_idx.blengths) length += arr.sp_index.length data = np.concatenate(values) blocs_arr = np.concatenate(blocs) blengths_arr = np.concatenate(blengths) sp_index = BlockIndex(length, blocs_arr, blengths_arr) return cls(data, sparse_index=sp_index, fill_value=fill_value) def astype(self, dtype: AstypeArg | None=None, copy: bool=True): if dtype == self._dtype: if not copy: return self else: return self.copy() future_dtype = pandas_dtype(dtype) if not isinstance(future_dtype, SparseDtype): values = np.asarray(self) values = ensure_wrapped_if_datetimelike(values) return astype_array(values, dtype=future_dtype, copy=False) dtype = self.dtype.update_dtype(dtype) subtype = pandas_dtype(dtype._subtype_with_str) subtype = cast(np.dtype, subtype) values = ensure_wrapped_if_datetimelike(self.sp_values) sp_values = astype_array(values, subtype, copy=copy) sp_values = np.asarray(sp_values) return self._simple_new(sp_values, self.sp_index, dtype) def map(self, mapper, na_action: Literal['ignore'] | None=None) -> Self: is_map = isinstance(mapper, (abc.Mapping, ABCSeries)) fill_val = self.fill_value if na_action is None or notna(fill_val): fill_val = mapper.get(fill_val, fill_val) if is_map else mapper(fill_val) def func(sp_val): new_sp_val = mapper.get(sp_val, None) if is_map else mapper(sp_val) if new_sp_val is fill_val or new_sp_val == fill_val: msg = 'fill value in the sparse values not supported' raise ValueError(msg) return new_sp_val sp_values = [func(x) for x in self.sp_values] return type(self)(sp_values, sparse_index=self.sp_index, fill_value=fill_val) def to_dense(self) -> np.ndarray: return np.asarray(self, dtype=self.sp_values.dtype) def _where(self, mask, value): naive_implementation = np.where(mask, self, value) dtype = SparseDtype(naive_implementation.dtype, fill_value=self.fill_value) result = type(self)._from_sequence(naive_implementation, dtype=dtype) return result def __setstate__(self, state) -> None: if isinstance(state, tuple): (nd_state, (fill_value, sp_index)) = state sparse_values = np.array([]) sparse_values.__setstate__(nd_state) self._sparse_values = sparse_values self._sparse_index = sp_index self._dtype = SparseDtype(sparse_values.dtype, fill_value) else: self.__dict__.update(state) def nonzero(self) -> tuple[npt.NDArray[np.int32]]: if self.fill_value == 0: return (self.sp_index.indices,) else: return (self.sp_index.indices[self.sp_values != 0],) def _reduce(self, name: str, *, skipna: bool=True, keepdims: bool=False, **kwargs): method = getattr(self, name, None) if method is None: raise 
TypeError(f'cannot perform {name} with type {self.dtype}') if skipna: arr = self else: arr = self.dropna() result = getattr(arr, name)(**kwargs) if keepdims: return type(self)([result], dtype=self.dtype) else: return result def all(self, axis=None, *args, **kwargs): nv.validate_all(args, kwargs) values = self.sp_values if len(values) != len(self) and (not np.all(self.fill_value)): return False return values.all() def any(self, axis: AxisInt=0, *args, **kwargs) -> bool: nv.validate_any(args, kwargs) values = self.sp_values if len(values) != len(self) and np.any(self.fill_value): return True return values.any().item() def sum(self, axis: AxisInt=0, min_count: int=0, skipna: bool=True, *args, **kwargs) -> Scalar: nv.validate_sum(args, kwargs) valid_vals = self._valid_sp_values sp_sum = valid_vals.sum() has_na = self.sp_index.ngaps > 0 and (not self._null_fill_value) if has_na and (not skipna): return na_value_for_dtype(self.dtype.subtype, compat=False) if self._null_fill_value: if check_below_min_count(valid_vals.shape, None, min_count): return na_value_for_dtype(self.dtype.subtype, compat=False) return sp_sum else: nsparse = self.sp_index.ngaps if check_below_min_count(valid_vals.shape, None, min_count - nsparse): return na_value_for_dtype(self.dtype.subtype, compat=False) return sp_sum + self.fill_value * nsparse def cumsum(self, axis: AxisInt=0, *args, **kwargs) -> SparseArray: nv.validate_cumsum(args, kwargs) if axis is not None and axis >= self.ndim: raise ValueError(f'axis(={axis}) out of bounds') if not self._null_fill_value: return SparseArray(self.to_dense()).cumsum() return SparseArray(self.sp_values.cumsum(), sparse_index=self.sp_index, fill_value=self.fill_value) def mean(self, axis: Axis=0, *args, **kwargs): nv.validate_mean(args, kwargs) valid_vals = self._valid_sp_values sp_sum = valid_vals.sum() ct = len(valid_vals) if self._null_fill_value: return sp_sum / ct else: nsparse = self.sp_index.ngaps return (sp_sum + self.fill_value * nsparse) / (ct + nsparse) def max(self, *, axis: AxisInt | None=None, skipna: bool=True): nv.validate_minmax_axis(axis, self.ndim) return self._min_max('max', skipna=skipna) def min(self, *, axis: AxisInt | None=None, skipna: bool=True): nv.validate_minmax_axis(axis, self.ndim) return self._min_max('min', skipna=skipna) def _min_max(self, kind: Literal['min', 'max'], skipna: bool) -> Scalar: valid_vals = self._valid_sp_values has_nonnull_fill_vals = not self._null_fill_value and self.sp_index.ngaps > 0 if len(valid_vals) > 0: sp_min_max = getattr(valid_vals, kind)() if has_nonnull_fill_vals: func = max if kind == 'max' else min return func(sp_min_max, self.fill_value) elif skipna: return sp_min_max elif self.sp_index.ngaps == 0: return sp_min_max else: return na_value_for_dtype(self.dtype.subtype, compat=False) elif has_nonnull_fill_vals: return self.fill_value else: return na_value_for_dtype(self.dtype.subtype, compat=False) def _argmin_argmax(self, kind: Literal['argmin', 'argmax']) -> int: values = self._sparse_values index = self._sparse_index.indices mask = np.asarray(isna(values)) func = np.argmax if kind == 'argmax' else np.argmin idx = np.arange(values.shape[0]) non_nans = values[~mask] non_nan_idx = idx[~mask] _candidate = non_nan_idx[func(non_nans)] candidate = index[_candidate] if isna(self.fill_value): return candidate if kind == 'argmin' and self[candidate] < self.fill_value: return candidate if kind == 'argmax' and self[candidate] > self.fill_value: return candidate _loc = self._first_fill_value_loc() if _loc == -1: return candidate 
else: return _loc def argmax(self, skipna: bool=True) -> int: validate_bool_kwarg(skipna, 'skipna') if not skipna and self._hasna: raise ValueError('Encountered an NA value with skipna=False') return self._argmin_argmax('argmax') def argmin(self, skipna: bool=True) -> int: validate_bool_kwarg(skipna, 'skipna') if not skipna and self._hasna: raise ValueError('Encountered an NA value with skipna=False') return self._argmin_argmax('argmin') _HANDLED_TYPES = (np.ndarray, numbers.Number) def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs): out = kwargs.get('out', ()) for x in inputs + out: if not isinstance(x, self._HANDLED_TYPES + (SparseArray,)): return NotImplemented result = arraylike.maybe_dispatch_ufunc_to_dunder_op(self, ufunc, method, *inputs, **kwargs) if result is not NotImplemented: return result if 'out' in kwargs: res = arraylike.dispatch_ufunc_with_out(self, ufunc, method, *inputs, **kwargs) return res if method == 'reduce': result = arraylike.dispatch_reduction_ufunc(self, ufunc, method, *inputs, **kwargs) if result is not NotImplemented: return result if len(inputs) == 1: sp_values = getattr(ufunc, method)(self.sp_values, **kwargs) fill_value = getattr(ufunc, method)(self.fill_value, **kwargs) if ufunc.nout > 1: arrays = tuple((self._simple_new(sp_value, self.sp_index, SparseDtype(sp_value.dtype, fv)) for (sp_value, fv) in zip(sp_values, fill_value))) return arrays elif method == 'reduce': return sp_values return self._simple_new(sp_values, self.sp_index, SparseDtype(sp_values.dtype, fill_value)) new_inputs = tuple((np.asarray(x) for x in inputs)) result = getattr(ufunc, method)(*new_inputs, **kwargs) if out: if len(out) == 1: out = out[0] return out if ufunc.nout > 1: return tuple((type(self)(x) for x in result)) elif method == 'at': return None else: return type(self)(result) def _arith_method(self, other, op): op_name = op.__name__ if isinstance(other, SparseArray): return _sparse_array_op(self, other, op, op_name) elif is_scalar(other): with np.errstate(all='ignore'): fill = op(_get_fill(self), np.asarray(other)) result = op(self.sp_values, other) if op_name == 'divmod': (left, right) = result (lfill, rfill) = fill return (_wrap_result(op_name, left, self.sp_index, lfill), _wrap_result(op_name, right, self.sp_index, rfill)) return _wrap_result(op_name, result, self.sp_index, fill) else: other = np.asarray(other) with np.errstate(all='ignore'): if len(self) != len(other): raise AssertionError(f'length mismatch: {len(self)} vs. 
{len(other)}') if not isinstance(other, SparseArray): dtype = getattr(other, 'dtype', None) other = SparseArray(other, fill_value=self.fill_value, dtype=dtype) return _sparse_array_op(self, other, op, op_name) def _cmp_method(self, other, op) -> SparseArray: if not is_scalar(other) and (not isinstance(other, type(self))): other = np.asarray(other) if isinstance(other, np.ndarray): other = SparseArray(other, fill_value=self.fill_value) if isinstance(other, SparseArray): if len(self) != len(other): raise ValueError(f'operands have mismatched length {len(self)} and {len(other)}') op_name = op.__name__.strip('_') return _sparse_array_op(self, other, op, op_name) else: fill_value = op(self.fill_value, other) result = np.full(len(self), fill_value, dtype=np.bool_) result[self.sp_index.indices] = op(self.sp_values, other) return type(self)(result, fill_value=fill_value, dtype=np.bool_) _logical_method = _cmp_method def _unary_method(self, op) -> SparseArray: fill_value = op(np.array(self.fill_value)).item() dtype = SparseDtype(self.dtype.subtype, fill_value) if isna(self.fill_value) or fill_value == self.fill_value: values = op(self.sp_values) return type(self)._simple_new(values, self.sp_index, self.dtype) return type(self)(op(self.to_dense()), dtype=dtype) def __pos__(self) -> SparseArray: return self._unary_method(operator.pos) def __neg__(self) -> SparseArray: return self._unary_method(operator.neg) def __invert__(self) -> SparseArray: return self._unary_method(operator.invert) def __abs__(self) -> SparseArray: return self._unary_method(operator.abs) def __repr__(self) -> str: pp_str = printing.pprint_thing(self) pp_fill = printing.pprint_thing(self.fill_value) pp_index = printing.pprint_thing(self.sp_index) return f'{pp_str}\nFill: {pp_fill}\n{pp_index}' def _formatter(self, boxed: bool=False) -> None: return None def _make_sparse(arr: np.ndarray, kind: SparseIndexKind='block', fill_value=None, dtype: np.dtype | None=None): assert isinstance(arr, np.ndarray) if arr.ndim > 1: raise TypeError('expected dimension <= 1 data') if fill_value is None: fill_value = na_value_for_dtype(arr.dtype) if isna(fill_value): mask = notna(arr) else: if is_string_dtype(arr.dtype): arr = arr.astype(object) if is_object_dtype(arr.dtype): mask = splib.make_mask_object_ndarray(arr, fill_value) else: mask = arr != fill_value length = len(arr) if length != len(mask): indices = mask.sp_index.indices else: indices = mask.nonzero()[0].astype(np.int32) index = make_sparse_index(length, indices, kind) sparsified_values = arr[mask] if dtype is not None: sparsified_values = ensure_wrapped_if_datetimelike(sparsified_values) sparsified_values = astype_array(sparsified_values, dtype=dtype) sparsified_values = np.asarray(sparsified_values) return (sparsified_values, index, fill_value) @overload def make_sparse_index(length: int, indices, kind: Literal['block']) -> BlockIndex: ... @overload def make_sparse_index(length: int, indices, kind: Literal['integer']) -> IntIndex: ... 
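# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the pandas source above or below): the
# overload stubs above and the make_sparse_index implementation that follows
# dispatch on ``kind`` to build either a BlockIndex (runs of consecutive
# non-fill positions) or an IntIndex (explicit positions). The helper name
# below is hypothetical and only exercises the public SparseArray API; exact
# reprs may differ between pandas versions.
def _demo_sparse_index_kinds() -> None:
    import numpy as np
    import pandas as pd

    data = np.array([0, 0, 1, 2, 0, 3])

    block = pd.arrays.SparseArray(data, fill_value=0, kind="block")
    integer = pd.arrays.SparseArray(data, fill_value=0, kind="integer")

    # Both layouts store only the non-fill values ...
    print(block.sp_values)    # [1 2 3]
    # ... but describe their positions differently.
    print(block.sp_index)     # BlockIndex: block locations [2, 5], lengths [2, 1]
    print(integer.sp_index)   # IntIndex: indices [2, 3, 5]
# ---------------------------------------------------------------------------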
def make_sparse_index(length: int, indices, kind: SparseIndexKind) -> SparseIndex: index: SparseIndex if kind == 'block': (locs, lens) = splib.get_blocks(indices) index = BlockIndex(length, locs, lens) elif kind == 'integer': index = IntIndex(length, indices) else: raise ValueError('must be block or integer type') return index # File: pandas-main/pandas/core/arrays/sparse/scipy_sparse.py """""" from __future__ import annotations from typing import TYPE_CHECKING from pandas._libs import lib from pandas.core.dtypes.missing import notna from pandas.core.algorithms import factorize from pandas.core.indexes.api import MultiIndex from pandas.core.series import Series if TYPE_CHECKING: from collections.abc import Iterable import numpy as np import scipy.sparse from pandas._typing import IndexLabel, npt def _check_is_partition(parts: Iterable, whole: Iterable) -> None: whole = set(whole) parts = [set(x) for x in parts] if set.intersection(*parts) != set(): raise ValueError('Is not a partition because intersection is not null.') if set.union(*parts) != whole: raise ValueError('Is not a partition because union is not the whole.') def _levels_to_axis(ss, levels: tuple[int] | list[int], valid_ilocs: npt.NDArray[np.intp], sort_labels: bool=False) -> tuple[npt.NDArray[np.intp], list[IndexLabel]]: if sort_labels and len(levels) == 1: ax_coords = ss.index.codes[levels[0]][valid_ilocs] ax_labels = ss.index.levels[levels[0]] else: levels_values = lib.fast_zip([ss.index.get_level_values(lvl).to_numpy() for lvl in levels]) (codes, ax_labels) = factorize(levels_values, sort=sort_labels) ax_coords = codes[valid_ilocs] ax_labels = ax_labels.tolist() return (ax_coords, ax_labels) def _to_ijv(ss, row_levels: tuple[int] | list[int]=(0,), column_levels: tuple[int] | list[int]=(1,), sort_labels: bool=False) -> tuple[np.ndarray, npt.NDArray[np.intp], npt.NDArray[np.intp], list[IndexLabel], list[IndexLabel]]: _check_is_partition([row_levels, column_levels], range(ss.index.nlevels)) sp_vals = ss.array.sp_values na_mask = notna(sp_vals) values = sp_vals[na_mask] valid_ilocs = ss.array.sp_index.indices[na_mask] (i_coords, i_labels) = _levels_to_axis(ss, row_levels, valid_ilocs, sort_labels=sort_labels) (j_coords, j_labels) = _levels_to_axis(ss, column_levels, valid_ilocs, sort_labels=sort_labels) return (values, i_coords, j_coords, i_labels, j_labels) def sparse_series_to_coo(ss: Series, row_levels: Iterable[int]=(0,), column_levels: Iterable[int]=(1,), sort_labels: bool=False) -> tuple[scipy.sparse.coo_matrix, list[IndexLabel], list[IndexLabel]]: import scipy.sparse if ss.index.nlevels < 2: raise ValueError('to_coo requires MultiIndex with nlevels >= 2.') if not ss.index.is_unique: raise ValueError('Duplicate index entries are not allowed in to_coo transformation.') row_levels = [ss.index._get_level_number(x) for x in row_levels] column_levels = [ss.index._get_level_number(x) for x in column_levels] (v, i, j, rows, columns) = _to_ijv(ss, row_levels=row_levels, column_levels=column_levels, sort_labels=sort_labels) sparse_matrix = scipy.sparse.coo_matrix((v, (i, j)), shape=(len(rows), len(columns))) return (sparse_matrix, rows, columns) def coo_to_sparse_series(A: scipy.sparse.coo_matrix, dense_index: bool=False) -> Series: from pandas import SparseDtype try: ser = Series(A.data, MultiIndex.from_arrays((A.row, A.col)), copy=False) except AttributeError as err: raise TypeError(f'Expected coo_matrix. 
Got {type(A).__name__} instead.') from err ser = ser.sort_index() ser = ser.astype(SparseDtype(ser.dtype)) if dense_index: ind = MultiIndex.from_product([A.row, A.col]) ser = ser.reindex(ind) return ser # File: pandas-main/pandas/core/arrays/string_.py from __future__ import annotations import operator from typing import TYPE_CHECKING, Any, Literal, cast import numpy as np from pandas._config import get_option, using_string_dtype from pandas._libs import lib, missing as libmissing from pandas._libs.arrays import NDArrayBacked from pandas._libs.lib import ensure_string_array from pandas.compat import HAS_PYARROW, pa_version_under10p1 from pandas.compat.numpy import function as nv from pandas.util._decorators import doc from pandas.core.dtypes.base import ExtensionDtype, StorageExtensionDtype, register_extension_dtype from pandas.core.dtypes.common import is_array_like, is_bool_dtype, is_integer_dtype, is_object_dtype, is_string_dtype, pandas_dtype from pandas.core import nanops, ops from pandas.core.algorithms import isin from pandas.core.array_algos import masked_reductions from pandas.core.arrays.base import ExtensionArray from pandas.core.arrays.floating import FloatingArray, FloatingDtype from pandas.core.arrays.integer import IntegerArray, IntegerDtype from pandas.core.arrays.numpy_ import NumpyExtensionArray from pandas.core.construction import extract_array from pandas.core.indexers import check_array_indexer from pandas.core.missing import isna if TYPE_CHECKING: import pyarrow from pandas._typing import ArrayLike, AxisInt, Dtype, DtypeObj, NumpySorter, NumpyValueArrayLike, Scalar, Self, npt, type_t from pandas import Series @register_extension_dtype class StringDtype(StorageExtensionDtype): @property def name(self) -> str: if self._na_value is libmissing.NA: return 'string' else: return 'str' @property def na_value(self) -> libmissing.NAType | float: return self._na_value _metadata = ('storage', '_na_value') def __init__(self, storage: str | None=None, na_value: libmissing.NAType | float=libmissing.NA) -> None: if storage is None: if na_value is not libmissing.NA: storage = get_option('mode.string_storage') if storage == 'auto': if HAS_PYARROW: storage = 'pyarrow' else: storage = 'python' else: storage = get_option('mode.string_storage') if storage == 'auto': storage = 'python' if storage == 'pyarrow_numpy': storage = 'pyarrow' na_value = np.nan if storage not in {'python', 'pyarrow'}: raise ValueError(f"Storage must be 'python' or 'pyarrow'. 
Got {storage} instead.") if storage == 'pyarrow' and pa_version_under10p1: raise ImportError('pyarrow>=10.0.1 is required for PyArrow backed StringArray.') if isinstance(na_value, float) and np.isnan(na_value): na_value = np.nan elif na_value is not libmissing.NA: raise ValueError(f"'na_value' must be np.nan or pd.NA, got {na_value}") self.storage = cast(str, storage) self._na_value = na_value def __repr__(self) -> str: if self._na_value is libmissing.NA: return f'{self.name}[{self.storage}]' else: return self.name def __eq__(self, other: object) -> bool: if isinstance(other, str): if other == 'string' or other == self.name: return True try: other = self.construct_from_string(other) except (TypeError, ImportError): return False if isinstance(other, type(self)): return self.storage == other.storage and self.na_value is other.na_value return False def __hash__(self) -> int: return super().__hash__() def __reduce__(self): return (StringDtype, (self.storage, self.na_value)) @property def type(self) -> type[str]: return str @classmethod def construct_from_string(cls, string) -> Self: if not isinstance(string, str): raise TypeError(f"'construct_from_string' expects a string, got {type(string)}") if string == 'string': return cls() elif string == 'str' and using_string_dtype(): return cls(na_value=np.nan) elif string == 'string[python]': return cls(storage='python') elif string == 'string[pyarrow]': return cls(storage='pyarrow') elif string == 'string[pyarrow_numpy]': return cls(storage='pyarrow_numpy') else: raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'") def construct_array_type(self) -> type_t[BaseStringArray]: from pandas.core.arrays.string_arrow import ArrowStringArray, ArrowStringArrayNumpySemantics if self.storage == 'python' and self._na_value is libmissing.NA: return StringArray elif self.storage == 'pyarrow' and self._na_value is libmissing.NA: return ArrowStringArray elif self.storage == 'python': return StringArrayNumpySemantics else: return ArrowStringArrayNumpySemantics def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: storages = set() na_values = set() for dtype in dtypes: if isinstance(dtype, StringDtype): storages.add(dtype.storage) na_values.add(dtype.na_value) elif isinstance(dtype, np.dtype) and dtype.kind in ('U', 'T'): continue else: return None if len(storages) == 2: storage = 'pyarrow' else: storage = next(iter(storages)) na_value: libmissing.NAType | float if len(na_values) == 2: na_value = libmissing.NA else: na_value = next(iter(na_values)) return StringDtype(storage=storage, na_value=na_value) def __from_arrow__(self, array: pyarrow.Array | pyarrow.ChunkedArray) -> BaseStringArray: if self.storage == 'pyarrow': if self._na_value is libmissing.NA: from pandas.core.arrays.string_arrow import ArrowStringArray return ArrowStringArray(array) else: from pandas.core.arrays.string_arrow import ArrowStringArrayNumpySemantics return ArrowStringArrayNumpySemantics(array) else: import pyarrow if isinstance(array, pyarrow.Array): chunks = [array] else: chunks = array.chunks results = [] for arr in chunks: arr = arr.to_numpy(zero_copy_only=False) arr = ensure_string_array(arr, na_value=self.na_value) results.append(arr) if len(chunks) == 0: arr = np.array([], dtype=object) else: arr = np.concatenate(results) new_string_array = StringArray.__new__(StringArray) NDArrayBacked.__init__(new_string_array, arr, self) return new_string_array class BaseStringArray(ExtensionArray): dtype: StringDtype @doc(ExtensionArray.tolist) def tolist(self) -> 
list: if self.ndim > 1: return [x.tolist() for x in self] return list(self.to_numpy()) @classmethod def _from_scalars(cls, scalars, dtype: DtypeObj) -> Self: if lib.infer_dtype(scalars, skipna=True) not in ['string', 'empty']: raise ValueError return cls._from_sequence(scalars, dtype=dtype) def _str_map(self, f, na_value=None, dtype: Dtype | None=None, convert: bool=True): if self.dtype.na_value is np.nan: return self._str_map_nan_semantics(f, na_value=na_value, dtype=dtype) from pandas.arrays import BooleanArray if dtype is None: dtype = self.dtype if na_value is None: na_value = self.dtype.na_value mask = isna(self) arr = np.asarray(self) if is_integer_dtype(dtype) or is_bool_dtype(dtype): constructor: type[IntegerArray | BooleanArray] if is_integer_dtype(dtype): constructor = IntegerArray else: constructor = BooleanArray na_value_is_na = isna(na_value) if na_value_is_na: na_value = 1 elif dtype == np.dtype('bool'): na_value = bool(na_value) result = lib.map_infer_mask(arr, f, mask.view('uint8'), convert=False, na_value=na_value, dtype=np.dtype(cast(type, dtype))) if not na_value_is_na: mask[:] = False return constructor(result, mask) else: return self._str_map_str_or_object(dtype, na_value, arr, f, mask) def _str_map_str_or_object(self, dtype, na_value, arr: np.ndarray, f, mask: npt.NDArray[np.bool_]): if is_string_dtype(dtype) and (not is_object_dtype(dtype)): result = lib.map_infer_mask(arr, f, mask.view('uint8'), convert=False, na_value=na_value) if self.dtype.storage == 'pyarrow': import pyarrow as pa result = pa.array(result, mask=mask, type=pa.large_string(), from_pandas=True) return type(self)(result) else: return lib.map_infer_mask(arr, f, mask.view('uint8')) def _str_map_nan_semantics(self, f, na_value=None, dtype: Dtype | None=None): if dtype is None: dtype = self.dtype if na_value is None: na_value = self.dtype.na_value mask = isna(self) arr = np.asarray(self) if is_integer_dtype(dtype) or is_bool_dtype(dtype): na_value_is_na = isna(na_value) if na_value_is_na: if is_integer_dtype(dtype): na_value = 0 else: na_value = True result = lib.map_infer_mask(arr, f, mask.view('uint8'), convert=False, na_value=na_value, dtype=np.dtype(cast(type, dtype))) if na_value_is_na and mask.any(): if is_integer_dtype(dtype): result = result.astype('float64') else: result = result.astype('object') result[mask] = np.nan return result else: return self._str_map_str_or_object(dtype, na_value, arr, f, mask) class StringArray(BaseStringArray, NumpyExtensionArray): _typ = 'extension' _storage = 'python' _na_value: libmissing.NAType | float = libmissing.NA def __init__(self, values, copy: bool=False) -> None: values = extract_array(values) super().__init__(values, copy=copy) if not isinstance(values, type(self)): self._validate() NDArrayBacked.__init__(self, self._ndarray, StringDtype(storage=self._storage, na_value=self._na_value)) def _validate(self) -> None: if len(self._ndarray) and (not lib.is_string_array(self._ndarray, skipna=True)): raise ValueError('StringArray requires a sequence of strings or pandas.NA') if self._ndarray.dtype != 'object': raise ValueError(f"StringArray requires a sequence of strings or pandas.NA. 
Got '{self._ndarray.dtype}' dtype instead.") if self._ndarray.ndim > 2: lib.convert_nans_to_NA(self._ndarray.ravel('K')) else: lib.convert_nans_to_NA(self._ndarray) def _validate_scalar(self, value): if isna(value): return self.dtype.na_value elif not isinstance(value, str): raise TypeError(f"Cannot set non-string value '{value}' into a string array.") return value @classmethod def _from_sequence(cls, scalars, *, dtype: Dtype | None=None, copy: bool=False) -> Self: if dtype and (not (isinstance(dtype, str) and dtype == 'string')): dtype = pandas_dtype(dtype) assert isinstance(dtype, StringDtype) and dtype.storage == 'python' elif using_string_dtype(): dtype = StringDtype(storage='python', na_value=np.nan) else: dtype = StringDtype(storage='python') from pandas.core.arrays.masked import BaseMaskedArray na_value = dtype.na_value if isinstance(scalars, BaseMaskedArray): na_values = scalars._mask result = scalars._data result = lib.ensure_string_array(result, copy=copy, convert_na_value=False) result[na_values] = na_value else: if lib.is_pyarrow_array(scalars): scalars = np.array(scalars) result = lib.ensure_string_array(scalars, na_value=na_value, copy=copy) new_string_array = cls.__new__(cls) NDArrayBacked.__init__(new_string_array, result, dtype) return new_string_array @classmethod def _from_sequence_of_strings(cls, strings, *, dtype: ExtensionDtype, copy: bool=False) -> Self: return cls._from_sequence(strings, dtype=dtype, copy=copy) @classmethod def _empty(cls, shape, dtype) -> StringArray: values = np.empty(shape, dtype=object) values[:] = libmissing.NA return cls(values).astype(dtype, copy=False) def __arrow_array__(self, type=None): import pyarrow as pa if type is None: type = pa.string() values = self._ndarray.copy() values[self.isna()] = None return pa.array(values, type=type, from_pandas=True) def _values_for_factorize(self) -> tuple[np.ndarray, libmissing.NAType | float]: arr = self._ndarray return (arr, self.dtype.na_value) def __setitem__(self, key, value) -> None: value = extract_array(value, extract_numpy=True) if isinstance(value, type(self)): value = value._ndarray key = check_array_indexer(self, key) scalar_key = lib.is_scalar(key) scalar_value = lib.is_scalar(value) if scalar_key and (not scalar_value): raise ValueError('setting an array element with a sequence.') if scalar_value: if isna(value): value = self.dtype.na_value elif not isinstance(value, str): raise TypeError(f"Cannot set non-string value '{value}' into a StringArray.") else: if not is_array_like(value): value = np.asarray(value, dtype=object) else: value = np.asarray(value) if len(value) and (not lib.is_string_array(value, skipna=True)): raise TypeError('Must provide strings.') mask = isna(value) if mask.any(): value = value.copy() value[isna(value)] = self.dtype.na_value super().__setitem__(key, value) def _putmask(self, mask: npt.NDArray[np.bool_], value) -> None: ExtensionArray._putmask(self, mask, value) def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]: if isinstance(values, BaseStringArray) or (isinstance(values, ExtensionArray) and is_string_dtype(values.dtype)): values = values.astype(self.dtype, copy=False) else: if not lib.is_string_array(np.asarray(values), skipna=True): values = np.array([val for val in values if isinstance(val, str) or isna(val)], dtype=object) if not len(values): return np.zeros(self.shape, dtype=bool) values = self._from_sequence(values, dtype=self.dtype) return isin(np.asarray(self), np.asarray(values)) def astype(self, dtype, copy: bool=True): dtype = 
pandas_dtype(dtype) if dtype == self.dtype: if copy: return self.copy() return self elif isinstance(dtype, IntegerDtype): arr = self._ndarray.copy() mask = self.isna() arr[mask] = 0 values = arr.astype(dtype.numpy_dtype) return IntegerArray(values, mask, copy=False) elif isinstance(dtype, FloatingDtype): arr_ea = self.copy() mask = self.isna() arr_ea[mask] = '0' values = arr_ea.astype(dtype.numpy_dtype) return FloatingArray(values, mask, copy=False) elif isinstance(dtype, ExtensionDtype): return ExtensionArray.astype(self, dtype, copy) elif np.issubdtype(dtype, np.floating): arr = self._ndarray.copy() mask = self.isna() arr[mask] = 0 values = arr.astype(dtype) values[mask] = np.nan return values return super().astype(dtype, copy) def _reduce(self, name: str, *, skipna: bool=True, keepdims: bool=False, axis: AxisInt | None=0, **kwargs): if self.dtype.na_value is np.nan and name in ['any', 'all']: if name == 'any': return nanops.nanany(self._ndarray, skipna=skipna) else: return nanops.nanall(self._ndarray, skipna=skipna) if name in ['min', 'max']: result = getattr(self, name)(skipna=skipna, axis=axis) if keepdims: return self._from_sequence([result], dtype=self.dtype) return result raise TypeError(f"Cannot perform reduction '{name}' with string dtype") def _wrap_reduction_result(self, axis: AxisInt | None, result) -> Any: if self.dtype.na_value is np.nan and result is libmissing.NA: return np.nan return super()._wrap_reduction_result(axis, result) def min(self, axis=None, skipna: bool=True, **kwargs) -> Scalar: nv.validate_min((), kwargs) result = masked_reductions.min(values=self.to_numpy(), mask=self.isna(), skipna=skipna) return self._wrap_reduction_result(axis, result) def max(self, axis=None, skipna: bool=True, **kwargs) -> Scalar: nv.validate_max((), kwargs) result = masked_reductions.max(values=self.to_numpy(), mask=self.isna(), skipna=skipna) return self._wrap_reduction_result(axis, result) def value_counts(self, dropna: bool=True) -> Series: from pandas.core.algorithms import value_counts_internal as value_counts result = value_counts(self._ndarray, sort=False, dropna=dropna) result.index = result.index.astype(self.dtype) if self.dtype.na_value is libmissing.NA: result = result.astype('Int64') return result def memory_usage(self, deep: bool=False) -> int: result = self._ndarray.nbytes if deep: return result + lib.memory_usage_of_objects(self._ndarray) return result @doc(ExtensionArray.searchsorted) def searchsorted(self, value: NumpyValueArrayLike | ExtensionArray, side: Literal['left', 'right']='left', sorter: NumpySorter | None=None) -> npt.NDArray[np.intp] | np.intp: if self._hasna: raise ValueError('searchsorted requires array to be sorted, which is impossible with NAs present.') return super().searchsorted(value=value, side=side, sorter=sorter) def _cmp_method(self, other, op): from pandas.arrays import BooleanArray if isinstance(other, StringArray): other = other._ndarray mask = isna(self) | isna(other) valid = ~mask if not lib.is_scalar(other): if len(other) != len(self): raise ValueError(f'Lengths of operands do not match: {len(self)} != {len(other)}') if not is_array_like(other): other = np.asarray(other) other = other[valid] other = np.asarray(other) if op.__name__ in ops.ARITHMETIC_BINOPS: result = np.empty_like(self._ndarray, dtype='object') result[mask] = self.dtype.na_value result[valid] = op(self._ndarray[valid], other) return self._from_backing_data(result) else: result = np.zeros(len(self._ndarray), dtype='bool') result[valid] = op(self._ndarray[valid], other) 
res_arr = BooleanArray(result, mask) if self.dtype.na_value is np.nan: if op == operator.ne: return res_arr.to_numpy(np.bool_, na_value=True) else: return res_arr.to_numpy(np.bool_, na_value=False) return res_arr _arith_method = _cmp_method class StringArrayNumpySemantics(StringArray): _storage = 'python' _na_value = np.nan def _validate(self) -> None: if len(self._ndarray) and (not lib.is_string_array(self._ndarray, skipna=True)): raise ValueError('StringArrayNumpySemantics requires a sequence of strings or NaN') if self._ndarray.dtype != 'object': raise ValueError(f"StringArrayNumpySemantics requires a sequence of strings or NaN. Got '{self._ndarray.dtype}' dtype instead.") @classmethod def _from_sequence(cls, scalars, *, dtype: Dtype | None=None, copy: bool=False) -> Self: if dtype is None: dtype = StringDtype(storage='python', na_value=np.nan) return super()._from_sequence(scalars, dtype=dtype, copy=copy) # File: pandas-main/pandas/core/arrays/string_arrow.py from __future__ import annotations import operator import re from typing import TYPE_CHECKING, Union import warnings import numpy as np from pandas._libs import lib, missing as libmissing from pandas.compat import pa_version_under10p1, pa_version_under13p0 from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_scalar, pandas_dtype from pandas.core.dtypes.missing import isna from pandas.core.arrays._arrow_string_mixins import ArrowStringArrayMixin from pandas.core.arrays.arrow import ArrowExtensionArray from pandas.core.arrays.boolean import BooleanDtype from pandas.core.arrays.floating import Float64Dtype from pandas.core.arrays.integer import Int64Dtype from pandas.core.arrays.numeric import NumericDtype from pandas.core.arrays.string_ import BaseStringArray, StringDtype from pandas.core.strings.object_array import ObjectStringArrayMixin if not pa_version_under10p1: import pyarrow as pa import pyarrow.compute as pc if TYPE_CHECKING: from collections.abc import Callable, Sequence from pandas._typing import ArrayLike, Dtype, NpDtype, Self, npt from pandas.core.dtypes.dtypes import ExtensionDtype from pandas import Series ArrowStringScalarOrNAT = Union[str, libmissing.NAType] def _chk_pyarrow_available() -> None: if pa_version_under10p1: msg = 'pyarrow>=10.0.1 is required for PyArrow backed ArrowExtensionArray.' 
raise ImportError(msg) class ArrowStringArray(ObjectStringArrayMixin, ArrowExtensionArray, BaseStringArray): _dtype: StringDtype _storage = 'pyarrow' _na_value: libmissing.NAType | float = libmissing.NA def __init__(self, values) -> None: _chk_pyarrow_available() if isinstance(values, (pa.Array, pa.ChunkedArray)) and (pa.types.is_string(values.type) or (pa.types.is_dictionary(values.type) and (pa.types.is_string(values.type.value_type) or pa.types.is_large_string(values.type.value_type)))): values = pc.cast(values, pa.large_string()) super().__init__(values) self._dtype = StringDtype(storage=self._storage, na_value=self._na_value) if not pa.types.is_large_string(self._pa_array.type): raise ValueError('ArrowStringArray requires a PyArrow (chunked) array of large_string type') @classmethod def _box_pa_scalar(cls, value, pa_type: pa.DataType | None=None) -> pa.Scalar: pa_scalar = super()._box_pa_scalar(value, pa_type) if pa.types.is_string(pa_scalar.type) and pa_type is None: pa_scalar = pc.cast(pa_scalar, pa.large_string()) return pa_scalar @classmethod def _box_pa_array(cls, value, pa_type: pa.DataType | None=None, copy: bool=False) -> pa.Array | pa.ChunkedArray: pa_array = super()._box_pa_array(value, pa_type) if pa.types.is_string(pa_array.type) and pa_type is None: pa_array = pc.cast(pa_array, pa.large_string()) return pa_array def __len__(self) -> int: return len(self._pa_array) @classmethod def _from_sequence(cls, scalars, *, dtype: Dtype | None=None, copy: bool=False) -> Self: from pandas.core.arrays.masked import BaseMaskedArray _chk_pyarrow_available() if dtype and (not (isinstance(dtype, str) and dtype == 'string')): dtype = pandas_dtype(dtype) assert isinstance(dtype, StringDtype) and dtype.storage == 'pyarrow' if isinstance(scalars, BaseMaskedArray): na_values = scalars._mask result = scalars._data result = lib.ensure_string_array(result, copy=copy, convert_na_value=False) return cls(pa.array(result, mask=na_values, type=pa.large_string())) elif isinstance(scalars, (pa.Array, pa.ChunkedArray)): return cls(pc.cast(scalars, pa.large_string())) result = lib.ensure_string_array(scalars, copy=copy) return cls(pa.array(result, type=pa.large_string(), from_pandas=True)) @classmethod def _from_sequence_of_strings(cls, strings, *, dtype: ExtensionDtype, copy: bool=False) -> Self: return cls._from_sequence(strings, dtype=dtype, copy=copy) @property def dtype(self) -> StringDtype: return self._dtype def insert(self, loc: int, item) -> ArrowStringArray: if self.dtype.na_value is np.nan and item is np.nan: item = libmissing.NA if not isinstance(item, str) and item is not libmissing.NA: raise TypeError('Scalar must be NA or str') return super().insert(loc, item) def _convert_bool_result(self, values): if self.dtype.na_value is np.nan: return ArrowExtensionArray(values).to_numpy(na_value=np.nan) return BooleanDtype().__from_arrow__(values) def _maybe_convert_setitem_value(self, value): if is_scalar(value): if isna(value): value = None elif not isinstance(value, str): raise TypeError('Scalar must be NA or str') else: value = np.array(value, dtype=object, copy=True) value[isna(value)] = None for v in value: if not (v is None or isinstance(v, str)): raise TypeError('Must provide strings') return super()._maybe_convert_setitem_value(value) def isin(self, values: ArrayLike) -> npt.NDArray[np.bool_]: value_set = [pa_scalar.as_py() for pa_scalar in [pa.scalar(value, from_pandas=True) for value in values] if pa_scalar.type in (pa.string(), pa.null(), pa.large_string())] if not len(value_set): return 
np.zeros(len(self), dtype=bool) result = pc.is_in(self._pa_array, value_set=pa.array(value_set, type=self._pa_array.type)) return np.array(result, dtype=np.bool_) def astype(self, dtype, copy: bool=True): dtype = pandas_dtype(dtype) if dtype == self.dtype: if copy: return self.copy() return self elif isinstance(dtype, NumericDtype): data = self._pa_array.cast(pa.from_numpy_dtype(dtype.numpy_dtype)) return dtype.__from_arrow__(data) elif isinstance(dtype, np.dtype) and np.issubdtype(dtype, np.floating): return self.to_numpy(dtype=dtype, na_value=np.nan) return super().astype(dtype, copy=copy) _str_isalnum = ArrowStringArrayMixin._str_isalnum _str_isalpha = ArrowStringArrayMixin._str_isalpha _str_isdecimal = ArrowStringArrayMixin._str_isdecimal _str_isdigit = ArrowStringArrayMixin._str_isdigit _str_islower = ArrowStringArrayMixin._str_islower _str_isnumeric = ArrowStringArrayMixin._str_isnumeric _str_isspace = ArrowStringArrayMixin._str_isspace _str_istitle = ArrowStringArrayMixin._str_istitle _str_isupper = ArrowStringArrayMixin._str_isupper _str_map = BaseStringArray._str_map _str_startswith = ArrowStringArrayMixin._str_startswith _str_endswith = ArrowStringArrayMixin._str_endswith _str_pad = ArrowStringArrayMixin._str_pad _str_match = ArrowStringArrayMixin._str_match _str_fullmatch = ArrowStringArrayMixin._str_fullmatch _str_lower = ArrowStringArrayMixin._str_lower _str_upper = ArrowStringArrayMixin._str_upper _str_strip = ArrowStringArrayMixin._str_strip _str_lstrip = ArrowStringArrayMixin._str_lstrip _str_rstrip = ArrowStringArrayMixin._str_rstrip _str_removesuffix = ArrowStringArrayMixin._str_removesuffix _str_get = ArrowStringArrayMixin._str_get _str_capitalize = ArrowStringArrayMixin._str_capitalize _str_title = ArrowStringArrayMixin._str_title _str_swapcase = ArrowStringArrayMixin._str_swapcase _str_slice_replace = ArrowStringArrayMixin._str_slice_replace _str_len = ArrowStringArrayMixin._str_len _str_slice = ArrowStringArrayMixin._str_slice def _str_contains(self, pat, case: bool=True, flags: int=0, na=np.nan, regex: bool=True): if flags: return super()._str_contains(pat, case, flags, na, regex) if not isna(na): if not isinstance(na, bool): warnings.warn("Allowing a non-bool 'na' in obj.str.contains is deprecated and will raise in a future version.", FutureWarning, stacklevel=find_stack_level()) na = bool(na) return ArrowStringArrayMixin._str_contains(self, pat, case, flags, na, regex) def _str_replace(self, pat: str | re.Pattern, repl: str | Callable, n: int=-1, case: bool=True, flags: int=0, regex: bool=True): if isinstance(pat, re.Pattern) or callable(repl) or (not case) or flags: return super()._str_replace(pat, repl, n, case, flags, regex) return ArrowStringArrayMixin._str_replace(self, pat, repl, n, case, flags, regex) def _str_repeat(self, repeats: int | Sequence[int]): if not isinstance(repeats, int): return super()._str_repeat(repeats) else: return ArrowExtensionArray._str_repeat(self, repeats=repeats) def _str_removeprefix(self, prefix: str): if not pa_version_under13p0: return ArrowStringArrayMixin._str_removeprefix(self, prefix) return super()._str_removeprefix(prefix) def _str_count(self, pat: str, flags: int=0): if flags: return super()._str_count(pat, flags) result = pc.count_substring_regex(self._pa_array, pat) return self._convert_int_result(result) def _str_find(self, sub: str, start: int=0, end: int | None=None): if pa_version_under13p0 and (not (start != 0 and end is not None)) and (not (start == 0 and end is None)): return super()._str_find(sub, start, end) 
return ArrowStringArrayMixin._str_find(self, sub, start, end) def _str_get_dummies(self, sep: str='|', dtype: NpDtype | None=None): if dtype is None: dtype = np.int64 (dummies_pa, labels) = ArrowExtensionArray(self._pa_array)._str_get_dummies(sep, dtype) if len(labels) == 0: return (np.empty(shape=(0, 0), dtype=dtype), labels) dummies = np.vstack(dummies_pa.to_numpy()) _dtype = pandas_dtype(dtype) dummies_dtype: NpDtype if isinstance(_dtype, np.dtype): dummies_dtype = _dtype else: dummies_dtype = np.bool_ return (dummies.astype(dummies_dtype, copy=False), labels) def _convert_int_result(self, result): if self.dtype.na_value is np.nan: if isinstance(result, pa.Array): result = result.to_numpy(zero_copy_only=False) else: result = result.to_numpy() if result.dtype == np.int32: result = result.astype(np.int64) return result return Int64Dtype().__from_arrow__(result) def _convert_rank_result(self, result): if self.dtype.na_value is np.nan: if isinstance(result, pa.Array): result = result.to_numpy(zero_copy_only=False) else: result = result.to_numpy() return result.astype('float64', copy=False) return Float64Dtype().__from_arrow__(result) def _reduce(self, name: str, *, skipna: bool=True, keepdims: bool=False, **kwargs): if self.dtype.na_value is np.nan and name in ['any', 'all']: if not skipna: nas = pc.is_null(self._pa_array) arr = pc.or_kleene(nas, pc.not_equal(self._pa_array, '')) else: arr = pc.not_equal(self._pa_array, '') return ArrowExtensionArray(arr)._reduce(name, skipna=skipna, keepdims=keepdims, **kwargs) result = self._reduce_calc(name, skipna=skipna, keepdims=keepdims, **kwargs) if name in ('argmin', 'argmax') and isinstance(result, pa.Array): return self._convert_int_result(result) elif isinstance(result, pa.Array): return type(self)(result) else: return result def value_counts(self, dropna: bool=True) -> Series: result = super().value_counts(dropna=dropna) if self.dtype.na_value is np.nan: res_values = result._values.to_numpy() return result._constructor(res_values, index=result.index, name=result.name, copy=False) return result def _cmp_method(self, other, op): result = super()._cmp_method(other, op) if self.dtype.na_value is np.nan: if op == operator.ne: return result.to_numpy(np.bool_, na_value=True) else: return result.to_numpy(np.bool_, na_value=False) return result class ArrowStringArrayNumpySemantics(ArrowStringArray): _na_value = np.nan # File: pandas-main/pandas/core/arrays/timedeltas.py from __future__ import annotations from datetime import timedelta import operator from typing import TYPE_CHECKING, cast import numpy as np from pandas._libs import lib, tslibs from pandas._libs.tslibs import NaT, NaTType, Tick, Timedelta, astype_overflowsafe, get_supported_dtype, iNaT, is_supported_dtype, periods_per_second from pandas._libs.tslibs.conversion import cast_from_unit_vectorized from pandas._libs.tslibs.fields import get_timedelta_days, get_timedelta_field from pandas._libs.tslibs.timedeltas import array_to_timedelta64, floordiv_object_array, ints_to_pytimedelta, parse_timedelta_unit, truediv_object_array from pandas.compat.numpy import function as nv from pandas.util._validators import validate_endpoints from pandas.core.dtypes.common import TD64NS_DTYPE, is_float_dtype, is_integer_dtype, is_object_dtype, is_scalar, is_string_dtype, pandas_dtype from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.missing import isna from pandas.core import nanops, roperator from pandas.core.array_algos import datetimelike_accumulations from pandas.core.arrays 
import datetimelike as dtl from pandas.core.arrays._ranges import generate_regular_range import pandas.core.common as com from pandas.core.ops.common import unpack_zerodim_and_defer if TYPE_CHECKING: from collections.abc import Iterator from pandas._typing import AxisInt, DateTimeErrorChoices, DtypeObj, NpDtype, Self, npt from pandas import DataFrame import textwrap def _field_accessor(name: str, alias: str, docstring: str): def f(self) -> np.ndarray: values = self.asi8 if alias == 'days': result = get_timedelta_days(values, reso=self._creso) else: result = get_timedelta_field(values, alias, reso=self._creso) if self._hasna: result = self._maybe_mask_results(result, fill_value=None, convert='float64') return result f.__name__ = name f.__doc__ = f'\n{docstring}\n' return property(f) class TimedeltaArray(dtl.TimelikeOps): _typ = 'timedeltaarray' _internal_fill_value = np.timedelta64('NaT', 'ns') _recognized_scalars = (timedelta, np.timedelta64, Tick) _is_recognized_dtype = lambda x: lib.is_np_dtype(x, 'm') _infer_matches = ('timedelta', 'timedelta64') @property def _scalar_type(self) -> type[Timedelta]: return Timedelta __array_priority__ = 1000 _other_ops: list[str] = [] _bool_ops: list[str] = [] _field_ops: list[str] = ['days', 'seconds', 'microseconds', 'nanoseconds'] _datetimelike_ops: list[str] = _field_ops + _bool_ops + ['unit', 'freq'] _datetimelike_methods: list[str] = ['to_pytimedelta', 'total_seconds', 'round', 'floor', 'ceil', 'as_unit'] def _box_func(self, x: np.timedelta64) -> Timedelta | NaTType: y = x.view('i8') if y == NaT._value: return NaT return Timedelta._from_value_and_reso(y, reso=self._creso) @property def dtype(self) -> np.dtype[np.timedelta64]: return self._ndarray.dtype _freq = None @classmethod def _validate_dtype(cls, values, dtype): dtype = _validate_td64_dtype(dtype) _validate_td64_dtype(values.dtype) if dtype != values.dtype: raise ValueError('Values resolution does not match dtype.') return dtype @classmethod def _simple_new(cls, values: npt.NDArray[np.timedelta64], freq: Tick | None=None, dtype: np.dtype[np.timedelta64]=TD64NS_DTYPE) -> Self: assert lib.is_np_dtype(dtype, 'm') assert not tslibs.is_unitless(dtype) assert isinstance(values, np.ndarray), type(values) assert dtype == values.dtype assert freq is None or isinstance(freq, Tick) result = super()._simple_new(values=values, dtype=dtype) result._freq = freq return result @classmethod def _from_sequence(cls, data, *, dtype=None, copy: bool=False) -> Self: if dtype: dtype = _validate_td64_dtype(dtype) (data, freq) = sequence_to_td64ns(data, copy=copy, unit=None) if dtype is not None: data = astype_overflowsafe(data, dtype=dtype, copy=False) return cls._simple_new(data, dtype=data.dtype, freq=freq) @classmethod def _from_sequence_not_strict(cls, data, *, dtype=None, copy: bool=False, freq=lib.no_default, unit=None) -> Self: if dtype: dtype = _validate_td64_dtype(dtype) assert unit not in ['Y', 'y', 'M'] (data, inferred_freq) = sequence_to_td64ns(data, copy=copy, unit=unit) if dtype is not None: data = astype_overflowsafe(data, dtype=dtype, copy=False) result = cls._simple_new(data, dtype=data.dtype, freq=inferred_freq) result._maybe_pin_freq(freq, {}) return result @classmethod def _generate_range(cls, start, end, periods, freq, closed=None, *, unit: str | None=None) -> Self: periods = dtl.validate_periods(periods) if freq is None and any((x is None for x in [periods, start, end])): raise ValueError('Must provide freq argument if no data is supplied') if com.count_not_none(start, end, periods, freq) != 3: 
raise ValueError('Of the four parameters: start, end, periods, and freq, exactly three must be specified') if start is not None: start = Timedelta(start).as_unit('ns') if end is not None: end = Timedelta(end).as_unit('ns') if unit is not None: if unit not in ['s', 'ms', 'us', 'ns']: raise ValueError("'unit' must be one of 's', 'ms', 'us', 'ns'") else: unit = 'ns' if start is not None and unit is not None: start = start.as_unit(unit, round_ok=False) if end is not None and unit is not None: end = end.as_unit(unit, round_ok=False) (left_closed, right_closed) = validate_endpoints(closed) if freq is not None: index = generate_regular_range(start, end, periods, freq, unit=unit) else: index = np.linspace(start._value, end._value, periods).astype('i8') if not left_closed: index = index[1:] if not right_closed: index = index[:-1] td64values = index.view(f'm8[{unit}]') return cls._simple_new(td64values, dtype=td64values.dtype, freq=freq) def _unbox_scalar(self, value) -> np.timedelta64: if not isinstance(value, self._scalar_type) and value is not NaT: raise ValueError("'value' should be a Timedelta.") self._check_compatible_with(value) if value is NaT: return np.timedelta64(value._value, self.unit) else: return value.as_unit(self.unit, round_ok=False).asm8 def _scalar_from_string(self, value) -> Timedelta | NaTType: return Timedelta(value) def _check_compatible_with(self, other) -> None: pass def astype(self, dtype, copy: bool=True): dtype = pandas_dtype(dtype) if lib.is_np_dtype(dtype, 'm'): if dtype == self.dtype: if copy: return self.copy() return self if is_supported_dtype(dtype): res_values = astype_overflowsafe(self._ndarray, dtype, copy=False) return type(self)._simple_new(res_values, dtype=res_values.dtype, freq=self.freq) else: raise ValueError(f"Cannot convert from {self.dtype} to {dtype}. 
Supported resolutions are 's', 'ms', 'us', 'ns'") return dtl.DatetimeLikeArrayMixin.astype(self, dtype, copy=copy) def __iter__(self) -> Iterator: if self.ndim > 1: for i in range(len(self)): yield self[i] else: data = self._ndarray length = len(self) chunksize = 10000 chunks = length // chunksize + 1 for i in range(chunks): start_i = i * chunksize end_i = min((i + 1) * chunksize, length) converted = ints_to_pytimedelta(data[start_i:end_i], box=True) yield from converted def sum(self, *, axis: AxisInt | None=None, dtype: NpDtype | None=None, out=None, keepdims: bool=False, initial=None, skipna: bool=True, min_count: int=0): nv.validate_sum((), {'dtype': dtype, 'out': out, 'keepdims': keepdims, 'initial': initial}) result = nanops.nansum(self._ndarray, axis=axis, skipna=skipna, min_count=min_count) return self._wrap_reduction_result(axis, result) def std(self, *, axis: AxisInt | None=None, dtype: NpDtype | None=None, out=None, ddof: int=1, keepdims: bool=False, skipna: bool=True): nv.validate_stat_ddof_func((), {'dtype': dtype, 'out': out, 'keepdims': keepdims}, fname='std') result = nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof) if axis is None or self.ndim == 1: return self._box_func(result) return self._from_backing_data(result) def _accumulate(self, name: str, *, skipna: bool=True, **kwargs): if name == 'cumsum': op = getattr(datetimelike_accumulations, name) result = op(self._ndarray.copy(), skipna=skipna, **kwargs) return type(self)._simple_new(result, freq=None, dtype=self.dtype) elif name == 'cumprod': raise TypeError('cumprod not supported for Timedelta.') else: return super()._accumulate(name, skipna=skipna, **kwargs) def _formatter(self, boxed: bool=False): from pandas.io.formats.format import get_format_timedelta64 return get_format_timedelta64(self, box=True) def _format_native_types(self, *, na_rep: str | float='NaT', date_format=None, **kwargs) -> npt.NDArray[np.object_]: from pandas.io.formats.format import get_format_timedelta64 formatter = get_format_timedelta64(self, na_rep) return np.frompyfunc(formatter, 1, 1)(self._ndarray) def _add_offset(self, other): assert not isinstance(other, Tick) raise TypeError(f'cannot add the type {type(other).__name__} to a {type(self).__name__}') @unpack_zerodim_and_defer('__mul__') def __mul__(self, other) -> Self: if is_scalar(other): result = self._ndarray * other if result.dtype.kind != 'm': raise TypeError(f'Cannot multiply with {type(other).__name__}') freq = None if self.freq is not None and (not isna(other)): freq = self.freq * other if freq.n == 0: freq = None return type(self)._simple_new(result, dtype=result.dtype, freq=freq) if not hasattr(other, 'dtype'): other = np.array(other) if len(other) != len(self) and (not lib.is_np_dtype(other.dtype, 'm')): raise ValueError('Cannot multiply with unequal lengths') if is_object_dtype(other.dtype): arr = self._ndarray result = [arr[n] * other[n] for n in range(len(self))] result = np.array(result) return type(self)._simple_new(result, dtype=result.dtype) result = self._ndarray * other if result.dtype.kind != 'm': raise TypeError(f'Cannot multiply with {type(other).__name__}') return type(self)._simple_new(result, dtype=result.dtype) __rmul__ = __mul__ def _scalar_divlike_op(self, other, op): if isinstance(other, self._recognized_scalars): other = Timedelta(other) if cast('Timedelta | NaTType', other) is NaT: res = np.empty(self.shape, dtype=np.float64) res.fill(np.nan) return res return op(self._ndarray, other) else: if op in [roperator.rtruediv, roperator.rfloordiv]: 
raise TypeError(f'Cannot divide {type(other).__name__} by {type(self).__name__}') result = op(self._ndarray, other) freq = None if self.freq is not None: freq = self.freq / other if freq.nanos == 0 and self.freq.nanos != 0: freq = None return type(self)._simple_new(result, dtype=result.dtype, freq=freq) def _cast_divlike_op(self, other): if not hasattr(other, 'dtype'): other = np.array(other) if len(other) != len(self): raise ValueError('Cannot divide vectors with unequal lengths') return other def _vector_divlike_op(self, other, op) -> np.ndarray | Self: result = op(self._ndarray, np.asarray(other)) if (is_integer_dtype(other.dtype) or is_float_dtype(other.dtype)) and op in [operator.truediv, operator.floordiv]: return type(self)._simple_new(result, dtype=result.dtype) if op in [operator.floordiv, roperator.rfloordiv]: mask = self.isna() | isna(other) if mask.any(): result = result.astype(np.float64) np.putmask(result, mask, np.nan) return result @unpack_zerodim_and_defer('__truediv__') def __truediv__(self, other): op = operator.truediv if is_scalar(other): return self._scalar_divlike_op(other, op) other = self._cast_divlike_op(other) if lib.is_np_dtype(other.dtype, 'm') or is_integer_dtype(other.dtype) or is_float_dtype(other.dtype): return self._vector_divlike_op(other, op) if is_object_dtype(other.dtype): other = np.asarray(other) if self.ndim > 1: res_cols = [left / right for (left, right) in zip(self, other)] res_cols2 = [x.reshape(1, -1) for x in res_cols] result = np.concatenate(res_cols2, axis=0) else: result = truediv_object_array(self._ndarray, other) return result else: return NotImplemented @unpack_zerodim_and_defer('__rtruediv__') def __rtruediv__(self, other): op = roperator.rtruediv if is_scalar(other): return self._scalar_divlike_op(other, op) other = self._cast_divlike_op(other) if lib.is_np_dtype(other.dtype, 'm'): return self._vector_divlike_op(other, op) elif is_object_dtype(other.dtype): result_list = [other[n] / self[n] for n in range(len(self))] return np.array(result_list) else: return NotImplemented @unpack_zerodim_and_defer('__floordiv__') def __floordiv__(self, other): op = operator.floordiv if is_scalar(other): return self._scalar_divlike_op(other, op) other = self._cast_divlike_op(other) if lib.is_np_dtype(other.dtype, 'm') or is_integer_dtype(other.dtype) or is_float_dtype(other.dtype): return self._vector_divlike_op(other, op) elif is_object_dtype(other.dtype): other = np.asarray(other) if self.ndim > 1: res_cols = [left // right for (left, right) in zip(self, other)] res_cols2 = [x.reshape(1, -1) for x in res_cols] result = np.concatenate(res_cols2, axis=0) else: result = floordiv_object_array(self._ndarray, other) assert result.dtype == object return result else: return NotImplemented @unpack_zerodim_and_defer('__rfloordiv__') def __rfloordiv__(self, other): op = roperator.rfloordiv if is_scalar(other): return self._scalar_divlike_op(other, op) other = self._cast_divlike_op(other) if lib.is_np_dtype(other.dtype, 'm'): return self._vector_divlike_op(other, op) elif is_object_dtype(other.dtype): result_list = [other[n] // self[n] for n in range(len(self))] result = np.array(result_list) return result else: return NotImplemented @unpack_zerodim_and_defer('__mod__') def __mod__(self, other): if isinstance(other, self._recognized_scalars): other = Timedelta(other) return self - self // other * other @unpack_zerodim_and_defer('__rmod__') def __rmod__(self, other): if isinstance(other, self._recognized_scalars): other = Timedelta(other) return other - other // 
self * self @unpack_zerodim_and_defer('__divmod__') def __divmod__(self, other): if isinstance(other, self._recognized_scalars): other = Timedelta(other) res1 = self // other res2 = self - res1 * other return (res1, res2) @unpack_zerodim_and_defer('__rdivmod__') def __rdivmod__(self, other): if isinstance(other, self._recognized_scalars): other = Timedelta(other) res1 = other // self res2 = other - res1 * self return (res1, res2) def __neg__(self) -> TimedeltaArray: freq = None if self.freq is not None: freq = -self.freq return type(self)._simple_new(-self._ndarray, dtype=self.dtype, freq=freq) def __pos__(self) -> TimedeltaArray: return type(self)._simple_new(self._ndarray.copy(), dtype=self.dtype, freq=self.freq) def __abs__(self) -> TimedeltaArray: return type(self)._simple_new(np.abs(self._ndarray), dtype=self.dtype) def total_seconds(self) -> npt.NDArray[np.float64]: pps = periods_per_second(self._creso) return self._maybe_mask_results(self.asi8 / pps, fill_value=None) def to_pytimedelta(self) -> npt.NDArray[np.object_]: return ints_to_pytimedelta(self._ndarray) days_docstring = textwrap.dedent('Number of days for each element.\n\n See Also\n --------\n Series.dt.seconds : Return number of seconds for each element.\n Series.dt.microseconds : Return number of microseconds for each element.\n Series.dt.nanoseconds : Return number of nanoseconds for each element.\n\n Examples\n --------\n For Series:\n\n >>> ser = pd.Series(pd.to_timedelta([1, 2, 3], unit=\'D\'))\n >>> ser\n 0 1 days\n 1 2 days\n 2 3 days\n dtype: timedelta64[ns]\n >>> ser.dt.days\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n For TimedeltaIndex:\n\n >>> tdelta_idx = pd.to_timedelta(["0 days", "10 days", "20 days"])\n >>> tdelta_idx\n TimedeltaIndex([\'0 days\', \'10 days\', \'20 days\'],\n dtype=\'timedelta64[ns]\', freq=None)\n >>> tdelta_idx.days\n Index([0, 10, 20], dtype=\'int64\')') days = _field_accessor('days', 'days', days_docstring) seconds_docstring = textwrap.dedent("Number of seconds (>= 0 and less than 1 day) for each element.\n\n See Also\n --------\n Series.dt.seconds : Return number of seconds for each element.\n Series.dt.nanoseconds : Return number of nanoseconds for each element.\n\n Examples\n --------\n For Series:\n\n >>> ser = pd.Series(pd.to_timedelta([1, 2, 3], unit='s'))\n >>> ser\n 0 0 days 00:00:01\n 1 0 days 00:00:02\n 2 0 days 00:00:03\n dtype: timedelta64[ns]\n >>> ser.dt.seconds\n 0 1\n 1 2\n 2 3\n dtype: int32\n\n For TimedeltaIndex:\n\n >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='s')\n >>> tdelta_idx\n TimedeltaIndex(['0 days 00:00:01', '0 days 00:00:02', '0 days 00:00:03'],\n dtype='timedelta64[ns]', freq=None)\n >>> tdelta_idx.seconds\n Index([1, 2, 3], dtype='int32')") seconds = _field_accessor('seconds', 'seconds', seconds_docstring) microseconds_docstring = textwrap.dedent("Number of microseconds (>= 0 and less than 1 second) for each element.\n\n See Also\n --------\n pd.Timedelta.microseconds : Number of microseconds (>= 0 and less than 1 second).\n pd.Timedelta.to_pytimedelta.microseconds : Number of microseconds (>= 0 and less\n than 1 second) of a datetime.timedelta.\n\n Examples\n --------\n For Series:\n\n >>> ser = pd.Series(pd.to_timedelta([1, 2, 3], unit='us'))\n >>> ser\n 0 0 days 00:00:00.000001\n 1 0 days 00:00:00.000002\n 2 0 days 00:00:00.000003\n dtype: timedelta64[ns]\n >>> ser.dt.microseconds\n 0 1\n 1 2\n 2 3\n dtype: int32\n\n For TimedeltaIndex:\n\n >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='us')\n >>> tdelta_idx\n TimedeltaIndex(['0 days 00:00:00.000001', 
'0 days 00:00:00.000002',\n '0 days 00:00:00.000003'],\n dtype='timedelta64[ns]', freq=None)\n >>> tdelta_idx.microseconds\n Index([1, 2, 3], dtype='int32')") microseconds = _field_accessor('microseconds', 'microseconds', microseconds_docstring) nanoseconds_docstring = textwrap.dedent("Number of nanoseconds (>= 0 and less than 1 microsecond) for each element.\n\n See Also\n --------\n Series.dt.seconds : Return number of seconds for each element.\n Series.dt.microseconds : Return number of nanoseconds for each element.\n\n Examples\n --------\n For Series:\n\n >>> ser = pd.Series(pd.to_timedelta([1, 2, 3], unit='ns'))\n >>> ser\n 0 0 days 00:00:00.000000001\n 1 0 days 00:00:00.000000002\n 2 0 days 00:00:00.000000003\n dtype: timedelta64[ns]\n >>> ser.dt.nanoseconds\n 0 1\n 1 2\n 2 3\n dtype: int32\n\n For TimedeltaIndex:\n\n >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit='ns')\n >>> tdelta_idx\n TimedeltaIndex(['0 days 00:00:00.000000001', '0 days 00:00:00.000000002',\n '0 days 00:00:00.000000003'],\n dtype='timedelta64[ns]', freq=None)\n >>> tdelta_idx.nanoseconds\n Index([1, 2, 3], dtype='int32')") nanoseconds = _field_accessor('nanoseconds', 'nanoseconds', nanoseconds_docstring) @property def components(self) -> DataFrame: from pandas import DataFrame columns = ['days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds', 'nanoseconds'] hasnans = self._hasna if hasnans: def f(x): if isna(x): return [np.nan] * len(columns) return x.components else: def f(x): return x.components result = DataFrame([f(x) for x in self], columns=columns) if not hasnans: result = result.astype('int64') return result def sequence_to_td64ns(data, copy: bool=False, unit=None, errors: DateTimeErrorChoices='raise') -> tuple[np.ndarray, Tick | None]: assert unit not in ['Y', 'y', 'M'] inferred_freq = None if unit is not None: unit = parse_timedelta_unit(unit) (data, copy) = dtl.ensure_arraylike_for_datetimelike(data, copy, cls_name='TimedeltaArray') if isinstance(data, TimedeltaArray): inferred_freq = data.freq if data.dtype == object or is_string_dtype(data.dtype): data = _objects_to_td64ns(data, unit=unit, errors=errors) copy = False elif is_integer_dtype(data.dtype): (data, copy_made) = _ints_to_td64ns(data, unit=unit) copy = copy and (not copy_made) elif is_float_dtype(data.dtype): if isinstance(data.dtype, ExtensionDtype): mask = data._mask data = data._data else: mask = np.isnan(data) data = cast_from_unit_vectorized(data, unit or 'ns') data[mask] = iNaT data = data.view('m8[ns]') copy = False elif lib.is_np_dtype(data.dtype, 'm'): if not is_supported_dtype(data.dtype): new_dtype = get_supported_dtype(data.dtype) data = astype_overflowsafe(data, dtype=new_dtype, copy=False) copy = False else: raise TypeError(f'dtype {data.dtype} cannot be converted to timedelta64[ns]') if not copy: data = np.asarray(data) else: data = np.array(data, copy=copy) assert data.dtype.kind == 'm' assert data.dtype != 'm8' return (data, inferred_freq) def _ints_to_td64ns(data, unit: str='ns') -> tuple[np.ndarray, bool]: copy_made = False unit = unit if unit is not None else 'ns' if data.dtype != np.int64: data = data.astype(np.int64) copy_made = True if unit != 'ns': dtype_str = f'timedelta64[{unit}]' data = data.view(dtype_str) data = astype_overflowsafe(data, dtype=TD64NS_DTYPE) copy_made = True else: data = data.view('timedelta64[ns]') return (data, copy_made) def _objects_to_td64ns(data, unit=None, errors: DateTimeErrorChoices='raise') -> np.ndarray: values = np.asarray(data, dtype=np.object_) result = 
array_to_timedelta64(values, unit=unit, errors=errors) return result.view('timedelta64[ns]') def _validate_td64_dtype(dtype) -> DtypeObj: dtype = pandas_dtype(dtype) if dtype == np.dtype('m8'): msg = "Passing in 'timedelta' dtype with no precision is not allowed. Please pass in 'timedelta64[ns]' instead." raise ValueError(msg) if not lib.is_np_dtype(dtype, 'm'): raise ValueError(f"dtype '{dtype}' is invalid, should be np.timedelta64 dtype") elif not is_supported_dtype(dtype): raise ValueError("Supported timedelta64 resolutions are 's', 'ms', 'us', 'ns'") return dtype # File: pandas-main/pandas/core/base.py """""" from __future__ import annotations import textwrap from typing import TYPE_CHECKING, Any, Generic, Literal, cast, final, overload import numpy as np from pandas._libs import lib from pandas._typing import AxisInt, DtypeObj, IndexLabel, NDFrameT, Self, Shape, npt from pandas.compat import PYPY from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly, doc from pandas.core.dtypes.cast import can_hold_element from pandas.core.dtypes.common import is_object_dtype, is_scalar from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCIndex, ABCSeries from pandas.core.dtypes.missing import isna, remove_na_arraylike from pandas.core import algorithms, nanops, ops from pandas.core.accessor import DirNamesMixin from pandas.core.arraylike import OpsMixin from pandas.core.arrays import ExtensionArray from pandas.core.construction import ensure_wrapped_if_datetimelike, extract_array if TYPE_CHECKING: from collections.abc import Hashable, Iterator from pandas._typing import DropKeep, NumpySorter, NumpyValueArrayLike, ScalarLike_co from pandas import DataFrame, Index, Series _shared_docs: dict[str, str] = {} class PandasObject(DirNamesMixin): _cache: dict[str, Any] @property def _constructor(self) -> type[Self]: return type(self) def __repr__(self) -> str: return object.__repr__(self) def _reset_cache(self, key: str | None=None) -> None: if not hasattr(self, '_cache'): return if key is None: self._cache.clear() else: self._cache.pop(key, None) def __sizeof__(self) -> int: memory_usage = getattr(self, 'memory_usage', None) if memory_usage: mem = memory_usage(deep=True) return int(mem if is_scalar(mem) else mem.sum()) return super().__sizeof__() class NoNewAttributesMixin: def _freeze(self) -> None: object.__setattr__(self, '__frozen', True) def __setattr__(self, key: str, value) -> None: if getattr(self, '__frozen', False) and (not (key == '_cache' or key in type(self).__dict__ or getattr(self, key, None) is not None)): raise AttributeError(f"You cannot add any new attribute '{key}'") object.__setattr__(self, key, value) class SelectionMixin(Generic[NDFrameT]): obj: NDFrameT _selection: IndexLabel | None = None exclusions: frozenset[Hashable] _internal_names = ['_cache', '__setstate__'] _internal_names_set = set(_internal_names) @final @property def _selection_list(self): if not isinstance(self._selection, (list, tuple, ABCSeries, ABCIndex, np.ndarray)): return [self._selection] return self._selection @cache_readonly def _selected_obj(self): if self._selection is None or isinstance(self.obj, ABCSeries): return self.obj else: return self.obj[self._selection] @final @cache_readonly def ndim(self) -> int: return self._selected_obj.ndim @final @cache_readonly def _obj_with_exclusions(self): if isinstance(self.obj, ABCSeries): return self.obj if self._selection is not 
None: return self.obj[self._selection_list] if len(self.exclusions) > 0: return self.obj._drop_axis(self.exclusions, axis=1, only_slice=True) else: return self.obj def __getitem__(self, key): if self._selection is not None: raise IndexError(f'Column(s) {self._selection} already selected') if isinstance(key, (list, tuple, ABCSeries, ABCIndex, np.ndarray)): if len(self.obj.columns.intersection(key)) != len(set(key)): bad_keys = list(set(key).difference(self.obj.columns)) raise KeyError(f'Columns not found: {str(bad_keys)[1:-1]}') return self._gotitem(list(key), ndim=2) else: if key not in self.obj: raise KeyError(f'Column not found: {key}') ndim = self.obj[key].ndim return self._gotitem(key, ndim=ndim) def _gotitem(self, key, ndim: int, subset=None): raise AbstractMethodError(self) @final def _infer_selection(self, key, subset: Series | DataFrame): selection = None if subset.ndim == 2 and (lib.is_scalar(key) and key in subset or lib.is_list_like(key)): selection = key elif subset.ndim == 1 and lib.is_scalar(key) and (key == subset.name): selection = key return selection def aggregate(self, func, *args, **kwargs): raise AbstractMethodError(self) agg = aggregate class IndexOpsMixin(OpsMixin): __array_priority__ = 1000 _hidden_attrs: frozenset[str] = frozenset(['tolist']) @property def dtype(self) -> DtypeObj: raise AbstractMethodError(self) @property def _values(self) -> ExtensionArray | np.ndarray: raise AbstractMethodError(self) @final def transpose(self, *args, **kwargs) -> Self: nv.validate_transpose(args, kwargs) return self T = property(transpose, doc="\n Return the transpose, which is by definition self.\n\n See Also\n --------\n Index : Immutable sequence used for indexing and alignment.\n\n Examples\n --------\n For Series:\n\n >>> s = pd.Series(['Ant', 'Bear', 'Cow'])\n >>> s\n 0 Ant\n 1 Bear\n 2 Cow\n dtype: object\n >>> s.T\n 0 Ant\n 1 Bear\n 2 Cow\n dtype: object\n\n For Index:\n\n >>> idx = pd.Index([1, 2, 3])\n >>> idx.T\n Index([1, 2, 3], dtype='int64')\n ") @property def shape(self) -> Shape: return self._values.shape def __len__(self) -> int: raise AbstractMethodError(self) @property def ndim(self) -> Literal[1]: return 1 @final def item(self): if len(self) == 1: return next(iter(self)) raise ValueError('can only convert an array of size 1 to a Python scalar') @property def nbytes(self) -> int: return self._values.nbytes @property def size(self) -> int: return len(self._values) @property def array(self) -> ExtensionArray: raise AbstractMethodError(self) def to_numpy(self, dtype: npt.DTypeLike | None=None, copy: bool=False, na_value: object=lib.no_default, **kwargs) -> np.ndarray: if isinstance(self.dtype, ExtensionDtype): return self.array.to_numpy(dtype, copy=copy, na_value=na_value, **kwargs) elif kwargs: bad_keys = next(iter(kwargs.keys())) raise TypeError(f"to_numpy() got an unexpected keyword argument '{bad_keys}'") fillna = na_value is not lib.no_default and (not (na_value is np.nan and np.issubdtype(self.dtype, np.floating))) values = self._values if fillna and self.hasnans: if not can_hold_element(values, na_value): values = np.asarray(values, dtype=dtype) else: values = values.copy() values[np.asanyarray(isna(self))] = na_value result = np.asarray(values, dtype=dtype) if copy and (not fillna) or not copy: if np.shares_memory(self._values[:2], result[:2]): if not copy: result = result.view() result.flags.writeable = False else: result = result.copy() return result @final @property def empty(self) -> bool: return not self.size @doc(op='max', oppose='min', 
value='largest') def argmax(self, axis: AxisInt | None=None, skipna: bool=True, *args, **kwargs) -> int: delegate = self._values nv.validate_minmax_axis(axis) skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) if isinstance(delegate, ExtensionArray): return delegate.argmax(skipna=skipna) else: result = nanops.nanargmax(delegate, skipna=skipna) return result @doc(argmax, op='min', oppose='max', value='smallest') def argmin(self, axis: AxisInt | None=None, skipna: bool=True, *args, **kwargs) -> int: delegate = self._values nv.validate_minmax_axis(axis) skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs) if isinstance(delegate, ExtensionArray): return delegate.argmin(skipna=skipna) else: result = nanops.nanargmin(delegate, skipna=skipna) return result def tolist(self) -> list: return self._values.tolist() to_list = tolist def __iter__(self) -> Iterator: if not isinstance(self._values, np.ndarray): return iter(self._values) else: return map(self._values.item, range(self._values.size)) @cache_readonly def hasnans(self) -> bool: return bool(isna(self).any()) @final def _map_values(self, mapper, na_action=None): arr = self._values if isinstance(arr, ExtensionArray): return arr.map(mapper, na_action=na_action) return algorithms.map_array(arr, mapper, na_action=na_action) def value_counts(self, normalize: bool=False, sort: bool=True, ascending: bool=False, bins=None, dropna: bool=True) -> Series: return algorithms.value_counts_internal(self, sort=sort, ascending=ascending, normalize=normalize, bins=bins, dropna=dropna) def unique(self): values = self._values if not isinstance(values, np.ndarray): result = values.unique() else: result = algorithms.unique1d(values) return result @final def nunique(self, dropna: bool=True) -> int: uniqs = self.unique() if dropna: uniqs = remove_na_arraylike(uniqs) return len(uniqs) @property def is_unique(self) -> bool: return self.nunique(dropna=False) == len(self) @property def is_monotonic_increasing(self) -> bool: from pandas import Index return Index(self).is_monotonic_increasing @property def is_monotonic_decreasing(self) -> bool: from pandas import Index return Index(self).is_monotonic_decreasing @final def _memory_usage(self, deep: bool=False) -> int: if hasattr(self.array, 'memory_usage'): return self.array.memory_usage(deep=deep) v = self.array.nbytes if deep and is_object_dtype(self.dtype) and (not PYPY): values = cast(np.ndarray, self._values) v += lib.memory_usage_of_objects(values) return v @doc(algorithms.factorize, values='', order='', size_hint='', sort=textwrap.dedent(' sort : bool, default False\n Sort `uniques` and shuffle `codes` to maintain the\n relationship.\n ')) def factorize(self, sort: bool=False, use_na_sentinel: bool=True) -> tuple[npt.NDArray[np.intp], Index]: (codes, uniques) = algorithms.factorize(self._values, sort=sort, use_na_sentinel=use_na_sentinel) if uniques.dtype == np.float16: uniques = uniques.astype(np.float32) if isinstance(self, ABCIndex): uniques = self._constructor(uniques) else: from pandas import Index uniques = Index(uniques) return (codes, uniques) _shared_docs['searchsorted'] = "\n Find indices where elements should be inserted to maintain order.\n\n Find the indices into a sorted {klass} `self` such that, if the\n corresponding elements in `value` were inserted before the indices,\n the order of `self` would be preserved.\n\n .. note::\n\n The {klass} *must* be monotonically sorted, otherwise\n wrong locations will likely be returned. 
Pandas does *not*\n check this for you.\n\n Parameters\n ----------\n value : array-like or scalar\n Values to insert into `self`.\n side : {{'left', 'right'}}, optional\n If 'left', the index of the first suitable location found is given.\n If 'right', return the last such index. If there is no suitable\n index, return either 0 or N (where N is the length of `self`).\n sorter : 1-D array-like, optional\n Optional array of integer indices that sort `self` into ascending\n order. They are typically the result of ``np.argsort``.\n\n Returns\n -------\n int or array of int\n A scalar or array of insertion points with the\n same shape as `value`.\n\n See Also\n --------\n sort_values : Sort by the values along either axis.\n numpy.searchsorted : Similar method from NumPy.\n\n Notes\n -----\n Binary search is used to find the required insertion points.\n\n Examples\n --------\n >>> ser = pd.Series([1, 2, 3])\n >>> ser\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n >>> ser.searchsorted(4)\n 3\n\n >>> ser.searchsorted([0, 4])\n array([0, 3])\n\n >>> ser.searchsorted([1, 3], side='left')\n array([0, 2])\n\n >>> ser.searchsorted([1, 3], side='right')\n array([1, 3])\n\n >>> ser = pd.Series(pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000']))\n >>> ser\n 0 2000-03-11\n 1 2000-03-12\n 2 2000-03-13\n dtype: datetime64[s]\n\n >>> ser.searchsorted('3/14/2000')\n 3\n\n >>> ser = pd.Categorical(\n ... ['apple', 'bread', 'bread', 'cheese', 'milk'], ordered=True\n ... )\n >>> ser\n ['apple', 'bread', 'bread', 'cheese', 'milk']\n Categories (4, object): ['apple' < 'bread' < 'cheese' < 'milk']\n\n >>> ser.searchsorted('bread')\n 1\n\n >>> ser.searchsorted(['bread'], side='right')\n array([3])\n\n If the values are not monotonically sorted, wrong locations\n may be returned:\n\n >>> ser = pd.Series([2, 1, 3])\n >>> ser\n 0 2\n 1 1\n 2 3\n dtype: int64\n\n >>> ser.searchsorted(1) # doctest: +SKIP\n 0 # wrong result, correct would be 1\n " @overload def searchsorted(self, value: ScalarLike_co, side: Literal['left', 'right']=..., sorter: NumpySorter=...) -> np.intp: ... @overload def searchsorted(self, value: npt.ArrayLike | ExtensionArray, side: Literal['left', 'right']=..., sorter: NumpySorter=...) -> npt.NDArray[np.intp]: ... 
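# The two overloads above encode searchsorted's return-type contract: a scalar
# `value` resolves to a single np.intp insertion point, while an array-like
# `value` resolves to an ndarray of positions. Illustrative usage, mirroring the
# shared docstring examples:
#     >>> import pandas as pd
#     >>> pd.Series([1, 2, 3]).searchsorted(4)
#     3
#     >>> pd.Series([1, 2, 3]).searchsorted([0, 4])
#     array([0, 3])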
@doc(_shared_docs['searchsorted'], klass='Index') def searchsorted(self, value: NumpyValueArrayLike | ExtensionArray, side: Literal['left', 'right']='left', sorter: NumpySorter | None=None) -> npt.NDArray[np.intp] | np.intp: if isinstance(value, ABCDataFrame): msg = f'Value must be 1-D array-like or scalar, {type(value).__name__} is not supported' raise ValueError(msg) values = self._values if not isinstance(values, np.ndarray): return values.searchsorted(value, side=side, sorter=sorter) return algorithms.searchsorted(values, value, side=side, sorter=sorter) def drop_duplicates(self, *, keep: DropKeep='first') -> Self: duplicated = self._duplicated(keep=keep) return self[~duplicated] @final def _duplicated(self, keep: DropKeep='first') -> npt.NDArray[np.bool_]: arr = self._values if isinstance(arr, ExtensionArray): return arr.duplicated(keep=keep) return algorithms.duplicated(arr, keep=keep) def _arith_method(self, other, op): res_name = ops.get_op_result_name(self, other) lvalues = self._values rvalues = extract_array(other, extract_numpy=True, extract_range=True) rvalues = ops.maybe_prepare_scalar_for_op(rvalues, lvalues.shape) rvalues = ensure_wrapped_if_datetimelike(rvalues) if isinstance(rvalues, range): rvalues = np.arange(rvalues.start, rvalues.stop, rvalues.step) with np.errstate(all='ignore'): result = ops.arithmetic_op(lvalues, rvalues, op) return self._construct_result(result, name=res_name) def _construct_result(self, result, name): raise AbstractMethodError(self) # File: pandas-main/pandas/core/common.py """""" from __future__ import annotations import builtins from collections import abc, defaultdict from collections.abc import Callable, Collection, Generator, Hashable, Iterable, Sequence import contextlib from functools import partial import inspect from typing import TYPE_CHECKING, Any, TypeVar, cast, overload import warnings import numpy as np from pandas._libs import lib from pandas.compat.numpy import np_version_gte1p24 from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike from pandas.core.dtypes.common import is_bool_dtype, is_integer from pandas.core.dtypes.generic import ABCExtensionArray, ABCIndex, ABCMultiIndex, ABCSeries from pandas.core.dtypes.inference import iterable_not_string if TYPE_CHECKING: from pandas._typing import AnyArrayLike, ArrayLike, Concatenate, NpDtype, P, RandomState, T from pandas import Index def flatten(line): for element in line: if iterable_not_string(element): yield from flatten(element) else: yield element def consensus_name_attr(objs): name = objs[0].name for obj in objs[1:]: try: if obj.name != name: name = None except ValueError: name = None return name def is_bool_indexer(key: Any) -> bool: if isinstance(key, (ABCSeries, np.ndarray, ABCIndex, ABCExtensionArray)) and (not isinstance(key, ABCMultiIndex)): if key.dtype == np.object_: key_array = np.asarray(key) if not lib.is_bool_array(key_array): na_msg = 'Cannot mask with non-boolean array containing NA / NaN values' if lib.is_bool_array(key_array, skipna=True): raise ValueError(na_msg) return False return True elif is_bool_dtype(key.dtype): return True elif isinstance(key, list): if len(key) > 0: if type(key) is not list: key = list(key) return lib.is_bool_list(key) return False def cast_scalar_indexer(val): if lib.is_float(val) and val.is_integer(): raise IndexError('Indexing with a float is no longer supported. 
Manually convert to an integer key instead.') return val def not_none(*args): return (arg for arg in args if arg is not None) def any_none(*args) -> bool: return any((arg is None for arg in args)) def all_none(*args) -> bool: return all((arg is None for arg in args)) def any_not_none(*args) -> bool: return any((arg is not None for arg in args)) def all_not_none(*args) -> bool: return all((arg is not None for arg in args)) def count_not_none(*args) -> int: return sum((x is not None for x in args)) @overload def asarray_tuplesafe(values: ArrayLike | list | tuple | zip, dtype: NpDtype | None=...) -> np.ndarray: ... @overload def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None=...) -> ArrayLike: ... def asarray_tuplesafe(values: Iterable, dtype: NpDtype | None=None) -> ArrayLike: if not (isinstance(values, (list, tuple)) or hasattr(values, '__array__')): values = list(values) elif isinstance(values, ABCIndex): return values._values elif isinstance(values, ABCSeries): return values._values if isinstance(values, list) and dtype in [np.object_, object]: return construct_1d_object_array_from_listlike(values) try: with warnings.catch_warnings(): if not np_version_gte1p24: warnings.simplefilter('ignore', np.VisibleDeprecationWarning) result = np.asarray(values, dtype=dtype) except ValueError: return construct_1d_object_array_from_listlike(values) if issubclass(result.dtype.type, str): result = np.asarray(values, dtype=object) if result.ndim == 2: values = [tuple(x) for x in values] result = construct_1d_object_array_from_listlike(values) return result def index_labels_to_array(labels: np.ndarray | Iterable, dtype: NpDtype | None=None) -> np.ndarray: if isinstance(labels, (str, tuple)): labels = [labels] if not isinstance(labels, (list, np.ndarray)): try: labels = list(labels) except TypeError: labels = [labels] labels = asarray_tuplesafe(labels, dtype=dtype) return labels def maybe_make_list(obj): if obj is not None and (not isinstance(obj, (tuple, list))): return [obj] return obj def maybe_iterable_to_list(obj: Iterable[T] | T) -> Collection[T] | T: if isinstance(obj, abc.Iterable) and (not isinstance(obj, abc.Sized)): return list(obj) obj = cast(Collection, obj) return obj def is_null_slice(obj) -> bool: return isinstance(obj, slice) and obj.start is None and (obj.stop is None) and (obj.step is None) def is_empty_slice(obj) -> bool: return isinstance(obj, slice) and obj.start is not None and (obj.stop is not None) and (obj.start == obj.stop) def is_true_slices(line: abc.Iterable) -> abc.Generator[bool, None, None]: for k in line: yield (isinstance(k, slice) and (not is_null_slice(k))) def is_full_slice(obj, line: int) -> bool: return isinstance(obj, slice) and obj.start == 0 and (obj.stop == line) and (obj.step is None) def get_callable_name(obj): if hasattr(obj, '__name__'): return getattr(obj, '__name__') if isinstance(obj, partial): return get_callable_name(obj.func) if callable(obj): return type(obj).__name__ return None def apply_if_callable(maybe_callable, obj, **kwargs): if callable(maybe_callable): return maybe_callable(obj, **kwargs) return maybe_callable def standardize_mapping(into): if not inspect.isclass(into): if isinstance(into, defaultdict): return partial(defaultdict, into.default_factory) into = type(into) if not issubclass(into, abc.Mapping): raise TypeError(f'unsupported type: {into}') if into == defaultdict: raise TypeError('to_dict() only accepts initialized defaultdicts') return into @overload def random_state(state: np.random.Generator) -> np.random.Generator: ... 
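# random_state (first overload above; the second overload and the implementation
# follow) normalizes the seed-like inputs pandas accepts: ints, ndarrays and
# BitGenerators are wrapped in np.random.RandomState, existing RandomState or
# Generator instances pass through unchanged, and None falls back to the global
# np.random module. A minimal illustrative sketch of that behaviour:
#     >>> import numpy as np
#     >>> from pandas.core.common import random_state
#     >>> isinstance(random_state(42), np.random.RandomState)
#     True
#     >>> random_state(None) is np.random
#     True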
@overload def random_state(state: int | np.ndarray | np.random.BitGenerator | np.random.RandomState | None) -> np.random.RandomState: ... def random_state(state: RandomState | None=None): if is_integer(state) or isinstance(state, (np.ndarray, np.random.BitGenerator)): return np.random.RandomState(state) elif isinstance(state, np.random.RandomState): return state elif isinstance(state, np.random.Generator): return state elif state is None: return np.random else: raise ValueError('random_state must be an integer, array-like, a BitGenerator, Generator, a numpy RandomState, or None') _T = TypeVar('_T') @overload def pipe(obj: _T, func: Callable[Concatenate[_T, P], T], *args: P.args, **kwargs: P.kwargs) -> T: ... @overload def pipe(obj: Any, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T: ... def pipe(obj: _T, func: Callable[Concatenate[_T, P], T] | tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T: if isinstance(func, tuple): (func_, target) = func if target in kwargs: msg = f'{target} is both the pipe target and a keyword argument' raise ValueError(msg) kwargs[target] = obj return func_(*args, **kwargs) else: return func(obj, *args, **kwargs) def get_rename_function(mapper): def f(x): if x in mapper: return mapper[x] else: return x return f if isinstance(mapper, (abc.Mapping, ABCSeries)) else mapper def convert_to_list_like(values: Hashable | Iterable | AnyArrayLike) -> list | AnyArrayLike: if isinstance(values, (list, np.ndarray, ABCIndex, ABCSeries, ABCExtensionArray)): return values elif isinstance(values, abc.Iterable) and (not isinstance(values, str)): return list(values) return [values] @contextlib.contextmanager def temp_setattr(obj, attr: str, value, condition: bool=True) -> Generator[None, None, None]: if condition: old_value = getattr(obj, attr) setattr(obj, attr, value) try: yield obj finally: if condition: setattr(obj, attr, old_value) def require_length_match(data, index: Index) -> None: if len(data) != len(index): raise ValueError(f'Length of values ({len(data)}) does not match length of index ({len(index)})') _cython_table = {builtins.sum: 'sum', builtins.max: 'max', builtins.min: 'min', np.all: 'all', np.any: 'any', np.sum: 'sum', np.nansum: 'sum', np.mean: 'mean', np.nanmean: 'mean', np.prod: 'prod', np.nanprod: 'prod', np.std: 'std', np.nanstd: 'std', np.var: 'var', np.nanvar: 'var', np.median: 'median', np.nanmedian: 'median', np.max: 'max', np.nanmax: 'max', np.min: 'min', np.nanmin: 'min', np.cumprod: 'cumprod', np.nancumprod: 'cumprod', np.cumsum: 'cumsum', np.nancumsum: 'cumsum'} def get_cython_func(arg: Callable) -> str | None: return _cython_table.get(arg) def fill_missing_names(names: Sequence[Hashable | None]) -> list[Hashable]: return [f'level_{i}' if name is None else name for (i, name) in enumerate(names)] # File: pandas-main/pandas/core/computation/align.py """""" from __future__ import annotations from functools import partial, wraps from typing import TYPE_CHECKING import warnings import numpy as np from pandas._config.config import get_option from pandas.errors import PerformanceWarning from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.computation.common import result_type_many if TYPE_CHECKING: from collections.abc import Callable, Sequence from pandas._typing import F from pandas.core.generic import NDFrame from pandas.core.indexes.api import Index def 
_align_core_single_unary_op(term) -> tuple[partial | type[NDFrame], dict[str, Index] | None]: typ: partial | type[NDFrame] axes: dict[str, Index] | None = None if isinstance(term.value, np.ndarray): typ = partial(np.asanyarray, dtype=term.value.dtype) else: typ = type(term.value) if hasattr(term.value, 'axes'): axes = _zip_axes_from_type(typ, term.value.axes) return (typ, axes) def _zip_axes_from_type(typ: type[NDFrame], new_axes: Sequence[Index]) -> dict[str, Index]: return {name: new_axes[i] for (i, name) in enumerate(typ._AXIS_ORDERS)} def _any_pandas_objects(terms) -> bool: return any((isinstance(term.value, PandasObject) for term in terms)) def _filter_special_cases(f) -> Callable[[F], F]: @wraps(f) def wrapper(terms): if len(terms) == 1: return _align_core_single_unary_op(terms[0]) term_values = (term.value for term in terms) if not _any_pandas_objects(terms): return (result_type_many(*term_values), None) return f(terms) return wrapper @_filter_special_cases def _align_core(terms): term_index = [i for (i, term) in enumerate(terms) if hasattr(term.value, 'axes')] term_dims = [terms[i].value.ndim for i in term_index] from pandas import Series ndims = Series(dict(zip(term_index, term_dims))) biggest = terms[ndims.idxmax()].value typ = biggest._constructor axes = biggest.axes naxes = len(axes) gt_than_one_axis = naxes > 1 for value in (terms[i].value for i in term_index): is_series = isinstance(value, ABCSeries) is_series_and_gt_one_axis = is_series and gt_than_one_axis for (axis, items) in enumerate(value.axes): if is_series_and_gt_one_axis: (ax, itm) = (naxes - 1, value.index) else: (ax, itm) = (axis, items) if not axes[ax].is_(itm): axes[ax] = axes[ax].union(itm) for (i, ndim) in ndims.items(): for (axis, items) in zip(range(ndim), axes): ti = terms[i].value if hasattr(ti, 'reindex'): transpose = isinstance(ti, ABCSeries) and naxes > 1 reindexer = axes[naxes - 1] if transpose else items term_axis_size = len(ti.axes[axis]) reindexer_size = len(reindexer) ordm = np.log10(max(1, abs(reindexer_size - term_axis_size))) if get_option('performance_warnings') and ordm >= 1 and (reindexer_size >= 10000): w = f'Alignment difference on axis {axis} is larger than an order of magnitude on term {terms[i].name!r}, by more than {ordm:.4g}; performance may suffer.' 
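# The PerformanceWarning issued below fires only when the "performance_warnings"
# option is enabled, the reindexer holds at least 10,000 labels, and `ordm`
# (computed above as the base-10 order of magnitude of the gap between the
# reindexer length and the term's current axis length) is at least 1.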
warnings.warn(w, category=PerformanceWarning, stacklevel=find_stack_level()) obj = ti.reindex(reindexer, axis=axis) terms[i].update(obj) terms[i].update(terms[i].value.values) return (typ, _zip_axes_from_type(typ, axes)) def align_terms(terms): try: terms = list(com.flatten(terms)) except TypeError: if isinstance(terms.value, (ABCSeries, ABCDataFrame)): typ = type(terms.value) name = terms.value.name if isinstance(terms.value, ABCSeries) else None return (typ, _zip_axes_from_type(typ, terms.value.axes), name) return (np.result_type(terms.type), None, None) if all((term.is_scalar for term in terms)): return (result_type_many(*(term.value for term in terms)).type, None, None) names = {term.value.name for term in terms if isinstance(term.value, ABCSeries)} name = names.pop() if len(names) == 1 else None (typ, axes) = _align_core(terms) return (typ, axes, name) def reconstruct_object(typ, obj, axes, dtype, name): try: typ = typ.type except AttributeError: pass res_t = np.result_type(obj.dtype, dtype) if not isinstance(typ, partial) and issubclass(typ, PandasObject): if name is None: return typ(obj, dtype=res_t, **axes) return typ(obj, dtype=res_t, name=name, **axes) if hasattr(res_t, 'type') and typ == np.bool_ and (res_t != np.bool_): ret_value = res_t.type(obj) else: ret_value = typ(obj).astype(res_t) if len(obj.shape) == 1 and len(obj) == 1 and (not isinstance(ret_value, np.ndarray)): ret_value = np.array([ret_value]).astype(res_t) return ret_value # File: pandas-main/pandas/core/computation/common.py from __future__ import annotations from functools import reduce import numpy as np from pandas._config import get_option def ensure_decoded(s) -> str: if isinstance(s, (np.bytes_, bytes)): s = s.decode(get_option('display.encoding')) return s def result_type_many(*arrays_and_dtypes): try: return np.result_type(*arrays_and_dtypes) except ValueError: return reduce(np.result_type, arrays_and_dtypes) except TypeError: from pandas.core.dtypes.cast import find_common_type from pandas.core.dtypes.common import is_extension_array_dtype arr_and_dtypes = list(arrays_and_dtypes) (ea_dtypes, non_ea_dtypes) = ([], []) for arr_or_dtype in arr_and_dtypes: if is_extension_array_dtype(arr_or_dtype): ea_dtypes.append(arr_or_dtype) else: non_ea_dtypes.append(arr_or_dtype) if non_ea_dtypes: try: np_dtype = np.result_type(*non_ea_dtypes) except ValueError: np_dtype = reduce(np.result_type, arrays_and_dtypes) return find_common_type(ea_dtypes + [np_dtype]) return find_common_type(ea_dtypes) # File: pandas-main/pandas/core/computation/engines.py """""" from __future__ import annotations import abc from typing import TYPE_CHECKING from pandas.errors import NumExprClobberingError from pandas.core.computation.align import align_terms, reconstruct_object from pandas.core.computation.ops import MATHOPS, REDUCTIONS from pandas.io.formats import printing if TYPE_CHECKING: from pandas.core.computation.expr import Expr _ne_builtins = frozenset(MATHOPS + REDUCTIONS) def _check_ne_builtin_clash(expr: Expr) -> None: names = expr.names overlap = names & _ne_builtins if overlap: s = ', '.join([repr(x) for x in overlap]) raise NumExprClobberingError(f'Variables in expression "{expr}" overlap with builtins: ({s})') class AbstractEngine(metaclass=abc.ABCMeta): has_neg_frac = False def __init__(self, expr) -> None: self.expr = expr self.aligned_axes = None self.result_type = None self.result_name = None def convert(self) -> str: return printing.pprint_thing(self.expr) def evaluate(self) -> object: if not self._is_aligned: 
(self.result_type, self.aligned_axes, self.result_name) = align_terms(self.expr.terms) res = self._evaluate() return reconstruct_object(self.result_type, res, self.aligned_axes, self.expr.terms.return_type, self.result_name) @property def _is_aligned(self) -> bool: return self.aligned_axes is not None and self.result_type is not None @abc.abstractmethod def _evaluate(self): pass class NumExprEngine(AbstractEngine): has_neg_frac = True def _evaluate(self): import numexpr as ne s = self.convert() env = self.expr.env scope = env.full_scope _check_ne_builtin_clash(self.expr) return ne.evaluate(s, local_dict=scope) class PythonEngine(AbstractEngine): has_neg_frac = False def evaluate(self): return self.expr() def _evaluate(self) -> None: pass ENGINES: dict[str, type[AbstractEngine]] = {'numexpr': NumExprEngine, 'python': PythonEngine} # File: pandas-main/pandas/core/computation/eval.py """""" from __future__ import annotations import tokenize from typing import TYPE_CHECKING, Any import warnings from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.common import is_extension_array_dtype from pandas.core.computation.engines import ENGINES from pandas.core.computation.expr import PARSERS, Expr from pandas.core.computation.parsing import tokenize_string from pandas.core.computation.scope import ensure_scope from pandas.core.generic import NDFrame from pandas.io.formats.printing import pprint_thing if TYPE_CHECKING: from pandas.core.computation.ops import BinOp def _check_engine(engine: str | None) -> str: from pandas.core.computation.check import NUMEXPR_INSTALLED from pandas.core.computation.expressions import USE_NUMEXPR if engine is None: engine = 'numexpr' if USE_NUMEXPR else 'python' if engine not in ENGINES: valid_engines = list(ENGINES.keys()) raise KeyError(f"Invalid engine '{engine}' passed, valid engines are {valid_engines}") if engine == 'numexpr' and (not NUMEXPR_INSTALLED): raise ImportError("'numexpr' is not installed or an unsupported version. Cannot use engine='numexpr' for query/eval if 'numexpr' is not installed") return engine def _check_parser(parser: str) -> None: if parser not in PARSERS: raise KeyError(f"Invalid parser '{parser}' passed, valid parsers are {PARSERS.keys()}") def _check_resolvers(resolvers) -> None: if resolvers is not None: for resolver in resolvers: if not hasattr(resolver, '__getitem__'): name = type(resolver).__name__ raise TypeError(f"Resolver of type '{name}' does not implement the __getitem__ method") def _check_expression(expr) -> None: if not expr: raise ValueError('expr cannot be an empty string') def _convert_expression(expr) -> str: s = pprint_thing(expr) _check_expression(s) return s def _check_for_locals(expr: str, stack_level: int, parser: str) -> None: at_top_of_stack = stack_level == 0 not_pandas_parser = parser != 'pandas' if not_pandas_parser: msg = "The '@' prefix is only supported by the pandas parser" elif at_top_of_stack: msg = "The '@' prefix is not allowed in top-level eval calls.\nplease refer to your variables by name without the '@' prefix."
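# With the appropriate message selected above, _check_for_locals then tokenizes
# the expression and raises a SyntaxError as soon as an '@' operator token
# appears in a context that does not support local-variable references
# (a non-pandas parser, or a top-level pd.eval call).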
if at_top_of_stack or not_pandas_parser: for (toknum, tokval) in tokenize_string(expr): if toknum == tokenize.OP and tokval == '@': raise SyntaxError(msg) def eval(expr: str | BinOp, parser: str='pandas', engine: str | None=None, local_dict=None, global_dict=None, resolvers=(), level: int=0, target=None, inplace: bool=False) -> Any: inplace = validate_bool_kwarg(inplace, 'inplace') exprs: list[str | BinOp] if isinstance(expr, str): _check_expression(expr) exprs = [e.strip() for e in expr.splitlines() if e.strip() != ''] else: exprs = [expr] multi_line = len(exprs) > 1 if multi_line and target is None: raise ValueError('multi-line expressions are only valid in the context of data, use DataFrame.eval') engine = _check_engine(engine) _check_parser(parser) _check_resolvers(resolvers) ret = None first_expr = True target_modified = False for expr in exprs: expr = _convert_expression(expr) _check_for_locals(expr, level, parser) env = ensure_scope(level + 1, global_dict=global_dict, local_dict=local_dict, resolvers=resolvers, target=target) parsed_expr = Expr(expr, engine=engine, parser=parser, env=env) if engine == 'numexpr' and (is_extension_array_dtype(parsed_expr.terms.return_type) or (getattr(parsed_expr.terms, 'operand_types', None) is not None and any((is_extension_array_dtype(elem) for elem in parsed_expr.terms.operand_types)))): warnings.warn("Engine has switched to 'python' because numexpr does not support extension array dtypes. Please set your engine to python manually.", RuntimeWarning, stacklevel=find_stack_level()) engine = 'python' eng = ENGINES[engine] eng_inst = eng(parsed_expr) ret = eng_inst.evaluate() if parsed_expr.assigner is None: if multi_line: raise ValueError('Multi-line expressions are only valid if all expressions contain an assignment') if inplace: raise ValueError('Cannot operate inplace if there is no assignment') assigner = parsed_expr.assigner if env.target is not None and assigner is not None: target_modified = True if not inplace and first_expr: try: target = env.target if isinstance(target, NDFrame): target = target.copy(deep=False) else: target = target.copy() except AttributeError as err: raise ValueError('Cannot return a copy of the target') from err else: target = env.target try: if inplace and isinstance(target, NDFrame): target.loc[:, assigner] = ret else: target[assigner] = ret except (TypeError, IndexError) as err: raise ValueError('Cannot assign expression output to target') from err if not resolvers: resolvers = ({assigner: ret},) else: for resolver in resolvers: if assigner in resolver: resolver[assigner] = ret break else: resolvers += ({assigner: ret},) ret = None first_expr = False if inplace is False: return target if target_modified else ret # File: pandas-main/pandas/core/computation/expr.py """""" from __future__ import annotations import ast from functools import partial, reduce from keyword import iskeyword import tokenize from typing import TYPE_CHECKING, ClassVar, TypeVar import numpy as np from pandas.errors import UndefinedVariableError import pandas.core.common as com from pandas.core.computation.ops import ARITH_OPS_SYMS, BOOL_OPS_SYMS, CMP_OPS_SYMS, LOCAL_TAG, MATHOPS, REDUCTIONS, UNARY_OPS_SYMS, BinOp, Constant, FuncNode, Op, Term, UnaryOp, is_term from pandas.core.computation.parsing import clean_backtick_quoted_toks, tokenize_string from pandas.core.computation.scope import Scope from pandas.io.formats import printing if TYPE_CHECKING: from collections.abc import Callable def _rewrite_assign(tok: tuple[int, str]) -> tuple[int, str]: 
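# _rewrite_assign is a token-level preprocessing hook: a bare '=' token is
# rewritten to '==', so expressions written with '=' are evaluated as equality
# comparisons. It is composed with the other token hooks in _preparse below.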
(toknum, tokval) = tok return (toknum, '==' if tokval == '=' else tokval) def _replace_booleans(tok: tuple[int, str]) -> tuple[int, str]: (toknum, tokval) = tok if toknum == tokenize.OP: if tokval == '&': return (tokenize.NAME, 'and') elif tokval == '|': return (tokenize.NAME, 'or') return (toknum, tokval) return (toknum, tokval) def _replace_locals(tok: tuple[int, str]) -> tuple[int, str]: (toknum, tokval) = tok if toknum == tokenize.OP and tokval == '@': return (tokenize.OP, LOCAL_TAG) return (toknum, tokval) def _compose2(f, g): return lambda *args, **kwargs: f(g(*args, **kwargs)) def _compose(*funcs): assert len(funcs) > 1, 'At least 2 callables must be passed to compose' return reduce(_compose2, funcs) def _preparse(source: str, f=_compose(_replace_locals, _replace_booleans, _rewrite_assign, clean_backtick_quoted_toks)) -> str: assert callable(f), 'f must be callable' return tokenize.untokenize((f(x) for x in tokenize_string(source))) def _is_type(t): return lambda x: isinstance(x.value, t) _is_list = _is_type(list) _is_str = _is_type(str) _all_nodes = frozenset((node for node in (getattr(ast, name) for name in dir(ast)) if isinstance(node, type) and issubclass(node, ast.AST))) def _filter_nodes(superclass, all_nodes=_all_nodes): node_names = (node.__name__ for node in all_nodes if issubclass(node, superclass)) return frozenset(node_names) _all_node_names = frozenset((x.__name__ for x in _all_nodes)) _mod_nodes = _filter_nodes(ast.mod) _stmt_nodes = _filter_nodes(ast.stmt) _expr_nodes = _filter_nodes(ast.expr) _expr_context_nodes = _filter_nodes(ast.expr_context) _boolop_nodes = _filter_nodes(ast.boolop) _operator_nodes = _filter_nodes(ast.operator) _unary_op_nodes = _filter_nodes(ast.unaryop) _cmp_op_nodes = _filter_nodes(ast.cmpop) _comprehension_nodes = _filter_nodes(ast.comprehension) _handler_nodes = _filter_nodes(ast.excepthandler) _arguments_nodes = _filter_nodes(ast.arguments) _keyword_nodes = _filter_nodes(ast.keyword) _alias_nodes = _filter_nodes(ast.alias) _hacked_nodes = frozenset(['Assign', 'Module', 'Expr']) _unsupported_expr_nodes = frozenset(['Yield', 'GeneratorExp', 'IfExp', 'DictComp', 'SetComp', 'Repr', 'Lambda', 'Set', 'AST', 'Is', 'IsNot']) _unsupported_nodes = (_stmt_nodes | _mod_nodes | _handler_nodes | _arguments_nodes | _keyword_nodes | _alias_nodes | _expr_context_nodes | _unsupported_expr_nodes) - _hacked_nodes _base_supported_nodes = _all_node_names - _unsupported_nodes | _hacked_nodes intersection = _unsupported_nodes & _base_supported_nodes _msg = f'cannot both support and not support {intersection}' assert not intersection, _msg def _node_not_implemented(node_name: str) -> Callable[..., None]: def f(self, *args, **kwargs): raise NotImplementedError(f"'{node_name}' nodes are not implemented") return f _T = TypeVar('_T') def disallow(nodes: set[str]) -> Callable[[type[_T]], type[_T]]: def disallowed(cls: type[_T]) -> type[_T]: cls.unsupported_nodes = () for node in nodes: new_method = _node_not_implemented(node) name = f'visit_{node}' cls.unsupported_nodes += (name,) setattr(cls, name, new_method) return cls return disallowed def _op_maker(op_class, op_symbol): def f(self, node, *args, **kwargs): return partial(op_class, op_symbol, *args, **kwargs) return f _op_classes = {'binary': BinOp, 'unary': UnaryOp} def add_ops(op_classes): def f(cls): for (op_attr_name, op_class) in op_classes.items(): ops = getattr(cls, f'{op_attr_name}_ops') ops_map = getattr(cls, f'{op_attr_name}_op_nodes_map') for op in ops: op_node = ops_map[op] if op_node is not None: made_op 
= _op_maker(op_class, op) setattr(cls, f'visit_{op_node}', made_op) return cls return f @disallow(_unsupported_nodes) @add_ops(_op_classes) class BaseExprVisitor(ast.NodeVisitor): const_type: ClassVar[type[Term]] = Constant term_type: ClassVar[type[Term]] = Term binary_ops = CMP_OPS_SYMS + BOOL_OPS_SYMS + ARITH_OPS_SYMS binary_op_nodes = ('Gt', 'Lt', 'GtE', 'LtE', 'Eq', 'NotEq', 'In', 'NotIn', 'BitAnd', 'BitOr', 'And', 'Or', 'Add', 'Sub', 'Mult', 'Div', 'Pow', 'FloorDiv', 'Mod') binary_op_nodes_map = dict(zip(binary_ops, binary_op_nodes)) unary_ops = UNARY_OPS_SYMS unary_op_nodes = ('UAdd', 'USub', 'Invert', 'Not') unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes)) rewrite_map = {ast.Eq: ast.In, ast.NotEq: ast.NotIn, ast.In: ast.In, ast.NotIn: ast.NotIn} unsupported_nodes: tuple[str, ...] def __init__(self, env, engine, parser, preparser=_preparse) -> None: self.env = env self.engine = engine self.parser = parser self.preparser = preparser self.assigner = None def visit(self, node, **kwargs): if isinstance(node, str): clean = self.preparser(node) try: node = ast.fix_missing_locations(ast.parse(clean)) except SyntaxError as e: if any((iskeyword(x) for x in clean.split())): e.msg = 'Python keyword not valid identifier in numexpr query' raise e method = f'visit_{type(node).__name__}' visitor = getattr(self, method) return visitor(node, **kwargs) def visit_Module(self, node, **kwargs): if len(node.body) != 1: raise SyntaxError('only a single expression is allowed') expr = node.body[0] return self.visit(expr, **kwargs) def visit_Expr(self, node, **kwargs): return self.visit(node.value, **kwargs) def _rewrite_membership_op(self, node, left, right): op_instance = node.op op_type = type(op_instance) if is_term(left) and is_term(right) and (op_type in self.rewrite_map): (left_list, right_list) = map(_is_list, (left, right)) (left_str, right_str) = map(_is_str, (left, right)) if left_list or right_list or left_str or right_str: op_instance = self.rewrite_map[op_type]() if right_str: name = self.env.add_tmp([right.value]) right = self.term_type(name, self.env) if left_str: name = self.env.add_tmp([left.value]) left = self.term_type(name, self.env) op = self.visit(op_instance) return (op, op_instance, left, right) def _maybe_transform_eq_ne(self, node, left=None, right=None): if left is None: left = self.visit(node.left, side='left') if right is None: right = self.visit(node.right, side='right') (op, op_class, left, right) = self._rewrite_membership_op(node, left, right) return (op, op_class, left, right) def _maybe_downcast_constants(self, left, right): f32 = np.dtype(np.float32) if left.is_scalar and hasattr(left, 'value') and (not right.is_scalar) and (right.return_type == f32): name = self.env.add_tmp(np.float32(left.value)) left = self.term_type(name, self.env) if right.is_scalar and hasattr(right, 'value') and (not left.is_scalar) and (left.return_type == f32): name = self.env.add_tmp(np.float32(right.value)) right = self.term_type(name, self.env) return (left, right) def _maybe_eval(self, binop, eval_in_python): return binop.evaluate(self.env, self.engine, self.parser, self.term_type, eval_in_python) def _maybe_evaluate_binop(self, op, op_class, lhs, rhs, eval_in_python=('in', 'not in'), maybe_eval_in_python=('==', '!=', '<', '>', '<=', '>=')): res = op(lhs, rhs) if res.has_invalid_return_type: raise TypeError(f"unsupported operand type(s) for {res.op}: '{lhs.type}' and '{rhs.type}'") if self.engine != 'pytables' and (res.op in CMP_OPS_SYMS and getattr(lhs, 'is_datetime', False) or 
getattr(rhs, 'is_datetime', False)): return self._maybe_eval(res, self.binary_ops) if res.op in eval_in_python: return self._maybe_eval(res, eval_in_python) elif self.engine != 'pytables': if getattr(lhs, 'return_type', None) == object or getattr(rhs, 'return_type', None) == object: return self._maybe_eval(res, eval_in_python + maybe_eval_in_python) return res def visit_BinOp(self, node, **kwargs): (op, op_class, left, right) = self._maybe_transform_eq_ne(node) (left, right) = self._maybe_downcast_constants(left, right) return self._maybe_evaluate_binop(op, op_class, left, right) def visit_UnaryOp(self, node, **kwargs): op = self.visit(node.op) operand = self.visit(node.operand) return op(operand) def visit_Name(self, node, **kwargs) -> Term: return self.term_type(node.id, self.env, **kwargs) def visit_NameConstant(self, node, **kwargs) -> Term: return self.const_type(node.value, self.env) def visit_Num(self, node, **kwargs) -> Term: return self.const_type(node.value, self.env) def visit_Constant(self, node, **kwargs) -> Term: return self.const_type(node.value, self.env) def visit_Str(self, node, **kwargs) -> Term: name = self.env.add_tmp(node.s) return self.term_type(name, self.env) def visit_List(self, node, **kwargs) -> Term: name = self.env.add_tmp([self.visit(e)(self.env) for e in node.elts]) return self.term_type(name, self.env) visit_Tuple = visit_List def visit_Index(self, node, **kwargs): return self.visit(node.value) def visit_Subscript(self, node, **kwargs) -> Term: from pandas import eval as pd_eval value = self.visit(node.value) slobj = self.visit(node.slice) result = pd_eval(slobj, local_dict=self.env, engine=self.engine, parser=self.parser) try: v = value.value[result] except AttributeError: lhs = pd_eval(value, local_dict=self.env, engine=self.engine, parser=self.parser) v = lhs[result] name = self.env.add_tmp(v) return self.term_type(name, env=self.env) def visit_Slice(self, node, **kwargs) -> slice: lower = node.lower if lower is not None: lower = self.visit(lower).value upper = node.upper if upper is not None: upper = self.visit(upper).value step = node.step if step is not None: step = self.visit(step).value return slice(lower, upper, step) def visit_Assign(self, node, **kwargs): if len(node.targets) != 1: raise SyntaxError('can only assign a single expression') if not isinstance(node.targets[0], ast.Name): raise SyntaxError('left hand side of an assignment must be a single name') if self.env.target is None: raise ValueError('cannot assign without a target object') try: assigner = self.visit(node.targets[0], **kwargs) except UndefinedVariableError: assigner = node.targets[0].id self.assigner = getattr(assigner, 'name', assigner) if self.assigner is None: raise SyntaxError('left hand side of an assignment must be a single resolvable name') return self.visit(node.value, **kwargs) def visit_Attribute(self, node, **kwargs): attr = node.attr value = node.value ctx = node.ctx if isinstance(ctx, ast.Load): resolved = self.visit(value).value try: v = getattr(resolved, attr) name = self.env.add_tmp(v) return self.term_type(name, self.env) except AttributeError: if isinstance(value, ast.Name) and value.id == attr: return resolved raise raise ValueError(f'Invalid Attribute context {type(ctx).__name__}') def visit_Call(self, node, side=None, **kwargs): if isinstance(node.func, ast.Attribute) and node.func.attr != '__call__': res = self.visit_Attribute(node.func) elif not isinstance(node.func, ast.Name): raise TypeError('Only named functions are supported') else: try: res = 
self.visit(node.func) except UndefinedVariableError: try: res = FuncNode(node.func.id) except ValueError: raise if res is None: raise ValueError(f'Invalid function call {node.func.id}') if hasattr(res, 'value'): res = res.value if isinstance(res, FuncNode): new_args = [self.visit(arg) for arg in node.args] if node.keywords: raise TypeError(f'Function "{res.name}" does not support keyword arguments') return res(*new_args) else: new_args = [self.visit(arg)(self.env) for arg in node.args] for key in node.keywords: if not isinstance(key, ast.keyword): raise ValueError(f"keyword error in function call '{node.func.id}'") if key.arg: kwargs[key.arg] = self.visit(key.value)(self.env) name = self.env.add_tmp(res(*new_args, **kwargs)) return self.term_type(name=name, env=self.env) def translate_In(self, op): return op def visit_Compare(self, node, **kwargs): ops = node.ops comps = node.comparators if len(comps) == 1: op = self.translate_In(ops[0]) binop = ast.BinOp(op=op, left=node.left, right=comps[0]) return self.visit(binop) left = node.left values = [] for (op, comp) in zip(ops, comps): new_node = self.visit(ast.Compare(comparators=[comp], left=left, ops=[self.translate_In(op)])) left = comp values.append(new_node) return self.visit(ast.BoolOp(op=ast.And(), values=values)) def _try_visit_binop(self, bop): if isinstance(bop, (Op, Term)): return bop return self.visit(bop) def visit_BoolOp(self, node, **kwargs): def visitor(x, y): lhs = self._try_visit_binop(x) rhs = self._try_visit_binop(y) (op, op_class, lhs, rhs) = self._maybe_transform_eq_ne(node, lhs, rhs) return self._maybe_evaluate_binop(op, node.op, lhs, rhs) operands = node.values return reduce(visitor, operands) _python_not_supported = frozenset(['Dict', 'BoolOp', 'In', 'NotIn']) _numexpr_supported_calls = frozenset(REDUCTIONS + MATHOPS) @disallow((_unsupported_nodes | _python_not_supported) - (_boolop_nodes | frozenset(['BoolOp', 'Attribute', 'In', 'NotIn', 'Tuple']))) class PandasExprVisitor(BaseExprVisitor): def __init__(self, env, engine, parser, preparser=partial(_preparse, f=_compose(_replace_locals, _replace_booleans, clean_backtick_quoted_toks))) -> None: super().__init__(env, engine, parser, preparser) @disallow(_unsupported_nodes | _python_not_supported | frozenset(['Not'])) class PythonExprVisitor(BaseExprVisitor): def __init__(self, env, engine, parser, preparser=lambda source, f=None: source) -> None: super().__init__(env, engine, parser, preparser=preparser) class Expr: env: Scope engine: str parser: str def __init__(self, expr, engine: str='numexpr', parser: str='pandas', env: Scope | None=None, level: int=0) -> None: self.expr = expr self.env = env or Scope(level=level + 1) self.engine = engine self.parser = parser self._visitor = PARSERS[parser](self.env, self.engine, self.parser) self.terms = self.parse() @property def assigner(self): return getattr(self._visitor, 'assigner', None) def __call__(self): return self.terms(self.env) def __repr__(self) -> str: return printing.pprint_thing(self.terms) def __len__(self) -> int: return len(self.expr) def parse(self): return self._visitor.visit(self.expr) @property def names(self): if is_term(self.terms): return frozenset([self.terms.name]) return frozenset((term.name for term in com.flatten(self.terms))) PARSERS = {'python': PythonExprVisitor, 'pandas': PandasExprVisitor} # File: pandas-main/pandas/core/computation/expressions.py """""" from __future__ import annotations import operator from typing import TYPE_CHECKING import warnings import numpy as np from pandas._config 
import get_option from pandas.util._exceptions import find_stack_level from pandas.core import roperator from pandas.core.computation.check import NUMEXPR_INSTALLED if NUMEXPR_INSTALLED: import numexpr as ne if TYPE_CHECKING: from pandas._typing import FuncType _TEST_MODE: bool | None = None _TEST_RESULT: list[bool] = [] USE_NUMEXPR = NUMEXPR_INSTALLED _evaluate: FuncType | None = None _where: FuncType | None = None _ALLOWED_DTYPES = {'evaluate': {'int64', 'int32', 'float64', 'float32', 'bool'}, 'where': {'int64', 'float64', 'bool'}} _MIN_ELEMENTS = 1000000 def set_use_numexpr(v: bool=True) -> None: global USE_NUMEXPR if NUMEXPR_INSTALLED: USE_NUMEXPR = v global _evaluate, _where _evaluate = _evaluate_numexpr if USE_NUMEXPR else _evaluate_standard _where = _where_numexpr if USE_NUMEXPR else _where_standard def set_numexpr_threads(n=None) -> None: if NUMEXPR_INSTALLED and USE_NUMEXPR: if n is None: n = ne.detect_number_of_cores() ne.set_num_threads(n) def _evaluate_standard(op, op_str, a, b): if _TEST_MODE: _store_test_result(False) return op(a, b) def _can_use_numexpr(op, op_str, a, b, dtype_check) -> bool: if op_str is not None: if a.size > _MIN_ELEMENTS: dtypes: set[str] = set() for o in [a, b]: if hasattr(o, 'dtype'): dtypes |= {o.dtype.name} if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes: return True return False def _evaluate_numexpr(op, op_str, a, b): result = None if _can_use_numexpr(op, op_str, a, b, 'evaluate'): is_reversed = op.__name__.strip('_').startswith('r') if is_reversed: (a, b) = (b, a) a_value = a b_value = b try: result = ne.evaluate(f'a_value {op_str} b_value', local_dict={'a_value': a_value, 'b_value': b_value}, casting='safe') except TypeError: pass except NotImplementedError: if _bool_arith_fallback(op_str, a, b): pass else: raise if is_reversed: (a, b) = (b, a) if _TEST_MODE: _store_test_result(result is not None) if result is None: result = _evaluate_standard(op, op_str, a, b) return result _op_str_mapping = {operator.add: '+', roperator.radd: '+', operator.mul: '*', roperator.rmul: '*', operator.sub: '-', roperator.rsub: '-', operator.truediv: '/', roperator.rtruediv: '/', operator.floordiv: None, roperator.rfloordiv: None, operator.mod: None, roperator.rmod: None, operator.pow: '**', roperator.rpow: '**', operator.eq: '==', operator.ne: '!=', operator.le: '<=', operator.lt: '<', operator.ge: '>=', operator.gt: '>', operator.and_: '&', roperator.rand_: '&', operator.or_: '|', roperator.ror_: '|', operator.xor: '^', roperator.rxor: '^', divmod: None, roperator.rdivmod: None} def _where_standard(cond, a, b): return np.where(cond, a, b) def _where_numexpr(cond, a, b): result = None if _can_use_numexpr(None, 'where', a, b, 'where'): result = ne.evaluate('where(cond_value, a_value, b_value)', local_dict={'cond_value': cond, 'a_value': a, 'b_value': b}, casting='safe') if result is None: result = _where_standard(cond, a, b) return result set_use_numexpr(get_option('compute.use_numexpr')) def _has_bool_dtype(x): try: return x.dtype == bool except AttributeError: return isinstance(x, (bool, np.bool_)) _BOOL_OP_UNSUPPORTED = {'+': '|', '*': '&', '-': '^'} def _bool_arith_fallback(op_str, a, b) -> bool: if _has_bool_dtype(a) and _has_bool_dtype(b): if op_str in _BOOL_OP_UNSUPPORTED: warnings.warn(f'evaluating in Python space because the {op_str!r} operator is not supported by numexpr for the bool dtype, use {_BOOL_OP_UNSUPPORTED[op_str]!r} instead.', stacklevel=find_stack_level()) return True return False def evaluate(op, a, b, use_numexpr: bool=True): op_str 
= _op_str_mapping[op] if op_str is not None: if use_numexpr: return _evaluate(op, op_str, a, b) return _evaluate_standard(op, op_str, a, b) def where(cond, a, b, use_numexpr: bool=True): assert _where is not None return _where(cond, a, b) if use_numexpr else _where_standard(cond, a, b) def set_test_mode(v: bool=True) -> None: global _TEST_MODE, _TEST_RESULT _TEST_MODE = v _TEST_RESULT = [] def _store_test_result(used_numexpr: bool) -> None: if used_numexpr: _TEST_RESULT.append(used_numexpr) def get_test_result() -> list[bool]: global _TEST_RESULT res = _TEST_RESULT _TEST_RESULT = [] return res # File: pandas-main/pandas/core/computation/ops.py """""" from __future__ import annotations from datetime import datetime from functools import partial import operator from typing import TYPE_CHECKING, Literal import numpy as np from pandas._libs.tslibs import Timestamp from pandas.core.dtypes.common import is_list_like, is_scalar import pandas.core.common as com from pandas.core.computation.common import ensure_decoded, result_type_many from pandas.core.computation.scope import DEFAULT_GLOBALS from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded if TYPE_CHECKING: from collections.abc import Callable, Iterable, Iterator REDUCTIONS = ('sum', 'prod', 'min', 'max') _unary_math_ops = ('sin', 'cos', 'tan', 'exp', 'log', 'expm1', 'log1p', 'sqrt', 'sinh', 'cosh', 'tanh', 'arcsin', 'arccos', 'arctan', 'arccosh', 'arcsinh', 'arctanh', 'abs', 'log10', 'floor', 'ceil') _binary_math_ops = ('arctan2',) MATHOPS = _unary_math_ops + _binary_math_ops LOCAL_TAG = '__pd_eval_local_' class Term: def __new__(cls, name, env, side=None, encoding=None): klass = Constant if not isinstance(name, str) else cls supr_new = super(Term, klass).__new__ return supr_new(klass) is_local: bool def __init__(self, name, env, side=None, encoding=None) -> None: self._name = name self.env = env self.side = side tname = str(name) self.is_local = tname.startswith(LOCAL_TAG) or tname in DEFAULT_GLOBALS self._value = self._resolve_name() self.encoding = encoding @property def local_name(self) -> str: return self.name.replace(LOCAL_TAG, '') def __repr__(self) -> str: return pprint_thing(self.name) def __call__(self, *args, **kwargs): return self.value def evaluate(self, *args, **kwargs) -> Term: return self def _resolve_name(self): local_name = str(self.local_name) is_local = self.is_local if local_name in self.env.scope and isinstance(self.env.scope[local_name], type): is_local = False res = self.env.resolve(local_name, is_local=is_local) self.update(res) if hasattr(res, 'ndim') and isinstance(res.ndim, int) and (res.ndim > 2): raise NotImplementedError('N-dimensional objects, where N > 2, are not supported with eval') return res def update(self, value) -> None: key = self.name if isinstance(key, str): self.env.swapkey(self.local_name, key, new_value=value) self.value = value @property def is_scalar(self) -> bool: return is_scalar(self._value) @property def type(self): try: return self._value.values.dtype except AttributeError: try: return self._value.dtype except AttributeError: return type(self._value) return_type = type @property def raw(self) -> str: return f'{type(self).__name__}(name={self.name!r}, type={self.type})' @property def is_datetime(self) -> bool: try: t = self.type.type except AttributeError: t = self.type return issubclass(t, (datetime, np.datetime64)) @property def value(self): return self._value @value.setter def value(self, new_value) -> None: self._value = new_value @property def name(self): return 
self._name @property def ndim(self) -> int: return self._value.ndim class Constant(Term): def _resolve_name(self): return self._name @property def name(self): return self.value def __repr__(self) -> str: return repr(self.name) _bool_op_map = {'not': '~', 'and': '&', 'or': '|'} class Op: op: str def __init__(self, op: str, operands: Iterable[Term | Op], encoding=None) -> None: self.op = _bool_op_map.get(op, op) self.operands = operands self.encoding = encoding def __iter__(self) -> Iterator: return iter(self.operands) def __repr__(self) -> str: parened = (f'({pprint_thing(opr)})' for opr in self.operands) return pprint_thing(f' {self.op} '.join(parened)) @property def return_type(self): if self.op in CMP_OPS_SYMS + BOOL_OPS_SYMS: return np.bool_ return result_type_many(*(term.type for term in com.flatten(self))) @property def has_invalid_return_type(self) -> bool: types = self.operand_types obj_dtype_set = frozenset([np.dtype('object')]) return self.return_type == object and types - obj_dtype_set @property def operand_types(self): return frozenset((term.type for term in com.flatten(self))) @property def is_scalar(self) -> bool: return all((operand.is_scalar for operand in self.operands)) @property def is_datetime(self) -> bool: try: t = self.return_type.type except AttributeError: t = self.return_type return issubclass(t, (datetime, np.datetime64)) def _in(x, y): try: return x.isin(y) except AttributeError: if is_list_like(x): try: return y.isin(x) except AttributeError: pass return x in y def _not_in(x, y): try: return ~x.isin(y) except AttributeError: if is_list_like(x): try: return ~y.isin(x) except AttributeError: pass return x not in y CMP_OPS_SYMS = ('>', '<', '>=', '<=', '==', '!=', 'in', 'not in') _cmp_ops_funcs = (operator.gt, operator.lt, operator.ge, operator.le, operator.eq, operator.ne, _in, _not_in) _cmp_ops_dict = dict(zip(CMP_OPS_SYMS, _cmp_ops_funcs)) BOOL_OPS_SYMS = ('&', '|', 'and', 'or') _bool_ops_funcs = (operator.and_, operator.or_, operator.and_, operator.or_) _bool_ops_dict = dict(zip(BOOL_OPS_SYMS, _bool_ops_funcs)) ARITH_OPS_SYMS = ('+', '-', '*', '/', '**', '//', '%') _arith_ops_funcs = (operator.add, operator.sub, operator.mul, operator.truediv, operator.pow, operator.floordiv, operator.mod) _arith_ops_dict = dict(zip(ARITH_OPS_SYMS, _arith_ops_funcs)) _binary_ops_dict = {} for d in (_cmp_ops_dict, _bool_ops_dict, _arith_ops_dict): _binary_ops_dict.update(d) def is_term(obj) -> bool: return isinstance(obj, Term) class BinOp(Op): def __init__(self, op: str, lhs, rhs) -> None: super().__init__(op, (lhs, rhs)) self.lhs = lhs self.rhs = rhs self._disallow_scalar_only_bool_ops() self.convert_values() try: self.func = _binary_ops_dict[op] except KeyError as err: keys = list(_binary_ops_dict.keys()) raise ValueError(f'Invalid binary operator {op!r}, valid operators are {keys}') from err def __call__(self, env): left = self.lhs(env) right = self.rhs(env) return self.func(left, right) def evaluate(self, env, engine: str, parser, term_type, eval_in_python): if engine == 'python': res = self(env) else: left = self.lhs.evaluate(env, engine=engine, parser=parser, term_type=term_type, eval_in_python=eval_in_python) right = self.rhs.evaluate(env, engine=engine, parser=parser, term_type=term_type, eval_in_python=eval_in_python) if self.op in eval_in_python: res = self.func(left.value, right.value) else: from pandas.core.computation.eval import eval res = eval(self, local_dict=env, engine=engine, parser=parser) name = env.add_tmp(res) return term_type(name, env=env) def 
convert_values(self) -> None: def stringify(value): encoder: Callable if self.encoding is not None: encoder = partial(pprint_thing_encoded, encoding=self.encoding) else: encoder = pprint_thing return encoder(value) (lhs, rhs) = (self.lhs, self.rhs) if is_term(lhs) and lhs.is_datetime and is_term(rhs) and rhs.is_scalar: v = rhs.value if isinstance(v, (int, float)): v = stringify(v) v = Timestamp(ensure_decoded(v)) if v.tz is not None: v = v.tz_convert('UTC') self.rhs.update(v) if is_term(rhs) and rhs.is_datetime and is_term(lhs) and lhs.is_scalar: v = lhs.value if isinstance(v, (int, float)): v = stringify(v) v = Timestamp(ensure_decoded(v)) if v.tz is not None: v = v.tz_convert('UTC') self.lhs.update(v) def _disallow_scalar_only_bool_ops(self) -> None: rhs = self.rhs lhs = self.lhs rhs_rt = rhs.return_type rhs_rt = getattr(rhs_rt, 'type', rhs_rt) lhs_rt = lhs.return_type lhs_rt = getattr(lhs_rt, 'type', lhs_rt) if (lhs.is_scalar or rhs.is_scalar) and self.op in _bool_ops_dict and (not (issubclass(rhs_rt, (bool, np.bool_)) and issubclass(lhs_rt, (bool, np.bool_)))): raise NotImplementedError('cannot evaluate scalar only bool ops') UNARY_OPS_SYMS = ('+', '-', '~', 'not') _unary_ops_funcs = (operator.pos, operator.neg, operator.invert, operator.invert) _unary_ops_dict = dict(zip(UNARY_OPS_SYMS, _unary_ops_funcs)) class UnaryOp(Op): def __init__(self, op: Literal['+', '-', '~', 'not'], operand) -> None: super().__init__(op, (operand,)) self.operand = operand try: self.func = _unary_ops_dict[op] except KeyError as err: raise ValueError(f'Invalid unary operator {op!r}, valid operators are {UNARY_OPS_SYMS}') from err def __call__(self, env) -> MathCall: operand = self.operand(env) return self.func(operand) def __repr__(self) -> str: return pprint_thing(f'{self.op}({self.operand})') @property def return_type(self) -> np.dtype: operand = self.operand if operand.return_type == np.dtype('bool'): return np.dtype('bool') if isinstance(operand, Op) and (operand.op in _cmp_ops_dict or operand.op in _bool_ops_dict): return np.dtype('bool') return np.dtype('int') class MathCall(Op): def __init__(self, func, args) -> None: super().__init__(func.name, args) self.func = func def __call__(self, env): operands = [op(env) for op in self.operands] return self.func.func(*operands) def __repr__(self) -> str: operands = map(str, self.operands) return pprint_thing(f"{self.op}({','.join(operands)})") class FuncNode: def __init__(self, name: str) -> None: if name not in MATHOPS: raise ValueError(f'"{name}" is not a supported function') self.name = name self.func = getattr(np, name) def __call__(self, *args) -> MathCall: return MathCall(self, args) # File: pandas-main/pandas/core/computation/parsing.py """""" from __future__ import annotations from enum import Enum from io import StringIO from keyword import iskeyword import token import tokenize from typing import TYPE_CHECKING if TYPE_CHECKING: from collections.abc import Hashable, Iterator BACKTICK_QUOTED_STRING = 100 def create_valid_python_identifier(name: str) -> str: if name.isidentifier() and (not iskeyword(name)): return name gen = ((c, ''.join((chr(b) for b in c.encode('ascii', 'backslashreplace')))) for c in name) name = ''.join((c_escaped.replace('\\', '_UNICODE_' if c != c_escaped else '_BACKSLASH_') for (c, c_escaped) in gen)) special_characters_replacements = {char: f'_{token.tok_name[tokval]}_' for (char, tokval) in tokenize.EXACT_TOKEN_TYPES.items()} special_characters_replacements.update({' ': '_', '?': '_QUESTIONMARK_', '!': '_EXCLAMATIONMARK_', '$': 
'_DOLLARSIGN_', '€': '_EUROSIGN_', '°': '_DEGREESIGN_', "'": '_SINGLEQUOTE_', '"': '_DOUBLEQUOTE_', '#': '_HASH_', '`': '_BACKTICK_'}) name = ''.join([special_characters_replacements.get(char, char) for char in name]) name = f'BACKTICK_QUOTED_STRING_{name}' if not name.isidentifier(): raise SyntaxError(f"Could not convert '{name}' to a valid Python identifier.") return name def clean_backtick_quoted_toks(tok: tuple[int, str]) -> tuple[int, str]: (toknum, tokval) = tok if toknum == BACKTICK_QUOTED_STRING: return (tokenize.NAME, create_valid_python_identifier(tokval)) return (toknum, tokval) def clean_column_name(name: Hashable) -> Hashable: try: name = name.replace('`', '``') if isinstance(name, str) else name tokenized = tokenize_string(f'`{name}`') tokval = next(tokenized)[1] return create_valid_python_identifier(tokval) except SyntaxError: return name def tokenize_backtick_quoted_string(token_generator: Iterator[tokenize.TokenInfo], source: str, string_start: int) -> tuple[int, str]: for (_, tokval, start, _, _) in token_generator: if tokval == '`': string_end = start[1] break return (BACKTICK_QUOTED_STRING, source[string_start:string_end]) class ParseState(Enum): DEFAULT = 0 IN_BACKTICK = 1 IN_SINGLE_QUOTE = 2 IN_DOUBLE_QUOTE = 3 def _split_by_backtick(s: str) -> list[tuple[bool, str]]: substrings = [] substr: list[str] = [] i = 0 parse_state = ParseState.DEFAULT while i < len(s): char = s[i] match char: case '`': if parse_state == ParseState.DEFAULT: if substr: substrings.append((False, ''.join(substr))) substr = [char] i += 1 parse_state = ParseState.IN_BACKTICK continue elif parse_state == ParseState.IN_BACKTICK: next_char = s[i + 1] if i != len(s) - 1 else None if next_char == '`': substr.append(char) substr.append(next_char) i += 2 continue else: substr.append(char) substrings.append((True, ''.join(substr))) substr = [] i += 1 parse_state = ParseState.DEFAULT continue case "'": if parse_state == ParseState.DEFAULT: parse_state = ParseState.IN_SINGLE_QUOTE elif parse_state == ParseState.IN_SINGLE_QUOTE and s[i - 1] != '\\': parse_state = ParseState.DEFAULT case '"': if parse_state == ParseState.DEFAULT: parse_state = ParseState.IN_DOUBLE_QUOTE elif parse_state == ParseState.IN_DOUBLE_QUOTE and s[i - 1] != '\\': parse_state = ParseState.DEFAULT substr.append(char) i += 1 if substr: substrings.append((False, ''.join(substr))) return substrings def tokenize_string(source: str) -> Iterator[tuple[int, str]]: source = ''.join((create_valid_python_identifier(substring[1:-1]) if is_backtick_quoted else substring for (is_backtick_quoted, substring) in _split_by_backtick(source))) line_reader = StringIO(source).readline token_generator = tokenize.generate_tokens(line_reader) for (toknum, tokval, _, _, _) in token_generator: yield (toknum, tokval) # File: pandas-main/pandas/core/computation/pytables.py """""" from __future__ import annotations import ast from decimal import Decimal, InvalidOperation from functools import partial from typing import TYPE_CHECKING, Any, ClassVar import numpy as np from pandas._libs.tslibs import Timedelta, Timestamp from pandas.errors import UndefinedVariableError from pandas.core.dtypes.common import is_list_like import pandas.core.common as com from pandas.core.computation import expr, ops, scope as _scope from pandas.core.computation.common import ensure_decoded from pandas.core.computation.expr import BaseExprVisitor from pandas.core.computation.ops import is_term from pandas.core.construction import extract_array from pandas.core.indexes.base import Index 
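# Illustrative aside (a minimal sketch, not part of the pandas sources): the parsing.py
# helpers above mangle backtick-quoted column labels into valid Python identifiers,
# which is what DataFrame.query relies on for expressions such as
# df.query("`total sales!` > 0"). The exact mangled string is an implementation
# detail, so this sketch only checks structural properties.
from pandas.core.computation.parsing import clean_column_name

mangled = clean_column_name("total sales!")
assert mangled.isidentifier()                 # e.g. 'BACKTICK_QUOTED_STRING_total_sales_EXCLAMATIONMARK_'
assert clean_column_name("price") == "price"  # already a valid identifier, returned unchanged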
from pandas.io.formats.printing import pprint_thing, pprint_thing_encoded if TYPE_CHECKING: from pandas._typing import Self, npt class PyTablesScope(_scope.Scope): __slots__ = ('queryables',) queryables: dict[str, Any] def __init__(self, level: int, global_dict=None, local_dict=None, queryables: dict[str, Any] | None=None) -> None: super().__init__(level + 1, global_dict=global_dict, local_dict=local_dict) self.queryables = queryables or {} class Term(ops.Term): env: PyTablesScope def __new__(cls, name, env, side=None, encoding=None): if isinstance(name, str): klass = cls else: klass = Constant return object.__new__(klass) def __init__(self, name, env: PyTablesScope, side=None, encoding=None) -> None: super().__init__(name, env, side=side, encoding=encoding) def _resolve_name(self): if self.side == 'left': if self.name not in self.env.queryables: raise NameError(f'name {self.name!r} is not defined') return self.name try: return self.env.resolve(self.name, is_local=False) except UndefinedVariableError: return self.name @property def value(self): return self._value class Constant(Term): def __init__(self, name, env: PyTablesScope, side=None, encoding=None) -> None: assert isinstance(env, PyTablesScope), type(env) super().__init__(name, env, side=side, encoding=encoding) def _resolve_name(self): return self._name class BinOp(ops.BinOp): _max_selectors = 31 op: str queryables: dict[str, Any] condition: str | None def __init__(self, op: str, lhs, rhs, queryables: dict[str, Any], encoding) -> None: super().__init__(op, lhs, rhs) self.queryables = queryables self.encoding = encoding self.condition = None def _disallow_scalar_only_bool_ops(self) -> None: pass def prune(self, klass): def pr(left, right): if left is None: return right elif right is None: return left k = klass if isinstance(left, ConditionBinOp): if isinstance(right, ConditionBinOp): k = JointConditionBinOp elif isinstance(left, k): return left elif isinstance(right, k): return right elif isinstance(left, FilterBinOp): if isinstance(right, FilterBinOp): k = JointFilterBinOp elif isinstance(left, k): return left elif isinstance(right, k): return right return k(self.op, left, right, queryables=self.queryables, encoding=self.encoding).evaluate() (left, right) = (self.lhs, self.rhs) if is_term(left) and is_term(right): res = pr(left.value, right.value) elif not is_term(left) and is_term(right): res = pr(left.prune(klass), right.value) elif is_term(left) and (not is_term(right)): res = pr(left.value, right.prune(klass)) elif not (is_term(left) or is_term(right)): res = pr(left.prune(klass), right.prune(klass)) return res def conform(self, rhs): if not is_list_like(rhs): rhs = [rhs] if isinstance(rhs, np.ndarray): rhs = rhs.ravel() return rhs @property def is_valid(self) -> bool: return self.lhs in self.queryables @property def is_in_table(self) -> bool: return self.queryables.get(self.lhs) is not None @property def kind(self): return getattr(self.queryables.get(self.lhs), 'kind', None) @property def meta(self): return getattr(self.queryables.get(self.lhs), 'meta', None) @property def metadata(self): return getattr(self.queryables.get(self.lhs), 'metadata', None) def generate(self, v) -> str: val = v.tostring(self.encoding) return f'({self.lhs} {self.op} {val})' def convert_value(self, v) -> TermValue: def stringify(value): if self.encoding is not None: return pprint_thing_encoded(value, encoding=self.encoding) return pprint_thing(value) kind = ensure_decoded(self.kind) meta = ensure_decoded(self.meta) if kind == 'datetime' or (kind and 
kind.startswith('datetime64')): if isinstance(v, (int, float)): v = stringify(v) v = ensure_decoded(v) v = Timestamp(v).as_unit('ns') if v.tz is not None: v = v.tz_convert('UTC') return TermValue(v, v._value, kind) elif kind in ('timedelta64', 'timedelta'): if isinstance(v, str): v = Timedelta(v) else: v = Timedelta(v, unit='s') v = v.as_unit('ns')._value return TermValue(int(v), v, kind) elif meta == 'category': metadata = extract_array(self.metadata, extract_numpy=True) result: npt.NDArray[np.intp] | np.intp | int if v not in metadata: result = -1 else: result = metadata.searchsorted(v, side='left') return TermValue(result, result, 'integer') elif kind == 'integer': try: v_dec = Decimal(v) except InvalidOperation: float(v) else: v = int(v_dec.to_integral_exact(rounding='ROUND_HALF_EVEN')) return TermValue(v, v, kind) elif kind == 'float': v = float(v) return TermValue(v, v, kind) elif kind == 'bool': if isinstance(v, str): v = v.strip().lower() not in ['false', 'f', 'no', 'n', 'none', '0', '[]', '{}', ''] else: v = bool(v) return TermValue(v, v, kind) elif isinstance(v, str): return TermValue(v, stringify(v), 'string') else: raise TypeError(f'Cannot compare {v} of type {type(v)} to {kind} column') def convert_values(self) -> None: pass class FilterBinOp(BinOp): filter: tuple[Any, Any, Index] | None = None def __repr__(self) -> str: if self.filter is None: return 'Filter: Not Initialized' return pprint_thing(f'[Filter : [{self.filter[0]}] -> [{self.filter[1]}]') def invert(self) -> Self: if self.filter is not None: self.filter = (self.filter[0], self.generate_filter_op(invert=True), self.filter[2]) return self def format(self): return [self.filter] def evaluate(self) -> Self | None: if not self.is_valid: raise ValueError(f'query term is not valid [{self}]') rhs = self.conform(self.rhs) values = list(rhs) if self.is_in_table: if self.op in ['==', '!='] and len(values) > self._max_selectors: filter_op = self.generate_filter_op() self.filter = (self.lhs, filter_op, Index(values)) return self return None if self.op in ['==', '!=']: filter_op = self.generate_filter_op() self.filter = (self.lhs, filter_op, Index(values)) else: raise TypeError(f'passing a filterable condition to a non-table indexer [{self}]') return self def generate_filter_op(self, invert: bool=False): if self.op == '!=' and (not invert) or (self.op == '==' and invert): return lambda axis, vals: ~axis.isin(vals) else: return lambda axis, vals: axis.isin(vals) class JointFilterBinOp(FilterBinOp): def format(self): raise NotImplementedError('unable to collapse Joint Filters') def evaluate(self) -> Self: return self class ConditionBinOp(BinOp): def __repr__(self) -> str: return pprint_thing(f'[Condition : [{self.condition}]]') def invert(self): raise NotImplementedError('cannot use an invert condition when passing to numexpr') def format(self): return self.condition def evaluate(self) -> Self | None: if not self.is_valid: raise ValueError(f'query term is not valid [{self}]') if not self.is_in_table: return None rhs = self.conform(self.rhs) values = [self.convert_value(v) for v in rhs] if self.op in ['==', '!=']: if len(values) <= self._max_selectors: vs = [self.generate(v) for v in values] self.condition = f"({' | '.join(vs)})" else: return None else: self.condition = self.generate(values[0]) return self class JointConditionBinOp(ConditionBinOp): def evaluate(self) -> Self: self.condition = f'({self.lhs.condition} {self.op} {self.rhs.condition})' return self class UnaryOp(ops.UnaryOp): def prune(self, klass): if self.op != '~': 
raise NotImplementedError('UnaryOp only support invert type ops') operand = self.operand operand = operand.prune(klass) if operand is not None and (issubclass(klass, ConditionBinOp) and operand.condition is not None or (not issubclass(klass, ConditionBinOp) and issubclass(klass, FilterBinOp) and (operand.filter is not None))): return operand.invert() return None class PyTablesExprVisitor(BaseExprVisitor): const_type: ClassVar[type[ops.Term]] = Constant term_type: ClassVar[type[Term]] = Term def __init__(self, env, engine, parser, **kwargs) -> None: super().__init__(env, engine, parser) for bin_op in self.binary_ops: bin_node = self.binary_op_nodes_map[bin_op] setattr(self, f'visit_{bin_node}', lambda node, bin_op=bin_op: partial(BinOp, bin_op, **kwargs)) def visit_UnaryOp(self, node, **kwargs) -> ops.Term | UnaryOp | None: if isinstance(node.op, (ast.Not, ast.Invert)): return UnaryOp('~', self.visit(node.operand)) elif isinstance(node.op, ast.USub): return self.const_type(-self.visit(node.operand).value, self.env) elif isinstance(node.op, ast.UAdd): raise NotImplementedError('Unary addition not supported') return None def visit_Index(self, node, **kwargs): return self.visit(node.value).value def visit_Assign(self, node, **kwargs): cmpr = ast.Compare(ops=[ast.Eq()], left=node.targets[0], comparators=[node.value]) return self.visit(cmpr) def visit_Subscript(self, node, **kwargs) -> ops.Term: value = self.visit(node.value) slobj = self.visit(node.slice) try: value = value.value except AttributeError: pass if isinstance(slobj, Term): slobj = slobj.value try: return self.const_type(value[slobj], self.env) except TypeError as err: raise ValueError(f'cannot subscript {value!r} with {slobj!r}') from err def visit_Attribute(self, node, **kwargs): attr = node.attr value = node.value ctx = type(node.ctx) if ctx == ast.Load: resolved = self.visit(value) try: resolved = resolved.value except AttributeError: pass try: return self.term_type(getattr(resolved, attr), self.env) except AttributeError: if isinstance(value, ast.Name) and value.id == attr: return resolved raise ValueError(f'Invalid Attribute context {ctx.__name__}') def translate_In(self, op): return ast.Eq() if isinstance(op, ast.In) else op def _rewrite_membership_op(self, node, left, right): return (self.visit(node.op), node.op, left, right) def _validate_where(w): if not (isinstance(w, (PyTablesExpr, str)) or is_list_like(w)): raise TypeError('where must be passed as a string, PyTablesExpr, or list-like of PyTablesExpr') return w class PyTablesExpr(expr.Expr): _visitor: PyTablesExprVisitor | None env: PyTablesScope expr: str def __init__(self, where, queryables: dict[str, Any] | None=None, encoding=None, scope_level: int=0) -> None: where = _validate_where(where) self.encoding = encoding self.condition = None self.filter = None self.terms = None self._visitor = None local_dict: _scope.DeepChainMap[Any, Any] | None = None if isinstance(where, PyTablesExpr): local_dict = where.env.scope _where = where.expr elif is_list_like(where): where = list(where) for (idx, w) in enumerate(where): if isinstance(w, PyTablesExpr): local_dict = w.env.scope else: where[idx] = _validate_where(w) _where = ' & '.join([f'({w})' for w in com.flatten(where)]) else: _where = where self.expr = _where self.env = PyTablesScope(scope_level + 1, local_dict=local_dict) if queryables is not None and isinstance(self.expr, str): self.env.queryables.update(queryables) self._visitor = PyTablesExprVisitor(self.env, queryables=queryables, parser='pytables', engine='pytables', 
encoding=encoding) self.terms = self.parse() def __repr__(self) -> str: if self.terms is not None: return pprint_thing(self.terms) return pprint_thing(self.expr) def evaluate(self): try: self.condition = self.terms.prune(ConditionBinOp) except AttributeError as err: raise ValueError(f'cannot process expression [{self.expr}], [{self}] is not a valid condition') from err try: self.filter = self.terms.prune(FilterBinOp) except AttributeError as err: raise ValueError(f'cannot process expression [{self.expr}], [{self}] is not a valid filter') from err return (self.condition, self.filter) class TermValue: def __init__(self, value, converted, kind: str) -> None: assert isinstance(kind, str), kind self.value = value self.converted = converted self.kind = kind def tostring(self, encoding) -> str: if self.kind == 'string': if encoding is not None: return str(self.converted) return f'"{self.converted}"' elif self.kind == 'float': return repr(self.converted) return str(self.converted) def maybe_expression(s) -> bool: if not isinstance(s, str): return False operations = PyTablesExprVisitor.binary_ops + PyTablesExprVisitor.unary_ops + ('=',) return any((op in s for op in operations)) # File: pandas-main/pandas/core/computation/scope.py """""" from __future__ import annotations from collections import ChainMap import datetime import inspect from io import StringIO import itertools import pprint import struct import sys from typing import TypeVar import numpy as np from pandas._libs.tslibs import Timestamp from pandas.errors import UndefinedVariableError _KT = TypeVar('_KT') _VT = TypeVar('_VT') class DeepChainMap(ChainMap[_KT, _VT]): def __setitem__(self, key: _KT, value: _VT) -> None: for mapping in self.maps: if key in mapping: mapping[key] = value return self.maps[0][key] = value def __delitem__(self, key: _KT) -> None: for mapping in self.maps: if key in mapping: del mapping[key] return raise KeyError(key) def ensure_scope(level: int, global_dict=None, local_dict=None, resolvers=(), target=None) -> Scope: return Scope(level + 1, global_dict=global_dict, local_dict=local_dict, resolvers=resolvers, target=target) def _replacer(x) -> str: try: hexin = ord(x) except TypeError: hexin = x return hex(hexin) def _raw_hex_id(obj) -> str: packed = struct.pack('@P', id(obj)) return ''.join([_replacer(x) for x in packed]) DEFAULT_GLOBALS = {'Timestamp': Timestamp, 'datetime': datetime.datetime, 'True': True, 'False': False, 'list': list, 'tuple': tuple, 'inf': np.inf, 'Inf': np.inf} def _get_pretty_string(obj) -> str: sio = StringIO() pprint.pprint(obj, stream=sio) return sio.getvalue() class Scope: __slots__ = ['level', 'scope', 'target', 'resolvers', 'temps'] level: int scope: DeepChainMap resolvers: DeepChainMap temps: dict def __init__(self, level: int, global_dict=None, local_dict=None, resolvers=(), target=None) -> None: self.level = level + 1 self.scope = DeepChainMap(DEFAULT_GLOBALS.copy()) self.target = target if isinstance(local_dict, Scope): self.scope.update(local_dict.scope) if local_dict.target is not None: self.target = local_dict.target self._update(local_dict.level) frame = sys._getframe(self.level) try: scope_global = self.scope.new_child((global_dict if global_dict is not None else frame.f_globals).copy()) self.scope = DeepChainMap(scope_global) if not isinstance(local_dict, Scope): scope_local = self.scope.new_child((local_dict if local_dict is not None else frame.f_locals).copy()) self.scope = DeepChainMap(scope_local) finally: del frame if isinstance(local_dict, Scope): resolvers += 
tuple(local_dict.resolvers.maps) self.resolvers = DeepChainMap(*resolvers) self.temps = {} def __repr__(self) -> str: scope_keys = _get_pretty_string(list(self.scope.keys())) res_keys = _get_pretty_string(list(self.resolvers.keys())) return f'{type(self).__name__}(scope={scope_keys}, resolvers={res_keys})' @property def has_resolvers(self) -> bool: return bool(len(self.resolvers)) def resolve(self, key: str, is_local: bool): try: if is_local: return self.scope[key] if self.has_resolvers: return self.resolvers[key] assert not is_local and (not self.has_resolvers) return self.scope[key] except KeyError: try: return self.temps[key] except KeyError as err: raise UndefinedVariableError(key, is_local) from err def swapkey(self, old_key: str, new_key: str, new_value=None) -> None: if self.has_resolvers: maps = self.resolvers.maps + self.scope.maps else: maps = self.scope.maps maps.append(self.temps) for mapping in maps: if old_key in mapping: mapping[new_key] = new_value return def _get_vars(self, stack, scopes: list[str]) -> None: variables = itertools.product(scopes, stack) for (scope, (frame, _, _, _, _, _)) in variables: try: d = getattr(frame, f'f_{scope}') self.scope = DeepChainMap(self.scope.new_child(d)) finally: del frame def _update(self, level: int) -> None: sl = level + 1 stack = inspect.stack() try: self._get_vars(stack[:sl], scopes=['locals']) finally: del stack[:], stack def add_tmp(self, value) -> str: name = f'{type(value).__name__}_{self.ntemps}_{_raw_hex_id(self)}' assert name not in self.temps self.temps[name] = value assert name in self.temps return name @property def ntemps(self) -> int: return len(self.temps) @property def full_scope(self) -> DeepChainMap: maps = [self.temps] + self.resolvers.maps + self.scope.maps return DeepChainMap(*maps) # File: pandas-main/pandas/core/config_init.py """""" from __future__ import annotations from collections.abc import Callable import os from typing import Any import pandas._config.config as cf from pandas._config.config import is_bool, is_callable, is_instance_factory, is_int, is_nonnegative_int, is_one_of_factory, is_str, is_text use_bottleneck_doc = '\n: bool\n Use the bottleneck library to accelerate if it is installed,\n the default is True\n Valid values: False,True\n' def use_bottleneck_cb(key: str) -> None: from pandas.core import nanops nanops.set_use_bottleneck(cf.get_option(key)) use_numexpr_doc = '\n: bool\n Use the numexpr library to accelerate computation if it is installed,\n the default is True\n Valid values: False,True\n' def use_numexpr_cb(key: str) -> None: from pandas.core.computation import expressions expressions.set_use_numexpr(cf.get_option(key)) use_numba_doc = '\n: bool\n Use the numba engine option for select operations if it is installed,\n the default is False\n Valid values: False,True\n' def use_numba_cb(key: str) -> None: from pandas.core.util import numba_ numba_.set_use_numba(cf.get_option(key)) with cf.config_prefix('compute'): cf.register_option('use_bottleneck', True, use_bottleneck_doc, validator=is_bool, cb=use_bottleneck_cb) cf.register_option('use_numexpr', True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb) cf.register_option('use_numba', False, use_numba_doc, validator=is_bool, cb=use_numba_cb) pc_precision_doc = '\n: int\n Floating point output precision in terms of number of places after the\n decimal, for regular formatting as well as scientific notation. 
Similar\n to ``precision`` in :meth:`numpy.set_printoptions`.\n' pc_max_rows_doc = "\n: int\n If max_rows is exceeded, switch to truncate view. Depending on\n `large_repr`, objects are either centrally truncated or printed as\n a summary view. 'None' value means unlimited.\n\n In case python/IPython is running in a terminal and `large_repr`\n equals 'truncate' this can be set to 0 and pandas will auto-detect\n the height of the terminal and print a truncated object which fits\n the screen height. The IPython notebook, IPython qtconsole, or\n IDLE do not run in a terminal and hence it is not possible to do\n correct auto-detection.\n" pc_min_rows_doc = '\n: int\n The numbers of rows to show in a truncated view (when `max_rows` is\n exceeded). Ignored when `max_rows` is set to None or 0. When set to\n None, follows the value of `max_rows`.\n' pc_max_cols_doc = "\n: int\n If max_cols is exceeded, switch to truncate view. Depending on\n `large_repr`, objects are either centrally truncated or printed as\n a summary view. 'None' value means unlimited.\n\n In case python/IPython is running in a terminal and `large_repr`\n equals 'truncate' this can be set to 0 or None and pandas will auto-detect\n the width of the terminal and print a truncated object which fits\n the screen width. The IPython notebook, IPython qtconsole, or IDLE\n do not run in a terminal and hence it is not possible to do\n correct auto-detection and defaults to 20.\n" pc_max_categories_doc = '\n: int\n This sets the maximum number of categories pandas should output when\n printing out a `Categorical` or a Series of dtype "category".\n' pc_max_info_cols_doc = '\n: int\n max_info_columns is used in DataFrame.info method to decide if\n per column information will be printed.\n' pc_nb_repr_h_doc = '\n: boolean\n When True, IPython notebook will use html representation for\n pandas objects (if it is available).\n' pc_pprint_nest_depth = '\n: int\n Controls the number of nested levels to process when pretty-printing\n' pc_multi_sparse_doc = '\n: boolean\n "sparsify" MultiIndex display (don\'t display repeated\n elements in outer levels within groups)\n' float_format_doc = '\n: callable\n The callable should accept a floating point number and return\n a string with the desired format of the number. This is used\n in some places like SeriesFormatter.\n See formats.format.EngFormatter for an example.\n' max_colwidth_doc = '\n: int or None\n The maximum width in characters of a column in the repr of\n a pandas data structure. When the column overflows, a "..."\n placeholder is embedded in the output. A \'None\' value means unlimited.\n' colheader_justify_doc = "\n: 'left'/'right'\n Controls the justification of column headers. used by DataFrameFormatter.\n" pc_expand_repr_doc = '\n: boolean\n Whether to print out the full DataFrame repr for wide DataFrames across\n multiple lines, `max_columns` is still respected, but the output will\n wrap-around across multiple "pages" if its width exceeds `display.width`.\n' pc_show_dimensions_doc = "\n: boolean or 'truncate'\n Whether to print out dimensions at the end of DataFrame repr.\n If 'truncate' is specified, only print out the dimensions if the\n frame is truncated (e.g. 
not display all rows and/or columns)\n" pc_east_asian_width_doc = '\n: boolean\n Whether to use the Unicode East Asian Width to calculate the display text\n width.\n Enabling this may affect to the performance (default: False)\n' pc_table_schema_doc = '\n: boolean\n Whether to publish a Table Schema representation for frontends\n that support it.\n (default: False)\n' pc_html_border_doc = '\n: int\n A ``border=value`` attribute is inserted in the ``<table>`` tag\n for the DataFrame HTML repr.\n' pc_html_use_mathjax_doc = ': boolean\n When True, Jupyter notebook will process table contents using MathJax,\n rendering mathematical expressions enclosed by the dollar symbol.\n (default: True)\n' pc_max_dir_items = ": int\n The number of items that will be added to `dir(...)`. 'None' value means\n unlimited. Because dir is cached, changing this option will not immediately\n affect already existing dataframes until a column is deleted or added.\n\n This is for instance used to suggest columns from a dataframe to tab\n completion.\n" pc_width_doc = '\n: int\n Width of the display in characters. In case python/IPython is running in\n a terminal this can be set to None and pandas will correctly auto-detect\n the width.\n Note that the IPython notebook, IPython qtconsole, or IDLE do not run in a\n terminal and hence it is not possible to correctly detect the width.\n' pc_chop_threshold_doc = '\n: float or None\n if set to a float value, all float values smaller than the given threshold\n will be displayed as exactly 0 by repr and friends.\n' pc_max_seq_items = '\n: int or None\n When pretty-printing a long sequence, no more then `max_seq_items`\n will be printed. If items are omitted, they will be denoted by the\n addition of "..." to the resulting string.\n\n If set to None, the number of items to be printed is unlimited.\n' pc_max_info_rows_doc = '\n: int\n df.info() will usually show null-counts for each column.\n For large frames this can be quite slow. max_info_rows and max_info_cols\n limit this null check only to frames with smaller dimensions than\n specified.\n' pc_large_repr_doc = "\n: 'truncate'/'info'\n For DataFrames exceeding max_rows/max_cols, the repr (and HTML repr) can\n show a truncated table, or switch to the view from\n df.info() (the behaviour in earlier versions of pandas).\n" pc_memory_usage_doc = "\n: bool, string or None\n This specifies if the memory usage of a DataFrame should be displayed when\n df.info() is called.
Valid values True,False,'deep'\n" def table_schema_cb(key: str) -> None: from pandas.io.formats.printing import enable_data_resource_formatter enable_data_resource_formatter(cf.get_option(key)) def is_terminal() -> bool: try: ip = get_ipython() except NameError: return True else: if hasattr(ip, 'kernel'): return False else: return True with cf.config_prefix('display'): cf.register_option('precision', 6, pc_precision_doc, validator=is_nonnegative_int) cf.register_option('float_format', None, float_format_doc, validator=is_one_of_factory([None, is_callable])) cf.register_option('max_info_rows', 1690785, pc_max_info_rows_doc, validator=is_int) cf.register_option('max_rows', 60, pc_max_rows_doc, validator=is_nonnegative_int) cf.register_option('min_rows', 10, pc_min_rows_doc, validator=is_instance_factory((type(None), int))) cf.register_option('max_categories', 8, pc_max_categories_doc, validator=is_int) cf.register_option('max_colwidth', 50, max_colwidth_doc, validator=is_nonnegative_int) if is_terminal(): max_cols = 0 else: max_cols = 20 cf.register_option('max_columns', max_cols, pc_max_cols_doc, validator=is_nonnegative_int) cf.register_option('large_repr', 'truncate', pc_large_repr_doc, validator=is_one_of_factory(['truncate', 'info'])) cf.register_option('max_info_columns', 100, pc_max_info_cols_doc, validator=is_int) cf.register_option('colheader_justify', 'right', colheader_justify_doc, validator=is_text) cf.register_option('notebook_repr_html', True, pc_nb_repr_h_doc, validator=is_bool) cf.register_option('pprint_nest_depth', 3, pc_pprint_nest_depth, validator=is_int) cf.register_option('multi_sparse', True, pc_multi_sparse_doc, validator=is_bool) cf.register_option('expand_frame_repr', True, pc_expand_repr_doc) cf.register_option('show_dimensions', 'truncate', pc_show_dimensions_doc, validator=is_one_of_factory([True, False, 'truncate'])) cf.register_option('chop_threshold', None, pc_chop_threshold_doc) cf.register_option('max_seq_items', 100, pc_max_seq_items) cf.register_option('width', 80, pc_width_doc, validator=is_instance_factory((type(None), int))) cf.register_option('memory_usage', True, pc_memory_usage_doc, validator=is_one_of_factory([None, True, False, 'deep'])) cf.register_option('unicode.east_asian_width', False, pc_east_asian_width_doc, validator=is_bool) cf.register_option('unicode.ambiguous_as_wide', False, pc_east_asian_width_doc, validator=is_bool) cf.register_option('html.table_schema', False, pc_table_schema_doc, validator=is_bool, cb=table_schema_cb) cf.register_option('html.border', 1, pc_html_border_doc, validator=is_int) cf.register_option('html.use_mathjax', True, pc_html_use_mathjax_doc, validator=is_bool) cf.register_option('max_dir_items', 100, pc_max_dir_items, validator=is_nonnegative_int) tc_sim_interactive_doc = '\n: boolean\n Whether to simulate interactive mode for purposes of testing\n' with cf.config_prefix('mode'): cf.register_option('sim_interactive', False, tc_sim_interactive_doc) copy_on_write_doc = '\n: bool\n Use new copy-view behaviour using Copy-on-Write. 
Defaults to False,\n unless overridden by the \'PANDAS_COPY_ON_WRITE\' environment variable\n (if set to "1" for True, needs to be set before pandas is imported).\n' with cf.config_prefix('mode'): cf.register_option('copy_on_write', 'warn' if os.environ.get('PANDAS_COPY_ON_WRITE', '0') == 'warn' else os.environ.get('PANDAS_COPY_ON_WRITE', '0') == '1', copy_on_write_doc, validator=is_one_of_factory([True, False, 'warn'])) chained_assignment = '\n: string\n Raise an exception, warn, or no action if trying to use chained assignment,\n The default is warn\n' with cf.config_prefix('mode'): cf.register_option('chained_assignment', 'warn', chained_assignment, validator=is_one_of_factory([None, 'warn', 'raise'])) performance_warnings = '\n: boolean\n Whether to show or hide PerformanceWarnings.\n' with cf.config_prefix('mode'): cf.register_option('performance_warnings', True, performance_warnings, validator=is_bool) string_storage_doc = '\n: string\n The default storage for StringDtype.\n' def is_valid_string_storage(value: Any) -> None: legal_values = ['auto', 'python', 'pyarrow'] if value not in legal_values: msg = 'Value must be one of python|pyarrow' if value == 'pyarrow_numpy': msg += ". 'pyarrow_numpy' was specified, but this option should be enabled using pandas.options.future.infer_string instead" raise ValueError(msg) with cf.config_prefix('mode'): cf.register_option('string_storage', 'auto', string_storage_doc, validator=is_valid_string_storage) reader_engine_doc = "\n: string\n The default Excel reader engine for '{ext}' files. Available options:\n auto, {others}.\n" _xls_options = ['xlrd', 'calamine'] _xlsm_options = ['xlrd', 'openpyxl', 'calamine'] _xlsx_options = ['xlrd', 'openpyxl', 'calamine'] _ods_options = ['odf', 'calamine'] _xlsb_options = ['pyxlsb', 'calamine'] with cf.config_prefix('io.excel.xls'): cf.register_option('reader', 'auto', reader_engine_doc.format(ext='xls', others=', '.join(_xls_options)), validator=is_one_of_factory(_xls_options + ['auto'])) with cf.config_prefix('io.excel.xlsm'): cf.register_option('reader', 'auto', reader_engine_doc.format(ext='xlsm', others=', '.join(_xlsm_options)), validator=is_one_of_factory(_xlsm_options + ['auto'])) with cf.config_prefix('io.excel.xlsx'): cf.register_option('reader', 'auto', reader_engine_doc.format(ext='xlsx', others=', '.join(_xlsx_options)), validator=is_one_of_factory(_xlsx_options + ['auto'])) with cf.config_prefix('io.excel.ods'): cf.register_option('reader', 'auto', reader_engine_doc.format(ext='ods', others=', '.join(_ods_options)), validator=is_one_of_factory(_ods_options + ['auto'])) with cf.config_prefix('io.excel.xlsb'): cf.register_option('reader', 'auto', reader_engine_doc.format(ext='xlsb', others=', '.join(_xlsb_options)), validator=is_one_of_factory(_xlsb_options + ['auto'])) writer_engine_doc = "\n: string\n The default Excel writer engine for '{ext}' files. 
Available options:\n auto, {others}.\n" _xlsm_options = ['openpyxl'] _xlsx_options = ['openpyxl', 'xlsxwriter'] _ods_options = ['odf'] with cf.config_prefix('io.excel.xlsm'): cf.register_option('writer', 'auto', writer_engine_doc.format(ext='xlsm', others=', '.join(_xlsm_options)), validator=str) with cf.config_prefix('io.excel.xlsx'): cf.register_option('writer', 'auto', writer_engine_doc.format(ext='xlsx', others=', '.join(_xlsx_options)), validator=str) with cf.config_prefix('io.excel.ods'): cf.register_option('writer', 'auto', writer_engine_doc.format(ext='ods', others=', '.join(_ods_options)), validator=str) parquet_engine_doc = "\n: string\n The default parquet reader/writer engine. Available options:\n 'auto', 'pyarrow', 'fastparquet', the default is 'auto'\n" with cf.config_prefix('io.parquet'): cf.register_option('engine', 'auto', parquet_engine_doc, validator=is_one_of_factory(['auto', 'pyarrow', 'fastparquet'])) sql_engine_doc = "\n: string\n The default sql reader/writer engine. Available options:\n 'auto', 'sqlalchemy', the default is 'auto'\n" with cf.config_prefix('io.sql'): cf.register_option('engine', 'auto', sql_engine_doc, validator=is_one_of_factory(['auto', 'sqlalchemy'])) plotting_backend_doc = '\n: str\n The plotting backend to use. The default value is "matplotlib", the\n backend provided with pandas. Other backends can be specified by\n providing the name of the module that implements the backend.\n' def register_plotting_backend_cb(key: str | None) -> None: if key == 'matplotlib': return from pandas.plotting._core import _get_plot_backend _get_plot_backend(key) with cf.config_prefix('plotting'): cf.register_option('backend', defval='matplotlib', doc=plotting_backend_doc, validator=register_plotting_backend_cb) register_converter_doc = "\n: bool or 'auto'.\n Whether to register converters with matplotlib's units registry for\n dates, times, datetimes, and Periods. Toggling to False will remove\n the converters, restoring any converters that pandas overwrote.\n" def register_converter_cb(key: str) -> None: from pandas.plotting import deregister_matplotlib_converters, register_matplotlib_converters if cf.get_option(key): register_matplotlib_converters() else: deregister_matplotlib_converters() with cf.config_prefix('plotting.matplotlib'): cf.register_option('register_converters', 'auto', register_converter_doc, validator=is_one_of_factory(['auto', True, False]), cb=register_converter_cb) styler_sparse_index_doc = '\n: bool\n Whether to sparsify the display of a hierarchical index. Setting to False will\n display each explicit level element in a hierarchical key for each row.\n' styler_sparse_columns_doc = '\n: bool\n Whether to sparsify the display of hierarchical columns. 
Setting to False will\n display each explicit level element in a hierarchical key for each column.\n' styler_render_repr = '\n: str\n Determine which output to use in Jupyter Notebook in {"html", "latex"}.\n' styler_max_elements = '\n: int\n The maximum number of data-cell (<td>
) elements that will be rendered before\n trimming will occur over columns, rows or both if needed.\n' styler_max_rows = '\n: int, optional\n The maximum number of rows that will be rendered. May still be reduced to\n satisfy ``max_elements``, which takes precedence.\n' styler_max_columns = '\n: int, optional\n The maximum number of columns that will be rendered. May still be reduced to\n satisfy ``max_elements``, which takes precedence.\n' styler_precision = '\n: int\n The precision for floats and complex numbers.\n' styler_decimal = '\n: str\n The character representation for the decimal separator for floats and complex.\n' styler_thousands = '\n: str, optional\n The character representation for thousands separator for floats, int and complex.\n' styler_na_rep = '\n: str, optional\n The string representation for values identified as missing.\n' styler_escape = '\n: str, optional\n Whether to escape certain characters according to the given context; html or latex.\n' styler_formatter = '\n: str, callable, dict, optional\n A formatter object to be used as default within ``Styler.format``.\n' styler_multirow_align = '\n: {"c", "t", "b"}\n The specifier for vertical alignment of sparsified LaTeX multirows.\n' styler_multicol_align = '\n: {"r", "c", "l", "naive-l", "naive-r"}\n The specifier for horizontal alignment of sparsified LaTeX multicolumns. Pipe\n decorators can also be added to non-naive values to draw vertical\n rules, e.g. "\\|r" will draw a rule on the left side of right aligned merged cells.\n' styler_hrules = '\n: bool\n Whether to add horizontal rules on top and bottom and below the headers.\n' styler_environment = '\n: str\n The environment to replace ``\\begin{table}``. If "longtable" is used results\n in a specific longtable environment format.\n' styler_encoding = '\n: str\n The encoding used for output HTML and LaTeX files.\n' styler_mathjax = '\n: bool\n If False will render special CSS classes to table attributes that indicate Mathjax\n will not be used in Jupyter Notebook.\n' with cf.config_prefix('styler'): cf.register_option('sparse.index', True, styler_sparse_index_doc, validator=is_bool) cf.register_option('sparse.columns', True, styler_sparse_columns_doc, validator=is_bool) cf.register_option('render.repr', 'html', styler_render_repr, validator=is_one_of_factory(['html', 'latex'])) cf.register_option('render.max_elements', 2 ** 18, styler_max_elements, validator=is_nonnegative_int) cf.register_option('render.max_rows', None, styler_max_rows, validator=is_nonnegative_int) cf.register_option('render.max_columns', None, styler_max_columns, validator=is_nonnegative_int) cf.register_option('render.encoding', 'utf-8', styler_encoding, validator=is_str) cf.register_option('format.decimal', '.', styler_decimal, validator=is_str) cf.register_option('format.precision', 6, styler_precision, validator=is_nonnegative_int) cf.register_option('format.thousands', None, styler_thousands, validator=is_instance_factory((type(None), str))) cf.register_option('format.na_rep', None, styler_na_rep, validator=is_instance_factory((type(None), str))) cf.register_option('format.escape', None, styler_escape, validator=is_one_of_factory([None, 'html', 'latex', 'latex-math'])) cf.register_option('format.formatter', None, styler_formatter, validator=is_instance_factory((type(None), dict, Callable, str))) cf.register_option('html.mathjax', True, styler_mathjax, validator=is_bool) cf.register_option('latex.multirow_align', 'c', styler_multirow_align, validator=is_one_of_factory(['c', 't', 'b', 
'naive'])) val_mca = ['r', '|r|', '|r', 'r|', 'c', '|c|', '|c', 'c|', 'l', '|l|', '|l', 'l|'] val_mca += ['naive-l', 'naive-r'] cf.register_option('latex.multicol_align', 'r', styler_multicol_align, validator=is_one_of_factory(val_mca)) cf.register_option('latex.hrules', False, styler_hrules, validator=is_bool) cf.register_option('latex.environment', None, styler_environment, validator=is_instance_factory((type(None), str))) with cf.config_prefix('future'): cf.register_option('infer_string', True if os.environ.get('PANDAS_FUTURE_INFER_STRING', '0') == '1' else False, 'Whether to infer sequence of str objects as pyarrow string dtype, which will be the default in pandas 3.0 (at which point this option will be deprecated).', validator=is_one_of_factory([True, False])) cf.register_option('no_silent_downcasting', False, 'Whether to opt-in to the future behavior which will *not* silently downcast results from Series and DataFrame `where`, `mask`, and `clip` methods. Silent downcasting will be removed in pandas 3.0 (at which point this option will be deprecated).', validator=is_one_of_factory([True, False])) # File: pandas-main/pandas/core/construction.py """""" from __future__ import annotations from typing import TYPE_CHECKING, cast, overload import numpy as np from numpy import ma from pandas._config import using_string_dtype from pandas._libs import lib from pandas._libs.tslibs import get_supported_dtype, is_supported_dtype from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.cast import construct_1d_arraylike_from_scalar, construct_1d_object_array_from_listlike, maybe_cast_to_datetime, maybe_cast_to_integer_array, maybe_convert_platform, maybe_infer_to_datetimelike, maybe_promote from pandas.core.dtypes.common import ensure_object, is_list_like, is_object_dtype, pandas_dtype from pandas.core.dtypes.dtypes import NumpyEADtype from pandas.core.dtypes.generic import ABCDataFrame, ABCExtensionArray, ABCIndex, ABCSeries from pandas.core.dtypes.missing import isna import pandas.core.common as com if TYPE_CHECKING: from collections.abc import Sequence from pandas._typing import AnyArrayLike, ArrayLike, Dtype, DtypeObj, T from pandas import Index, Series from pandas.core.arrays import DatetimeArray, ExtensionArray, TimedeltaArray def array(data: Sequence[object] | AnyArrayLike, dtype: Dtype | None=None, copy: bool=True) -> ExtensionArray: from pandas.core.arrays import BooleanArray, DatetimeArray, ExtensionArray, FloatingArray, IntegerArray, NumpyExtensionArray, TimedeltaArray from pandas.core.arrays.string_ import StringDtype if lib.is_scalar(data): msg = f"Cannot pass scalar '{data}' to 'pandas.array'." 
raise ValueError(msg) elif isinstance(data, ABCDataFrame): raise TypeError("Cannot pass DataFrame to 'pandas.array'") if dtype is None and isinstance(data, (ABCSeries, ABCIndex, ExtensionArray)): dtype = data.dtype data = extract_array(data, extract_numpy=True) if dtype is not None: dtype = pandas_dtype(dtype) if isinstance(data, ExtensionArray) and (dtype is None or data.dtype == dtype): if copy: return data.copy() return data if isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() return cls._from_sequence(data, dtype=dtype, copy=copy) if dtype is None: was_ndarray = isinstance(data, np.ndarray) if not was_ndarray or data.dtype == object: result = lib.maybe_convert_objects(ensure_object(data), convert_non_numeric=True, convert_to_nullable_dtype=True, dtype_if_all_nat=None) result = ensure_wrapped_if_datetimelike(result) if isinstance(result, np.ndarray): if len(result) == 0 and (not was_ndarray): return FloatingArray._from_sequence(data, dtype='Float64') return NumpyExtensionArray._from_sequence(data, dtype=result.dtype, copy=copy) if result is data and copy: return result.copy() return result data = cast(np.ndarray, data) result = ensure_wrapped_if_datetimelike(data) if result is not data: result = cast('DatetimeArray | TimedeltaArray', result) if copy and result.dtype == data.dtype: return result.copy() return result if data.dtype.kind in 'SU': dtype = StringDtype() cls = dtype.construct_array_type() return cls._from_sequence(data, dtype=dtype, copy=copy) elif data.dtype.kind in 'iu': return IntegerArray._from_sequence(data, copy=copy) elif data.dtype.kind == 'f': if data.dtype == np.float16: return NumpyExtensionArray._from_sequence(data, dtype=data.dtype, copy=copy) return FloatingArray._from_sequence(data, copy=copy) elif data.dtype.kind == 'b': return BooleanArray._from_sequence(data, dtype='boolean', copy=copy) else: return NumpyExtensionArray._from_sequence(data, dtype=data.dtype, copy=copy) if lib.is_np_dtype(dtype, 'M') and is_supported_dtype(dtype): return DatetimeArray._from_sequence(data, dtype=dtype, copy=copy) if lib.is_np_dtype(dtype, 'm') and is_supported_dtype(dtype): return TimedeltaArray._from_sequence(data, dtype=dtype, copy=copy) elif lib.is_np_dtype(dtype, 'mM'): raise ValueError("datetime64 and timedelta64 dtype resolutions other than 's', 'ms', 'us', and 'ns' are no longer supported.") return NumpyExtensionArray._from_sequence(data, dtype=dtype, copy=copy) _typs = frozenset({'index', 'rangeindex', 'multiindex', 'datetimeindex', 'timedeltaindex', 'periodindex', 'categoricalindex', 'intervalindex', 'series'}) @overload def extract_array(obj: Series | Index, extract_numpy: bool=..., extract_range: bool=...) -> ArrayLike: ... @overload def extract_array(obj: T, extract_numpy: bool=..., extract_range: bool=...) -> T | ArrayLike: ... 
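# Illustrative aside (a minimal sketch, not part of the module above): pandas.array
# as defined earlier infers a nullable extension array when dtype is None and
# rejects scalars and DataFrames up front.
import pandas as pd

arr = pd.array([1, 2, None])   # inferred as a nullable integer extension array
assert str(arr.dtype) == "Int64"

try:
    pd.array(1)                # scalars hit the explicit ValueError guard above
except ValueError:
    pass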
def extract_array(obj: T, extract_numpy: bool=False, extract_range: bool=False) -> T | ArrayLike: typ = getattr(obj, '_typ', None) if typ in _typs: if typ == 'rangeindex': if extract_range: return obj._values return obj return obj._values elif extract_numpy and typ == 'npy_extension': return obj.to_numpy() return obj def ensure_wrapped_if_datetimelike(arr): if isinstance(arr, np.ndarray): if arr.dtype.kind == 'M': from pandas.core.arrays import DatetimeArray dtype = get_supported_dtype(arr.dtype) return DatetimeArray._from_sequence(arr, dtype=dtype) elif arr.dtype.kind == 'm': from pandas.core.arrays import TimedeltaArray dtype = get_supported_dtype(arr.dtype) return TimedeltaArray._from_sequence(arr, dtype=dtype) return arr def sanitize_masked_array(data: ma.MaskedArray) -> np.ndarray: mask = ma.getmaskarray(data) if mask.any(): (dtype, fill_value) = maybe_promote(data.dtype, np.nan) dtype = cast(np.dtype, dtype) data = ma.asarray(data.astype(dtype, copy=True)) data.soften_mask() data[mask] = fill_value else: data = data.copy() return data def sanitize_array(data, index: Index | None, dtype: DtypeObj | None=None, copy: bool=False, *, allow_2d: bool=False) -> ArrayLike: original_dtype = dtype if isinstance(data, ma.MaskedArray): data = sanitize_masked_array(data) if isinstance(dtype, NumpyEADtype): dtype = dtype.numpy_dtype infer_object = not isinstance(data, (ABCIndex, ABCSeries)) data = extract_array(data, extract_numpy=True, extract_range=True) if isinstance(data, np.ndarray) and data.ndim == 0: if dtype is None: dtype = data.dtype data = lib.item_from_zerodim(data) elif isinstance(data, range): data = range_to_ndarray(data) copy = False if not is_list_like(data): if index is None: raise ValueError('index must be specified when data is not list-like') if isinstance(data, str) and using_string_dtype() and (original_dtype is None): from pandas.core.arrays.string_ import StringDtype dtype = StringDtype(na_value=np.nan) data = construct_1d_arraylike_from_scalar(data, len(index), dtype) return data elif isinstance(data, ABCExtensionArray): if dtype is not None: subarr = data.astype(dtype, copy=copy) elif copy: subarr = data.copy() else: subarr = data elif isinstance(dtype, ExtensionDtype): _sanitize_non_ordered(data) cls = dtype.construct_array_type() subarr = cls._from_sequence(data, dtype=dtype, copy=copy) elif isinstance(data, np.ndarray): if isinstance(data, np.matrix): data = data.A if dtype is None: subarr = data if data.dtype == object and infer_object: subarr = maybe_infer_to_datetimelike(data) elif data.dtype.kind == 'U' and using_string_dtype(): from pandas.core.arrays.string_ import StringDtype dtype = StringDtype(na_value=np.nan) subarr = dtype.construct_array_type()._from_sequence(data, dtype=dtype) if (subarr is data or (subarr.dtype == 'str' and subarr.dtype.storage == 'python')) and copy: subarr = subarr.copy() else: subarr = _try_cast(data, dtype, copy) elif hasattr(data, '__array__'): if not copy: data = np.asarray(data) else: data = np.array(data, copy=copy) return sanitize_array(data, index=index, dtype=dtype, copy=False, allow_2d=allow_2d) else: _sanitize_non_ordered(data) data = list(data) if len(data) == 0 and dtype is None: subarr = np.array([], dtype=np.float64) elif dtype is not None: subarr = _try_cast(data, dtype, copy) else: subarr = maybe_convert_platform(data) if subarr.dtype == object: subarr = cast(np.ndarray, subarr) subarr = maybe_infer_to_datetimelike(subarr) subarr = _sanitize_ndim(subarr, data, dtype, index, allow_2d=allow_2d) if isinstance(subarr, 
np.ndarray): dtype = cast(np.dtype, dtype) subarr = _sanitize_str_dtypes(subarr, data, dtype, copy) return subarr def range_to_ndarray(rng: range) -> np.ndarray: try: arr = np.arange(rng.start, rng.stop, rng.step, dtype='int64') except OverflowError: if rng.start >= 0 and rng.step > 0 or rng.step < 0 <= rng.stop: try: arr = np.arange(rng.start, rng.stop, rng.step, dtype='uint64') except OverflowError: arr = construct_1d_object_array_from_listlike(list(rng)) else: arr = construct_1d_object_array_from_listlike(list(rng)) return arr def _sanitize_non_ordered(data) -> None: if isinstance(data, (set, frozenset)): raise TypeError(f"'{type(data).__name__}' type is unordered") def _sanitize_ndim(result: ArrayLike, data, dtype: DtypeObj | None, index: Index | None, *, allow_2d: bool=False) -> ArrayLike: if getattr(result, 'ndim', 0) == 0: raise ValueError('result should be arraylike with ndim > 0') if result.ndim == 1: result = _maybe_repeat(result, index) elif result.ndim > 1: if isinstance(data, np.ndarray): if allow_2d: return result raise ValueError(f'Data must be 1-dimensional, got ndarray of shape {data.shape} instead') if is_object_dtype(dtype) and isinstance(dtype, ExtensionDtype): result = com.asarray_tuplesafe(data, dtype=np.dtype('object')) cls = dtype.construct_array_type() result = cls._from_sequence(result, dtype=dtype) else: result = com.asarray_tuplesafe(data, dtype=dtype) return result def _sanitize_str_dtypes(result: np.ndarray, data, dtype: np.dtype | None, copy: bool) -> np.ndarray: if issubclass(result.dtype.type, str): if not lib.is_scalar(data): if not np.all(isna(data)): data = np.asarray(data, dtype=dtype) if not copy: result = np.asarray(data, dtype=object) else: result = np.array(data, dtype=object, copy=copy) return result def _maybe_repeat(arr: ArrayLike, index: Index | None) -> ArrayLike: if index is not None: if 1 == len(arr) != len(index): arr = arr.repeat(len(index)) return arr def _try_cast(arr: list | np.ndarray, dtype: np.dtype, copy: bool) -> ArrayLike: is_ndarray = isinstance(arr, np.ndarray) if dtype == object: if not is_ndarray: subarr = construct_1d_object_array_from_listlike(arr) return subarr return ensure_wrapped_if_datetimelike(arr).astype(dtype, copy=copy) elif dtype.kind == 'U': if is_ndarray: arr = cast(np.ndarray, arr) shape = arr.shape if arr.ndim > 1: arr = arr.ravel() else: shape = (len(arr),) return lib.ensure_string_array(arr, convert_na_value=False, copy=copy).reshape(shape) elif dtype.kind in 'mM': return maybe_cast_to_datetime(arr, dtype) elif dtype.kind in 'iu': subarr = maybe_cast_to_integer_array(arr, dtype) elif not copy: subarr = np.asarray(arr, dtype=dtype) else: subarr = np.array(arr, dtype=dtype, copy=copy) return subarr # File: pandas-main/pandas/core/dtypes/api.py from pandas.core.dtypes.common import is_any_real_numeric_dtype, is_array_like, is_bool, is_bool_dtype, is_categorical_dtype, is_complex, is_complex_dtype, is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64_ns_dtype, is_datetime64tz_dtype, is_dict_like, is_dtype_equal, is_extension_array_dtype, is_file_like, is_float, is_float_dtype, is_hashable, is_int64_dtype, is_integer, is_integer_dtype, is_interval_dtype, is_iterator, is_list_like, is_named_tuple, is_number, is_numeric_dtype, is_object_dtype, is_period_dtype, is_re, is_re_compilable, is_scalar, is_signed_integer_dtype, is_sparse, is_string_dtype, is_timedelta64_dtype, is_timedelta64_ns_dtype, is_unsigned_integer_dtype, pandas_dtype __all__ = ['is_any_real_numeric_dtype', 'is_array_like', 'is_bool', 
'is_bool_dtype', 'is_categorical_dtype', 'is_complex', 'is_complex_dtype', 'is_datetime64_any_dtype', 'is_datetime64_dtype', 'is_datetime64_ns_dtype', 'is_datetime64tz_dtype', 'is_dict_like', 'is_dtype_equal', 'is_extension_array_dtype', 'is_file_like', 'is_float', 'is_float_dtype', 'is_hashable', 'is_int64_dtype', 'is_integer', 'is_integer_dtype', 'is_interval_dtype', 'is_iterator', 'is_list_like', 'is_named_tuple', 'is_number', 'is_numeric_dtype', 'is_object_dtype', 'is_period_dtype', 'is_re', 'is_re_compilable', 'is_scalar', 'is_signed_integer_dtype', 'is_sparse', 'is_string_dtype', 'is_timedelta64_dtype', 'is_timedelta64_ns_dtype', 'is_unsigned_integer_dtype', 'pandas_dtype'] # File: pandas-main/pandas/core/dtypes/astype.py """""" from __future__ import annotations import inspect from typing import TYPE_CHECKING, overload import warnings import numpy as np from pandas._libs import lib from pandas._libs.tslibs.timedeltas import array_to_timedelta64 from pandas.errors import IntCastingNaNError from pandas.core.dtypes.common import is_object_dtype, is_string_dtype, pandas_dtype from pandas.core.dtypes.dtypes import ExtensionDtype, NumpyEADtype if TYPE_CHECKING: from pandas._typing import ArrayLike, DtypeObj, IgnoreRaise from pandas.core.arrays import ExtensionArray @overload def _astype_nansafe(arr: np.ndarray, dtype: np.dtype, copy: bool=..., skipna: bool=...) -> np.ndarray: ... @overload def _astype_nansafe(arr: np.ndarray, dtype: ExtensionDtype, copy: bool=..., skipna: bool=...) -> ExtensionArray: ... def _astype_nansafe(arr: np.ndarray, dtype: DtypeObj, copy: bool=True, skipna: bool=False) -> ArrayLike: if isinstance(dtype, ExtensionDtype): return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy) elif not isinstance(dtype, np.dtype): raise ValueError('dtype must be np.dtype or ExtensionDtype') if arr.dtype.kind in 'mM': from pandas.core.construction import ensure_wrapped_if_datetimelike arr = ensure_wrapped_if_datetimelike(arr) res = arr.astype(dtype, copy=copy) return np.asarray(res) if issubclass(dtype.type, str): shape = arr.shape if arr.ndim > 1: arr = arr.ravel() return lib.ensure_string_array(arr, skipna=skipna, convert_na_value=False).reshape(shape) elif np.issubdtype(arr.dtype, np.floating) and dtype.kind in 'iu': return _astype_float_to_int_nansafe(arr, dtype, copy) elif arr.dtype == object: if lib.is_np_dtype(dtype, 'M'): from pandas.core.arrays import DatetimeArray dta = DatetimeArray._from_sequence(arr, dtype=dtype) return dta._ndarray elif lib.is_np_dtype(dtype, 'm'): from pandas.core.construction import ensure_wrapped_if_datetimelike tdvals = array_to_timedelta64(arr).view('m8[ns]') tda = ensure_wrapped_if_datetimelike(tdvals) return tda.astype(dtype, copy=False)._ndarray if dtype.name in ('datetime64', 'timedelta64'): msg = f"The '{dtype.name}' dtype has no unit. Please pass in '{dtype.name}[ns]' instead." 
raise ValueError(msg) if copy or arr.dtype == object or dtype == object: return arr.astype(dtype, copy=True) return arr.astype(dtype, copy=copy) def _astype_float_to_int_nansafe(values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray: if not np.isfinite(values).all(): raise IntCastingNaNError('Cannot convert non-finite values (NA or inf) to integer') if dtype.kind == 'u': if not (values >= 0).all(): raise ValueError(f'Cannot losslessly cast from {values.dtype} to {dtype}') with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=RuntimeWarning) return values.astype(dtype, copy=copy) def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool=False) -> ArrayLike: if values.dtype == dtype: if copy: return values.copy() return values if not isinstance(values, np.ndarray): values = values.astype(dtype, copy=copy) else: values = _astype_nansafe(values, dtype, copy=copy) if isinstance(dtype, np.dtype) and issubclass(values.dtype.type, str): values = np.array(values, dtype=object) return values def astype_array_safe(values: ArrayLike, dtype, copy: bool=False, errors: IgnoreRaise='raise') -> ArrayLike: errors_legal_values = ('raise', 'ignore') if errors not in errors_legal_values: invalid_arg = f"Expected value of kwarg 'errors' to be one of {list(errors_legal_values)}. Supplied value is '{errors}'" raise ValueError(invalid_arg) if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype): msg = f"Expected an instance of {dtype.__name__}, but got the class instead. Try instantiating 'dtype'." raise TypeError(msg) dtype = pandas_dtype(dtype) if isinstance(dtype, NumpyEADtype): dtype = dtype.numpy_dtype try: new_values = astype_array(values, dtype, copy=copy) except (ValueError, TypeError): if errors == 'ignore': new_values = values else: raise return new_values def astype_is_view(dtype: DtypeObj, new_dtype: DtypeObj) -> bool: if dtype.kind in 'iufb' and dtype.kind == new_dtype.kind: if hasattr(dtype, 'itemsize') and hasattr(new_dtype, 'itemsize'): return dtype.itemsize == new_dtype.itemsize if isinstance(dtype, np.dtype) and (not isinstance(new_dtype, np.dtype)): (new_dtype, dtype) = (dtype, new_dtype) if dtype == new_dtype: return True elif isinstance(dtype, np.dtype) and isinstance(new_dtype, np.dtype): return False elif is_string_dtype(dtype) and is_string_dtype(new_dtype): return True elif is_object_dtype(dtype) and new_dtype.kind == 'O': return True elif dtype.kind in 'mM' and new_dtype.kind in 'mM': dtype = getattr(dtype, 'numpy_dtype', dtype) new_dtype = getattr(new_dtype, 'numpy_dtype', new_dtype) return getattr(dtype, 'unit', None) == getattr(new_dtype, 'unit', None) numpy_dtype = getattr(dtype, 'numpy_dtype', None) new_numpy_dtype = getattr(new_dtype, 'numpy_dtype', None) if numpy_dtype is None and isinstance(dtype, np.dtype): numpy_dtype = dtype if new_numpy_dtype is None and isinstance(new_dtype, np.dtype): new_numpy_dtype = new_dtype if numpy_dtype is not None and new_numpy_dtype is not None: return numpy_dtype == new_numpy_dtype return True # File: pandas-main/pandas/core/dtypes/base.py """""" from __future__ import annotations from typing import TYPE_CHECKING, Any, TypeVar, cast, overload import numpy as np from pandas._libs import missing as libmissing from pandas._libs.hashtable import object_hash from pandas._libs.properties import cache_readonly from pandas.errors import AbstractMethodError from pandas.core.dtypes.generic import ABCDataFrame, ABCIndex, ABCSeries if TYPE_CHECKING: from pandas._typing import DtypeObj, Self, Shape, npt, type_t from 
pandas import Index from pandas.core.arrays import ExtensionArray ExtensionDtypeT = TypeVar('ExtensionDtypeT', bound='ExtensionDtype') class ExtensionDtype: _metadata: tuple[str, ...] = () def __str__(self) -> str: return self.name def __eq__(self, other: object) -> bool: if isinstance(other, str): try: other = self.construct_from_string(other) except TypeError: return False if isinstance(other, type(self)): return all((getattr(self, attr) == getattr(other, attr) for attr in self._metadata)) return False def __hash__(self) -> int: return object_hash(tuple((getattr(self, attr) for attr in self._metadata))) def __ne__(self, other: object) -> bool: return not self.__eq__(other) @property def na_value(self) -> object: return np.nan @property def type(self) -> type_t[Any]: raise AbstractMethodError(self) @property def kind(self) -> str: return 'O' @property def name(self) -> str: raise AbstractMethodError(self) @property def names(self) -> list[str] | None: return None @classmethod def construct_array_type(cls) -> type_t[ExtensionArray]: raise AbstractMethodError(cls) def empty(self, shape: Shape) -> ExtensionArray: cls = self.construct_array_type() return cls._empty(shape, dtype=self) @classmethod def construct_from_string(cls, string: str) -> Self: if not isinstance(string, str): raise TypeError(f"'construct_from_string' expects a string, got {type(string)}") assert isinstance(cls.name, str), (cls, type(cls.name)) if string != cls.name: raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'") return cls() @classmethod def is_dtype(cls, dtype: object) -> bool: dtype = getattr(dtype, 'dtype', dtype) if isinstance(dtype, (ABCSeries, ABCIndex, ABCDataFrame, np.dtype)): return False elif dtype is None: return False elif isinstance(dtype, cls): return True if isinstance(dtype, str): try: return cls.construct_from_string(dtype) is not None except TypeError: return False return False @property def _is_numeric(self) -> bool: return False @property def _is_boolean(self) -> bool: return False def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: if len(set(dtypes)) == 1: return self else: return None @property def _can_hold_na(self) -> bool: return True @property def _is_immutable(self) -> bool: return False @cache_readonly def index_class(self) -> type_t[Index]: from pandas import Index return Index @property def _supports_2d(self) -> bool: return False @property def _can_fast_transpose(self) -> bool: return False class StorageExtensionDtype(ExtensionDtype): name: str _metadata = ('storage',) def __init__(self, storage: str | None=None) -> None: self.storage = storage def __repr__(self) -> str: return f'{self.name}[{self.storage}]' def __str__(self) -> str: return self.name def __eq__(self, other: object) -> bool: if isinstance(other, str) and other == self.name: return True return super().__eq__(other) def __hash__(self) -> int: return super().__hash__() @property def na_value(self) -> libmissing.NAType: return libmissing.NA def register_extension_dtype(cls: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]: _registry.register(cls) return cls class Registry: def __init__(self) -> None: self.dtypes: list[type_t[ExtensionDtype]] = [] def register(self, dtype: type_t[ExtensionDtype]) -> None: if not issubclass(dtype, ExtensionDtype): raise ValueError('can only register pandas extension dtypes') self.dtypes.append(dtype) @overload def find(self, dtype: type_t[ExtensionDtypeT]) -> type_t[ExtensionDtypeT]: ... 
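# --- Illustrative sketch (not part of the pandas source): string aliases for
# registered ExtensionDtypes resolve through the registry defined above; the
# public entry point that performs this lookup is pd.api.types.pandas_dtype.
import pandas as pd
from pandas.api.types import pandas_dtype

print(pandas_dtype("Int64"))              # Int64Dtype() found via the registry
print(pandas_dtype("category"))           # CategoricalDtype() found via the registry
print(pandas_dtype("int64"))              # misses the registry, falls through to np.dtype
print(pandas_dtype("Int64") == "Int64")   # True: ExtensionDtype.__eq__ accepts the string alias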
@overload def find(self, dtype: ExtensionDtypeT) -> ExtensionDtypeT: ... @overload def find(self, dtype: str) -> ExtensionDtype | None: ... @overload def find(self, dtype: npt.DTypeLike) -> type_t[ExtensionDtype] | ExtensionDtype | None: ... def find(self, dtype: type_t[ExtensionDtype] | ExtensionDtype | npt.DTypeLike) -> type_t[ExtensionDtype] | ExtensionDtype | None: if not isinstance(dtype, str): dtype_type: type_t if not isinstance(dtype, type): dtype_type = type(dtype) else: dtype_type = dtype if issubclass(dtype_type, ExtensionDtype): return cast('ExtensionDtype | type_t[ExtensionDtype]', dtype) return None for dtype_type in self.dtypes: try: return dtype_type.construct_from_string(dtype) except TypeError: pass return None _registry = Registry() # File: pandas-main/pandas/core/dtypes/cast.py """""" from __future__ import annotations import datetime as dt import functools from typing import TYPE_CHECKING, Any, Literal, TypeVar, cast, overload import warnings import numpy as np from pandas._config import using_string_dtype from pandas._libs import Interval, Period, lib from pandas._libs.missing import NA, NAType, checknull from pandas._libs.tslibs import NaT, OutOfBoundsDatetime, OutOfBoundsTimedelta, Timedelta, Timestamp, is_supported_dtype from pandas._libs.tslibs.timedeltas import array_to_timedelta64 from pandas.errors import IntCastingNaNError, LossySetitemError from pandas.core.dtypes.common import ensure_int8, ensure_int16, ensure_int32, ensure_int64, ensure_object, ensure_str, is_bool, is_complex, is_float, is_integer, is_object_dtype, is_scalar, is_string_dtype, pandas_dtype as pandas_dtype_func from pandas.core.dtypes.dtypes import ArrowDtype, BaseMaskedDtype, CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PandasExtensionDtype, PeriodDtype from pandas.core.dtypes.generic import ABCExtensionArray, ABCIndex, ABCSeries from pandas.core.dtypes.inference import is_list_like from pandas.core.dtypes.missing import is_valid_na_for_dtype, isna, na_value_for_dtype, notna from pandas.io._util import _arrow_dtype_mapping if TYPE_CHECKING: from collections.abc import Sequence, Sized from pandas._typing import ArrayLike, Dtype, DtypeObj, NumpyIndexT, Scalar, npt from pandas import Index from pandas.core.arrays import Categorical, DatetimeArray, ExtensionArray, IntervalArray, PeriodArray, TimedeltaArray _int8_max = np.iinfo(np.int8).max _int16_max = np.iinfo(np.int16).max _int32_max = np.iinfo(np.int32).max _dtype_obj = np.dtype(object) NumpyArrayT = TypeVar('NumpyArrayT', bound=np.ndarray) def maybe_convert_platform(values: list | tuple | range | np.ndarray | ExtensionArray) -> ArrayLike: arr: ArrayLike if isinstance(values, (list, tuple, range)): arr = construct_1d_object_array_from_listlike(values) else: arr = values if arr.dtype == _dtype_obj: arr = cast(np.ndarray, arr) arr = lib.maybe_convert_objects(arr) return arr def is_nested_object(obj) -> bool: return bool(isinstance(obj, ABCSeries) and is_object_dtype(obj.dtype) and any((isinstance(v, ABCSeries) for v in obj._values))) def maybe_box_datetimelike(value: Scalar, dtype: Dtype | None=None) -> Scalar: if dtype == _dtype_obj: pass elif isinstance(value, (np.datetime64, dt.datetime)): value = Timestamp(value) elif isinstance(value, (np.timedelta64, dt.timedelta)): value = Timedelta(value) return value def maybe_box_native(value: Scalar | None | NAType) -> Scalar | None | NAType: if is_float(value): value = float(value) elif is_integer(value): value = int(value) elif is_bool(value): value = bool(value) elif 
isinstance(value, (np.datetime64, np.timedelta64)): value = maybe_box_datetimelike(value) elif value is NA: value = None return value def _maybe_unbox_datetimelike(value: Scalar, dtype: DtypeObj) -> Scalar: if is_valid_na_for_dtype(value, dtype): value = dtype.type('NaT', 'ns') elif isinstance(value, Timestamp): if value.tz is None: value = value.to_datetime64() elif not isinstance(dtype, DatetimeTZDtype): raise TypeError('Cannot unbox tzaware Timestamp to tznaive dtype') elif isinstance(value, Timedelta): value = value.to_timedelta64() _disallow_mismatched_datetimelike(value, dtype) return value def _disallow_mismatched_datetimelike(value, dtype: DtypeObj) -> None: vdtype = getattr(value, 'dtype', None) if vdtype is None: return elif vdtype.kind == 'm' and dtype.kind == 'M' or (vdtype.kind == 'M' and dtype.kind == 'm'): raise TypeError(f'Cannot cast {value!r} to {dtype}') @overload def maybe_downcast_to_dtype(result: np.ndarray, dtype: str | np.dtype) -> np.ndarray: ... @overload def maybe_downcast_to_dtype(result: ExtensionArray, dtype: str | np.dtype) -> ArrayLike: ... def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLike: if isinstance(result, ABCSeries): result = result._values do_round = False if isinstance(dtype, str): if dtype == 'infer': inferred_type = lib.infer_dtype(result, skipna=False) if inferred_type == 'boolean': dtype = 'bool' elif inferred_type == 'integer': dtype = 'int64' elif inferred_type == 'datetime64': dtype = 'datetime64[ns]' elif inferred_type in ['timedelta', 'timedelta64']: dtype = 'timedelta64[ns]' elif inferred_type == 'floating': dtype = 'int64' if issubclass(result.dtype.type, np.number): do_round = True else: dtype = 'object' dtype = np.dtype(dtype) if not isinstance(dtype, np.dtype): raise TypeError(dtype) converted = maybe_downcast_numeric(result, dtype, do_round) if converted is not result: return converted if dtype.kind in 'mM' and result.dtype.kind in 'if': result = result.astype(dtype) elif dtype.kind == 'm' and result.dtype == _dtype_obj: result = cast(np.ndarray, result) result = array_to_timedelta64(result) elif dtype == np.dtype('M8[ns]') and result.dtype == _dtype_obj: result = cast(np.ndarray, result) return np.asarray(maybe_cast_to_datetime(result, dtype=dtype)) return result @overload def maybe_downcast_numeric(result: np.ndarray, dtype: np.dtype, do_round: bool=False) -> np.ndarray: ... @overload def maybe_downcast_numeric(result: ExtensionArray, dtype: DtypeObj, do_round: bool=False) -> ArrayLike: ... 
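# --- Illustrative sketch (not part of the pandas source): lossless numeric
# downcasting in the same spirit as maybe_downcast_numeric above, exercised
# here through the public pd.to_numeric(..., downcast=...) API.
import pandas as pd

floats = pd.Series([1.0, 2.0, 3.0])                         # float64 but integer-valued
print(pd.to_numeric(floats, downcast="integer").dtype)      # int8: the cast is lossless
fractional = pd.Series([1.5, 2.5])
print(pd.to_numeric(fractional, downcast="integer").dtype)  # float64: the cast would lose data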
def maybe_downcast_numeric(result: ArrayLike, dtype: DtypeObj, do_round: bool=False) -> ArrayLike: if not isinstance(dtype, np.dtype) or not isinstance(result.dtype, np.dtype): return result def trans(x): if do_round: return x.round() return x if dtype.kind == result.dtype.kind: if result.dtype.itemsize <= dtype.itemsize and result.size: return result if dtype.kind in 'biu': if not result.size: return trans(result).astype(dtype) if isinstance(result, np.ndarray): element = result.item(0) else: element = result.iloc[0] if not isinstance(element, (np.integer, np.floating, int, float, bool)): return result if issubclass(result.dtype.type, (np.object_, np.number)) and notna(result).all(): new_result = trans(result).astype(dtype) if new_result.dtype.kind == 'O' or result.dtype.kind == 'O': if (new_result == result).all(): return new_result elif np.allclose(new_result, result, rtol=0): return new_result elif issubclass(dtype.type, np.floating) and result.dtype.kind != 'b' and (not is_string_dtype(result.dtype)): with warnings.catch_warnings(): warnings.filterwarnings('ignore', 'overflow encountered in cast', RuntimeWarning) new_result = result.astype(dtype) size_tols = {4: 0.0005, 8: 5e-08, 16: 5e-16} atol = size_tols.get(new_result.dtype.itemsize, 0.0) if np.allclose(new_result, result, equal_nan=True, rtol=0.0, atol=atol): return new_result elif dtype.kind == result.dtype.kind == 'c': new_result = result.astype(dtype) if np.array_equal(new_result, result, equal_nan=True): return new_result return result def maybe_upcast_numeric_to_64bit(arr: NumpyIndexT) -> NumpyIndexT: dtype = arr.dtype if dtype.kind == 'i' and dtype != np.int64: return arr.astype(np.int64) elif dtype.kind == 'u' and dtype != np.uint64: return arr.astype(np.uint64) elif dtype.kind == 'f' and dtype != np.float64: return arr.astype(np.float64) else: return arr def maybe_cast_pointwise_result(result: ArrayLike, dtype: DtypeObj, numeric_only: bool=False, same_dtype: bool=True) -> ArrayLike: if isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() if same_dtype: result = _maybe_cast_to_extension_array(cls, result, dtype=dtype) else: result = _maybe_cast_to_extension_array(cls, result) elif numeric_only and dtype.kind in 'iufcb' or not numeric_only: result = maybe_downcast_to_dtype(result, dtype) return result def _maybe_cast_to_extension_array(cls: type[ExtensionArray], obj: ArrayLike, dtype: ExtensionDtype | None=None) -> ArrayLike: result: ArrayLike if dtype is not None: try: result = cls._from_scalars(obj, dtype=dtype) except (TypeError, ValueError): return obj return result try: result = cls._from_sequence(obj, dtype=dtype) except Exception: result = obj return result @overload def ensure_dtype_can_hold_na(dtype: np.dtype) -> np.dtype: ... @overload def ensure_dtype_can_hold_na(dtype: ExtensionDtype) -> ExtensionDtype: ... 
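# --- Illustrative sketch (not part of the pandas source): the dtype widening
# described by ensure_dtype_can_hold_na / maybe_promote becomes visible whenever
# a missing value has to be introduced, for example by reindexing.
import pandas as pd

ints = pd.Series([1, 2, 3], dtype="int64")
print(ints.reindex([0, 1, 2, 3]).dtype)    # float64: int64 cannot hold NaN
bools = pd.Series([True, False], dtype="bool")
print(bools.reindex([0, 1, 2]).dtype)      # object: bool cannot hold NaN either
masked = pd.Series([1, 2], dtype="Int64")
print(masked.reindex([0, 1, 2]).dtype)     # Int64: the masked dtype holds pd.NA natively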
def ensure_dtype_can_hold_na(dtype: DtypeObj) -> DtypeObj: if isinstance(dtype, ExtensionDtype): if dtype._can_hold_na: return dtype elif isinstance(dtype, IntervalDtype): return IntervalDtype(np.float64, closed=dtype.closed) return _dtype_obj elif dtype.kind == 'b': return _dtype_obj elif dtype.kind in 'iu': return np.dtype(np.float64) return dtype _canonical_nans = {np.datetime64: np.datetime64('NaT', 'ns'), np.timedelta64: np.timedelta64('NaT', 'ns'), type(np.nan): np.nan} def maybe_promote(dtype: np.dtype, fill_value=np.nan): orig = fill_value orig_is_nat = False if checknull(fill_value): if fill_value is not NA: try: orig_is_nat = np.isnat(fill_value) except TypeError: pass fill_value = _canonical_nans.get(type(fill_value), fill_value) try: (dtype, fill_value) = _maybe_promote_cached(dtype, fill_value, type(fill_value)) except TypeError: (dtype, fill_value) = _maybe_promote(dtype, fill_value) if dtype == _dtype_obj and orig is not None or (orig_is_nat and np.datetime_data(orig)[0] != 'ns'): fill_value = orig return (dtype, fill_value) @functools.lru_cache def _maybe_promote_cached(dtype, fill_value, fill_value_type): return _maybe_promote(dtype, fill_value) def _maybe_promote(dtype: np.dtype, fill_value=np.nan): if not is_scalar(fill_value): if dtype != object: raise ValueError('fill_value must be a scalar') dtype = _dtype_obj return (dtype, fill_value) if is_valid_na_for_dtype(fill_value, dtype) and dtype.kind in 'iufcmM': dtype = ensure_dtype_can_hold_na(dtype) fv = na_value_for_dtype(dtype) return (dtype, fv) elif isinstance(dtype, CategoricalDtype): if fill_value in dtype.categories or isna(fill_value): return (dtype, fill_value) else: return (object, ensure_object(fill_value)) elif isna(fill_value): dtype = _dtype_obj if fill_value is None: fill_value = np.nan return (dtype, fill_value) if issubclass(dtype.type, np.datetime64): (inferred, fv) = infer_dtype_from_scalar(fill_value) if inferred == dtype: return (dtype, fv) from pandas.core.arrays import DatetimeArray dta = DatetimeArray._from_sequence([], dtype='M8[ns]') try: fv = dta._validate_setitem_value(fill_value) return (dta.dtype, fv) except (ValueError, TypeError): return (_dtype_obj, fill_value) elif issubclass(dtype.type, np.timedelta64): (inferred, fv) = infer_dtype_from_scalar(fill_value) if inferred == dtype: return (dtype, fv) elif inferred.kind == 'm': unit = np.datetime_data(dtype)[0] try: td = Timedelta(fill_value).as_unit(unit, round_ok=False) except OutOfBoundsTimedelta: return (_dtype_obj, fill_value) else: return (dtype, td.asm8) return (_dtype_obj, fill_value) elif is_float(fill_value): if issubclass(dtype.type, np.bool_): dtype = np.dtype(np.object_) elif issubclass(dtype.type, np.integer): dtype = np.dtype(np.float64) elif dtype.kind == 'f': mst = np.min_scalar_type(fill_value) if mst > dtype: dtype = mst elif dtype.kind == 'c': mst = np.min_scalar_type(fill_value) dtype = np.promote_types(dtype, mst) elif is_bool(fill_value): if not issubclass(dtype.type, np.bool_): dtype = np.dtype(np.object_) elif is_integer(fill_value): if issubclass(dtype.type, np.bool_): dtype = np.dtype(np.object_) elif issubclass(dtype.type, np.integer): if not np_can_cast_scalar(fill_value, dtype): mst = np.min_scalar_type(fill_value) dtype = np.promote_types(dtype, mst) if dtype.kind == 'f': dtype = np.dtype(np.object_) elif is_complex(fill_value): if issubclass(dtype.type, np.bool_): dtype = np.dtype(np.object_) elif issubclass(dtype.type, (np.integer, np.floating)): mst = np.min_scalar_type(fill_value) dtype = 
np.promote_types(dtype, mst) elif dtype.kind == 'c': mst = np.min_scalar_type(fill_value) if mst > dtype: dtype = mst else: dtype = np.dtype(np.object_) if issubclass(dtype.type, (bytes, str)): dtype = np.dtype(np.object_) fill_value = _ensure_dtype_type(fill_value, dtype) return (dtype, fill_value) def _ensure_dtype_type(value, dtype: np.dtype): if dtype == _dtype_obj: return value return dtype.type(value) def infer_dtype_from(val) -> tuple[DtypeObj, Any]: if not is_list_like(val): return infer_dtype_from_scalar(val) return infer_dtype_from_array(val) def infer_dtype_from_scalar(val) -> tuple[DtypeObj, Any]: dtype: DtypeObj = _dtype_obj if isinstance(val, np.ndarray): if val.ndim != 0: msg = 'invalid ndarray passed to infer_dtype_from_scalar' raise ValueError(msg) dtype = val.dtype val = lib.item_from_zerodim(val) elif isinstance(val, str): dtype = _dtype_obj if using_string_dtype(): from pandas.core.arrays.string_ import StringDtype dtype = StringDtype(na_value=np.nan) elif isinstance(val, (np.datetime64, dt.datetime)): try: val = Timestamp(val) except OutOfBoundsDatetime: return (_dtype_obj, val) if val is NaT or val.tz is None: val = val.to_datetime64() dtype = val.dtype else: dtype = DatetimeTZDtype(unit=val.unit, tz=val.tz) elif isinstance(val, (np.timedelta64, dt.timedelta)): try: val = Timedelta(val) except (OutOfBoundsTimedelta, OverflowError): dtype = _dtype_obj else: if val is NaT: val = np.timedelta64('NaT', 'ns') else: val = val.asm8 dtype = val.dtype elif is_bool(val): dtype = np.dtype(np.bool_) elif is_integer(val): if isinstance(val, np.integer): dtype = np.dtype(type(val)) else: dtype = np.dtype(np.int64) try: np.array(val, dtype=dtype) except OverflowError: dtype = np.array(val).dtype elif is_float(val): if isinstance(val, np.floating): dtype = np.dtype(type(val)) else: dtype = np.dtype(np.float64) elif is_complex(val): dtype = np.dtype(np.complex128) if isinstance(val, Period): dtype = PeriodDtype(freq=val.freq) elif isinstance(val, Interval): subtype = infer_dtype_from_scalar(val.left)[0] dtype = IntervalDtype(subtype=subtype, closed=val.closed) return (dtype, val) def dict_compat(d: dict[Scalar, Scalar]) -> dict[Scalar, Scalar]: return {maybe_box_datetimelike(key): value for (key, value) in d.items()} def infer_dtype_from_array(arr) -> tuple[DtypeObj, ArrayLike]: if isinstance(arr, np.ndarray): return (arr.dtype, arr) if not is_list_like(arr): raise TypeError("'arr' must be list-like") arr_dtype = getattr(arr, 'dtype', None) if isinstance(arr_dtype, ExtensionDtype): return (arr.dtype, arr) elif isinstance(arr, ABCSeries): return (arr.dtype, np.asarray(arr)) inferred = lib.infer_dtype(arr, skipna=False) if inferred in ['string', 'bytes', 'mixed', 'mixed-integer']: return (np.dtype(np.object_), arr) arr = np.asarray(arr) return (arr.dtype, arr) def _maybe_infer_dtype_type(element): tipo = None if hasattr(element, 'dtype'): tipo = element.dtype elif is_list_like(element): element = np.asarray(element) tipo = element.dtype return tipo def invalidate_string_dtypes(dtype_set: set[DtypeObj]) -> None: non_string_dtypes = dtype_set - {np.dtype('S').type, np.dtype(' np.ndarray: length = len(categories) if length < _int8_max: return ensure_int8(indexer) elif length < _int16_max: return ensure_int16(indexer) elif length < _int32_max: return ensure_int32(indexer) return ensure_int64(indexer) def convert_dtypes(input_array: ArrayLike, convert_string: bool=True, convert_integer: bool=True, convert_boolean: bool=True, convert_floating: bool=True, infer_objects: bool=False, 
dtype_backend: Literal['numpy_nullable', 'pyarrow']='numpy_nullable') -> DtypeObj: from pandas.core.arrays.string_ import StringDtype inferred_dtype: str | DtypeObj if (convert_string or convert_integer or convert_boolean or convert_floating) and isinstance(input_array, np.ndarray): if input_array.dtype == object: inferred_dtype = lib.infer_dtype(input_array) else: inferred_dtype = input_array.dtype if is_string_dtype(inferred_dtype): if not convert_string or inferred_dtype == 'bytes': inferred_dtype = input_array.dtype else: inferred_dtype = pandas_dtype_func('string') if convert_integer: target_int_dtype = pandas_dtype_func('Int64') if input_array.dtype.kind in 'iu': from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE inferred_dtype = NUMPY_INT_TO_DTYPE.get(input_array.dtype, target_int_dtype) elif input_array.dtype.kind in 'fcb': arr = input_array[notna(input_array)] if (arr.astype(int) == arr).all(): inferred_dtype = target_int_dtype else: inferred_dtype = input_array.dtype elif infer_objects and input_array.dtype == object and (isinstance(inferred_dtype, str) and inferred_dtype == 'integer'): inferred_dtype = target_int_dtype if convert_floating: if input_array.dtype.kind in 'fcb': from pandas.core.arrays.floating import NUMPY_FLOAT_TO_DTYPE inferred_float_dtype: DtypeObj = NUMPY_FLOAT_TO_DTYPE.get(input_array.dtype, pandas_dtype_func('Float64')) if convert_integer: arr = input_array[notna(input_array)] if (arr.astype(int) == arr).all(): inferred_dtype = pandas_dtype_func('Int64') else: inferred_dtype = inferred_float_dtype else: inferred_dtype = inferred_float_dtype elif infer_objects and input_array.dtype == object and (isinstance(inferred_dtype, str) and inferred_dtype == 'mixed-integer-float'): inferred_dtype = pandas_dtype_func('Float64') if convert_boolean: if input_array.dtype.kind == 'b': inferred_dtype = pandas_dtype_func('boolean') elif isinstance(inferred_dtype, str) and inferred_dtype == 'boolean': inferred_dtype = pandas_dtype_func('boolean') if isinstance(inferred_dtype, str): inferred_dtype = input_array.dtype elif convert_string and isinstance(input_array.dtype, StringDtype) and (input_array.dtype.na_value is np.nan): inferred_dtype = pandas_dtype_func('string') else: inferred_dtype = input_array.dtype if dtype_backend == 'pyarrow': from pandas.core.arrays.arrow.array import to_pyarrow_type from pandas.core.arrays.string_ import StringDtype assert not isinstance(inferred_dtype, str) if convert_integer and inferred_dtype.kind in 'iu' or (convert_floating and inferred_dtype.kind in 'fc') or (convert_boolean and inferred_dtype.kind == 'b') or (convert_string and isinstance(inferred_dtype, StringDtype)) or (inferred_dtype.kind not in 'iufcb' and (not isinstance(inferred_dtype, StringDtype))): if isinstance(inferred_dtype, PandasExtensionDtype) and (not isinstance(inferred_dtype, DatetimeTZDtype)): base_dtype = inferred_dtype.base elif isinstance(inferred_dtype, (BaseMaskedDtype, ArrowDtype)): base_dtype = inferred_dtype.numpy_dtype elif isinstance(inferred_dtype, StringDtype): base_dtype = np.dtype(str) else: base_dtype = inferred_dtype if base_dtype.kind == 'O' and input_array.size > 0 and isna(input_array).all(): import pyarrow as pa pa_type = pa.null() else: pa_type = to_pyarrow_type(base_dtype) if pa_type is not None: inferred_dtype = ArrowDtype(pa_type) elif dtype_backend == 'numpy_nullable' and isinstance(inferred_dtype, ArrowDtype): inferred_dtype = _arrow_dtype_mapping()[inferred_dtype.pyarrow_dtype] return inferred_dtype def 
maybe_infer_to_datetimelike(value: npt.NDArray[np.object_]) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray | IntervalArray: if not isinstance(value, np.ndarray) or value.dtype != object: raise TypeError(type(value)) if value.ndim != 1: raise ValueError(value.ndim) if not len(value): return value return lib.maybe_convert_objects(value, convert_numeric=False, convert_non_numeric=True, dtype_if_all_nat=np.dtype('M8[s]')) def maybe_cast_to_datetime(value: np.ndarray | list, dtype: np.dtype) -> ExtensionArray | np.ndarray: from pandas.core.arrays.datetimes import DatetimeArray from pandas.core.arrays.timedeltas import TimedeltaArray assert dtype.kind in 'mM' if not is_list_like(value): raise TypeError('value must be listlike') _ensure_nanosecond_dtype(dtype) if lib.is_np_dtype(dtype, 'm'): res = TimedeltaArray._from_sequence(value, dtype=dtype) return res else: try: dta = DatetimeArray._from_sequence(value, dtype=dtype) except ValueError as err: if 'cannot supply both a tz and a timezone-naive dtype' in str(err): raise ValueError('Cannot convert timezone-aware data to timezone-naive dtype. Use pd.Series(values).dt.tz_localize(None) instead.') from err raise return dta def _ensure_nanosecond_dtype(dtype: DtypeObj) -> None: msg = f"The '{dtype.name}' dtype has no unit. Please pass in '{dtype.name}[ns]' instead." dtype = getattr(dtype, 'subtype', dtype) if not isinstance(dtype, np.dtype): pass elif dtype.kind in 'mM': if not is_supported_dtype(dtype): if dtype.name in ['datetime64', 'timedelta64']: raise ValueError(msg) raise TypeError(f"dtype={dtype} is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns'") def find_result_type(left_dtype: DtypeObj, right: Any) -> DtypeObj: new_dtype: DtypeObj if isinstance(left_dtype, np.dtype) and left_dtype.kind in 'iuc' and (lib.is_integer(right) or lib.is_float(right)): if lib.is_float(right) and right.is_integer() and (left_dtype.kind != 'f'): right = int(right) if isinstance(right, int) and (not isinstance(right, np.integer)): right_dtype = np.min_scalar_type(right) if right == 0: right = left_dtype elif not np.issubdtype(left_dtype, np.unsignedinteger) and 0 < right <= np.iinfo(right_dtype).max: right = np.dtype(f'i{right_dtype.itemsize}') else: right = right_dtype new_dtype = np.result_type(left_dtype, right) elif is_valid_na_for_dtype(right, left_dtype): new_dtype = ensure_dtype_can_hold_na(left_dtype) else: (dtype, _) = infer_dtype_from(right) new_dtype = find_common_type([left_dtype, dtype]) return new_dtype def common_dtype_categorical_compat(objs: Sequence[Index | ArrayLike], dtype: DtypeObj) -> DtypeObj: if lib.is_np_dtype(dtype, 'iu'): for obj in objs: obj_dtype = getattr(obj, 'dtype', None) if isinstance(obj_dtype, CategoricalDtype): if isinstance(obj, ABCIndex): hasnas = obj.hasnans else: hasnas = cast('Categorical', obj)._hasna if hasnas: dtype = np.dtype(np.float64) break return dtype def np_find_common_type(*dtypes: np.dtype) -> np.dtype: try: common_dtype = np.result_type(*dtypes) if common_dtype.kind in 'mMSU': common_dtype = np.dtype('O') except TypeError: common_dtype = np.dtype('O') return common_dtype @overload def find_common_type(types: list[np.dtype]) -> np.dtype: ... @overload def find_common_type(types: list[ExtensionDtype]) -> DtypeObj: ... @overload def find_common_type(types: list[DtypeObj]) -> DtypeObj: ... 
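# --- Illustrative sketch (not part of the pandas source): the common-dtype rules
# above determine the result dtype when arrays of different dtypes are combined,
# e.g. by pd.concat.
import pandas as pd

i64 = pd.Series([1, 2], dtype="int64")
f64 = pd.Series([0.5], dtype="float64")
print(pd.concat([i64, f64]).dtype)        # float64: numpy-style promotion
flags = pd.Series([True], dtype="bool")
print(pd.concat([i64, flags]).dtype)      # object: bool mixed with integers falls back to object
nullable = pd.Series([1], dtype="Int64")
print(pd.concat([i64, nullable]).dtype)   # Int64: the ExtensionDtype supplies _get_common_dtype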
def find_common_type(types): if not types: raise ValueError('no types given') first = types[0] if lib.dtypes_all_equal(list(types)): return first types = list(dict.fromkeys(types).keys()) if any((isinstance(t, ExtensionDtype) for t in types)): for t in types: if isinstance(t, ExtensionDtype): res = t._get_common_dtype(types) if res is not None: return res return np.dtype('object') if all((lib.is_np_dtype(t, 'M') for t in types)): return np.dtype(max(types)) if all((lib.is_np_dtype(t, 'm') for t in types)): return np.dtype(max(types)) has_bools = any((t.kind == 'b' for t in types)) if has_bools: for t in types: if t.kind in 'iufc': return np.dtype('object') return np_find_common_type(*types) def construct_2d_arraylike_from_scalar(value: Scalar, length: int, width: int, dtype: np.dtype, copy: bool) -> np.ndarray: shape = (length, width) if dtype.kind in 'mM': value = _maybe_box_and_unbox_datetimelike(value, dtype) elif dtype == _dtype_obj: if isinstance(value, (np.timedelta64, np.datetime64)): out = np.empty(shape, dtype=object) out.fill(value) return out try: if not copy: arr = np.asarray(value, dtype=dtype) else: arr = np.array(value, dtype=dtype, copy=copy) except (ValueError, TypeError) as err: raise TypeError(f'DataFrame constructor called with incompatible data and dtype: {err}') from err if arr.ndim != 0: raise ValueError('DataFrame constructor not properly called!') return np.full(shape, arr) def construct_1d_arraylike_from_scalar(value: Scalar, length: int, dtype: DtypeObj | None) -> ArrayLike: if dtype is None: try: (dtype, value) = infer_dtype_from_scalar(value) except OutOfBoundsDatetime: dtype = _dtype_obj if isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() seq = [] if length == 0 else [value] return cls._from_sequence(seq, dtype=dtype).repeat(length) if length and dtype.kind in 'iu' and isna(value): dtype = np.dtype('float64') elif lib.is_np_dtype(dtype, 'US'): dtype = np.dtype('object') if not isna(value): value = ensure_str(value) elif dtype.kind in 'mM': value = _maybe_box_and_unbox_datetimelike(value, dtype) subarr = np.empty(length, dtype=dtype) if length: subarr.fill(value) return subarr def _maybe_box_and_unbox_datetimelike(value: Scalar, dtype: DtypeObj): if isinstance(value, dt.datetime): value = maybe_box_datetimelike(value, dtype) return _maybe_unbox_datetimelike(value, dtype) def construct_1d_object_array_from_listlike(values: Sized) -> np.ndarray: result = np.empty(len(values), dtype='object') result[:] = values return result def maybe_cast_to_integer_array(arr: list | np.ndarray, dtype: np.dtype) -> np.ndarray: assert dtype.kind in 'iu' try: if not isinstance(arr, np.ndarray): with warnings.catch_warnings(): warnings.filterwarnings('ignore', 'NumPy will stop allowing conversion of out-of-bound Python int', DeprecationWarning) casted = np.asarray(arr, dtype=dtype) else: with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=RuntimeWarning) casted = arr.astype(dtype, copy=False) except OverflowError as err: raise OverflowError(f'The elements provided in the data cannot all be casted to the dtype {dtype}') from err if isinstance(arr, np.ndarray) and arr.dtype == dtype: return casted with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=RuntimeWarning) warnings.filterwarnings('ignore', 'elementwise comparison failed', FutureWarning) if np.array_equal(arr, casted): return casted arr = np.asarray(arr) if np.issubdtype(arr.dtype, str): if (casted.astype(str) == arr).all(): return casted raise 
ValueError(f'string values cannot be losslessly cast to {dtype}') if dtype.kind == 'u' and (arr < 0).any(): raise OverflowError('Trying to coerce negative values to unsigned integers') if arr.dtype.kind == 'f': if not np.isfinite(arr).all(): raise IntCastingNaNError('Cannot convert non-finite values (NA or inf) to integer') raise ValueError('Trying to coerce float values to integers') if arr.dtype == object: raise ValueError('Trying to coerce object values to integers') if casted.dtype < arr.dtype: raise ValueError(f'Values are too large to be losslessly converted to {dtype}. To cast anyway, use pd.Series(values).astype({dtype})') if arr.dtype.kind in 'mM': raise TypeError(f'Constructing a Series or DataFrame from {arr.dtype} values and dtype={dtype} is not supported. Use values.view({dtype}) instead.') raise ValueError(f'values cannot be losslessly cast to {dtype}') def can_hold_element(arr: ArrayLike, element: Any) -> bool: dtype = arr.dtype if not isinstance(dtype, np.dtype) or dtype.kind in 'mM': if isinstance(dtype, (PeriodDtype, IntervalDtype, DatetimeTZDtype, np.dtype)): arr = cast('PeriodArray | DatetimeArray | TimedeltaArray | IntervalArray', arr) try: arr._validate_setitem_value(element) return True except (ValueError, TypeError): return False return True try: np_can_hold_element(dtype, element) return True except (TypeError, LossySetitemError): return False def np_can_hold_element(dtype: np.dtype, element: Any) -> Any: if dtype == _dtype_obj: return element tipo = _maybe_infer_dtype_type(element) if dtype.kind in 'iu': if isinstance(element, range): if _dtype_can_hold_range(element, dtype): return element raise LossySetitemError if is_integer(element) or (is_float(element) and element.is_integer()): info = np.iinfo(dtype) if info.min <= element <= info.max: return dtype.type(element) raise LossySetitemError if tipo is not None: if tipo.kind not in 'iu': if isinstance(element, np.ndarray) and element.dtype.kind == 'f': with np.errstate(invalid='ignore'): casted = element.astype(dtype) comp = casted == element if comp.all(): return casted raise LossySetitemError elif isinstance(element, ABCExtensionArray) and isinstance(element.dtype, CategoricalDtype): try: casted = element.astype(dtype) except (ValueError, TypeError) as err: raise LossySetitemError from err comp = casted == element if not comp.all(): raise LossySetitemError return casted raise LossySetitemError if dtype.kind == 'u' and isinstance(element, np.ndarray) and (element.dtype.kind == 'i'): casted = element.astype(dtype) if (casted == element).all(): return casted raise LossySetitemError if dtype.itemsize < tipo.itemsize: raise LossySetitemError if not isinstance(tipo, np.dtype): arr = element._values if isinstance(element, ABCSeries) else element if arr._hasna: raise LossySetitemError return element return element raise LossySetitemError if dtype.kind == 'f': if lib.is_integer(element) or lib.is_float(element): casted = dtype.type(element) if np.isnan(casted) or casted == element: return casted raise LossySetitemError if tipo is not None: if tipo.kind not in 'iuf': raise LossySetitemError if not isinstance(tipo, np.dtype): if element._hasna: raise LossySetitemError return element elif tipo.itemsize > dtype.itemsize or tipo.kind != dtype.kind: if isinstance(element, np.ndarray): casted = element.astype(dtype) if np.array_equal(casted, element, equal_nan=True): return casted raise LossySetitemError return element raise LossySetitemError if dtype.kind == 'c': if lib.is_integer(element) or lib.is_complex(element) or 
lib.is_float(element): if np.isnan(element): return dtype.type(element) with warnings.catch_warnings(): warnings.filterwarnings('ignore') casted = dtype.type(element) if casted == element: return casted raise LossySetitemError if tipo is not None: if tipo.kind in 'iufc': return element raise LossySetitemError raise LossySetitemError if dtype.kind == 'b': if tipo is not None: if tipo.kind == 'b': if not isinstance(tipo, np.dtype): if element._hasna: raise LossySetitemError return element raise LossySetitemError if lib.is_bool(element): return element raise LossySetitemError if dtype.kind == 'S': if tipo is not None: if tipo.kind == 'S' and tipo.itemsize <= dtype.itemsize: return element raise LossySetitemError if isinstance(element, bytes) and len(element) <= dtype.itemsize: return element raise LossySetitemError if dtype.kind == 'V': raise LossySetitemError raise NotImplementedError(dtype) def _dtype_can_hold_range(rng: range, dtype: np.dtype) -> bool: if not len(rng): return True return np_can_cast_scalar(rng.start, dtype) and np_can_cast_scalar(rng.stop, dtype) def np_can_cast_scalar(element: Scalar, dtype: np.dtype) -> bool: try: np_can_hold_element(dtype, element) return True except (LossySetitemError, NotImplementedError): return False # File: pandas-main/pandas/core/dtypes/common.py """""" from __future__ import annotations from typing import TYPE_CHECKING, Any import warnings import numpy as np from pandas._libs import Interval, Period, algos, lib from pandas._libs.tslibs import conversion from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import _registry as registry from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, SparseDtype from pandas.core.dtypes.generic import ABCIndex from pandas.core.dtypes.inference import is_array_like, is_bool, is_complex, is_dataclass, is_decimal, is_dict_like, is_file_like, is_float, is_hashable, is_integer, is_iterator, is_list_like, is_named_tuple, is_nested_list_like, is_number, is_re, is_re_compilable, is_scalar, is_sequence if TYPE_CHECKING: from collections.abc import Callable from pandas._typing import ArrayLike, DtypeObj DT64NS_DTYPE = conversion.DT64NS_DTYPE TD64NS_DTYPE = conversion.TD64NS_DTYPE INT64_DTYPE = np.dtype(np.int64) _is_scipy_sparse = None ensure_float64 = algos.ensure_float64 ensure_int64 = algos.ensure_int64 ensure_int32 = algos.ensure_int32 ensure_int16 = algos.ensure_int16 ensure_int8 = algos.ensure_int8 ensure_platform_int = algos.ensure_platform_int ensure_object = algos.ensure_object ensure_uint64 = algos.ensure_uint64 def ensure_str(value: bytes | Any) -> str: if isinstance(value, bytes): value = value.decode('utf-8') elif not isinstance(value, str): value = str(value) return value def ensure_python_int(value: int | np.integer) -> int: if not (is_integer(value) or is_float(value)): if not is_scalar(value): raise TypeError(f'Value needs to be a scalar value, was type {type(value).__name__}') raise TypeError(f'Wrong type {type(value)} for value {value}') try: new_value = int(value) assert new_value == value except (TypeError, ValueError, AssertionError) as err: raise TypeError(f'Wrong type {type(value)} for value {value}') from err return new_value def classes(*klasses) -> Callable: return lambda tipo: issubclass(tipo, klasses) def _classes_and_not_datetimelike(*klasses) -> Callable: return lambda tipo: issubclass(tipo, klasses) and (not issubclass(tipo, (np.datetime64, np.timedelta64))) def is_object_dtype(arr_or_dtype) -> 
bool: return _is_dtype_type(arr_or_dtype, classes(np.object_)) def is_sparse(arr) -> bool: warnings.warn('is_sparse is deprecated and will be removed in a future version. Check `isinstance(dtype, pd.SparseDtype)` instead.', DeprecationWarning, stacklevel=2) dtype = getattr(arr, 'dtype', arr) return isinstance(dtype, SparseDtype) def is_scipy_sparse(arr) -> bool: global _is_scipy_sparse if _is_scipy_sparse is None: try: from scipy.sparse import issparse as _is_scipy_sparse except ImportError: _is_scipy_sparse = lambda _: False assert _is_scipy_sparse is not None return _is_scipy_sparse(arr) def is_datetime64_dtype(arr_or_dtype) -> bool: if isinstance(arr_or_dtype, np.dtype): return arr_or_dtype.kind == 'M' return _is_dtype_type(arr_or_dtype, classes(np.datetime64)) def is_datetime64tz_dtype(arr_or_dtype) -> bool: warnings.warn('is_datetime64tz_dtype is deprecated and will be removed in a future version. Check `isinstance(dtype, pd.DatetimeTZDtype)` instead.', DeprecationWarning, stacklevel=2) if isinstance(arr_or_dtype, DatetimeTZDtype): return True if arr_or_dtype is None: return False return DatetimeTZDtype.is_dtype(arr_or_dtype) def is_timedelta64_dtype(arr_or_dtype) -> bool: if isinstance(arr_or_dtype, np.dtype): return arr_or_dtype.kind == 'm' return _is_dtype_type(arr_or_dtype, classes(np.timedelta64)) def is_period_dtype(arr_or_dtype) -> bool: warnings.warn('is_period_dtype is deprecated and will be removed in a future version. Use `isinstance(dtype, pd.PeriodDtype)` instead', DeprecationWarning, stacklevel=2) if isinstance(arr_or_dtype, ExtensionDtype): return arr_or_dtype.type is Period if arr_or_dtype is None: return False return PeriodDtype.is_dtype(arr_or_dtype) def is_interval_dtype(arr_or_dtype) -> bool: warnings.warn('is_interval_dtype is deprecated and will be removed in a future version. Use `isinstance(dtype, pd.IntervalDtype)` instead', DeprecationWarning, stacklevel=2) if isinstance(arr_or_dtype, ExtensionDtype): return arr_or_dtype.type is Interval if arr_or_dtype is None: return False return IntervalDtype.is_dtype(arr_or_dtype) def is_categorical_dtype(arr_or_dtype) -> bool: warnings.warn('is_categorical_dtype is deprecated and will be removed in a future version. 
Use isinstance(dtype, pd.CategoricalDtype) instead', DeprecationWarning, stacklevel=2) if isinstance(arr_or_dtype, ExtensionDtype): return arr_or_dtype.name == 'category' if arr_or_dtype is None: return False return CategoricalDtype.is_dtype(arr_or_dtype) def is_string_or_object_np_dtype(dtype: np.dtype) -> bool: return dtype == object or dtype.kind in 'SU' def is_string_dtype(arr_or_dtype) -> bool: if hasattr(arr_or_dtype, 'dtype') and _get_dtype(arr_or_dtype).kind == 'O': return is_all_strings(arr_or_dtype) def condition(dtype) -> bool: if is_string_or_object_np_dtype(dtype): return True try: return dtype == 'string' except TypeError: return False return _is_dtype(arr_or_dtype, condition) def is_dtype_equal(source, target) -> bool: if isinstance(target, str): if not isinstance(source, str): try: src = _get_dtype(source) if isinstance(src, ExtensionDtype): return src == target except (TypeError, AttributeError, ImportError): return False elif isinstance(source, str): return is_dtype_equal(target, source) try: source = _get_dtype(source) target = _get_dtype(target) return source == target except (TypeError, AttributeError, ImportError): return False def is_integer_dtype(arr_or_dtype) -> bool: return _is_dtype_type(arr_or_dtype, _classes_and_not_datetimelike(np.integer)) or _is_dtype(arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in 'iu') def is_signed_integer_dtype(arr_or_dtype) -> bool: return _is_dtype_type(arr_or_dtype, _classes_and_not_datetimelike(np.signedinteger)) or _is_dtype(arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind == 'i') def is_unsigned_integer_dtype(arr_or_dtype) -> bool: return _is_dtype_type(arr_or_dtype, _classes_and_not_datetimelike(np.unsignedinteger)) or _is_dtype(arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind == 'u') def is_int64_dtype(arr_or_dtype) -> bool: warnings.warn('is_int64_dtype is deprecated and will be removed in a future version. 
Use dtype == np.int64 instead.', DeprecationWarning, stacklevel=2) return _is_dtype_type(arr_or_dtype, classes(np.int64)) def is_datetime64_any_dtype(arr_or_dtype) -> bool: if isinstance(arr_or_dtype, (np.dtype, ExtensionDtype)): return arr_or_dtype.kind == 'M' if arr_or_dtype is None: return False try: tipo = _get_dtype(arr_or_dtype) except TypeError: return False return lib.is_np_dtype(tipo, 'M') or isinstance(tipo, DatetimeTZDtype) or (isinstance(tipo, ExtensionDtype) and tipo.kind == 'M') def is_datetime64_ns_dtype(arr_or_dtype) -> bool: if arr_or_dtype is None: return False try: tipo = _get_dtype(arr_or_dtype) except TypeError: return False return tipo == DT64NS_DTYPE or (isinstance(tipo, DatetimeTZDtype) and tipo.unit == 'ns') def is_timedelta64_ns_dtype(arr_or_dtype) -> bool: return _is_dtype(arr_or_dtype, lambda dtype: dtype == TD64NS_DTYPE) def is_numeric_v_string_like(a: ArrayLike, b) -> bool: is_a_array = isinstance(a, np.ndarray) is_b_array = isinstance(b, np.ndarray) is_a_numeric_array = is_a_array and a.dtype.kind in ('u', 'i', 'f', 'c', 'b') is_b_numeric_array = is_b_array and b.dtype.kind in ('u', 'i', 'f', 'c', 'b') is_a_string_array = is_a_array and a.dtype.kind in ('S', 'U') is_b_string_array = is_b_array and b.dtype.kind in ('S', 'U') is_b_scalar_string_like = not is_b_array and isinstance(b, str) return is_a_numeric_array and is_b_scalar_string_like or (is_a_numeric_array and is_b_string_array) or (is_b_numeric_array and is_a_string_array) def needs_i8_conversion(dtype: DtypeObj | None) -> bool: if isinstance(dtype, np.dtype): return dtype.kind in 'mM' return isinstance(dtype, (PeriodDtype, DatetimeTZDtype)) def is_numeric_dtype(arr_or_dtype) -> bool: return _is_dtype_type(arr_or_dtype, _classes_and_not_datetimelike(np.number, np.bool_)) or _is_dtype(arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ._is_numeric) def is_any_real_numeric_dtype(arr_or_dtype) -> bool: return is_numeric_dtype(arr_or_dtype) and (not is_complex_dtype(arr_or_dtype)) and (not is_bool_dtype(arr_or_dtype)) def is_float_dtype(arr_or_dtype) -> bool: return _is_dtype_type(arr_or_dtype, classes(np.floating)) or _is_dtype(arr_or_dtype, lambda typ: isinstance(typ, ExtensionDtype) and typ.kind in 'f') def is_bool_dtype(arr_or_dtype) -> bool: if arr_or_dtype is None: return False try: dtype = _get_dtype(arr_or_dtype) except (TypeError, ValueError): return False if isinstance(dtype, CategoricalDtype): arr_or_dtype = dtype.categories if isinstance(arr_or_dtype, ABCIndex): if arr_or_dtype.inferred_type == 'boolean': if not is_bool_dtype(arr_or_dtype.dtype): warnings.warn('The behavior of is_bool_dtype with an object-dtype Index of bool objects is deprecated. In a future version, this will return False. 
Cast the Index to a bool dtype instead.', DeprecationWarning, stacklevel=2) return True return False elif isinstance(dtype, ExtensionDtype): return getattr(dtype, '_is_boolean', False) return issubclass(dtype.type, np.bool_) def is_1d_only_ea_dtype(dtype: DtypeObj | None) -> bool: return isinstance(dtype, ExtensionDtype) and (not dtype._supports_2d) def is_extension_array_dtype(arr_or_dtype) -> bool: dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype) if isinstance(dtype, ExtensionDtype): return True elif isinstance(dtype, np.dtype): return False else: return registry.find(dtype) is not None def is_ea_or_datetimelike_dtype(dtype: DtypeObj | None) -> bool: return isinstance(dtype, ExtensionDtype) or lib.is_np_dtype(dtype, 'mM') def is_complex_dtype(arr_or_dtype) -> bool: return _is_dtype_type(arr_or_dtype, classes(np.complexfloating)) def _is_dtype(arr_or_dtype, condition) -> bool: if arr_or_dtype is None: return False try: dtype = _get_dtype(arr_or_dtype) except (TypeError, ValueError): return False return condition(dtype) def _get_dtype(arr_or_dtype) -> DtypeObj: if arr_or_dtype is None: raise TypeError('Cannot deduce dtype from null object') if isinstance(arr_or_dtype, np.dtype): return arr_or_dtype elif isinstance(arr_or_dtype, type): return np.dtype(arr_or_dtype) elif hasattr(arr_or_dtype, 'dtype'): arr_or_dtype = arr_or_dtype.dtype return pandas_dtype(arr_or_dtype) def _is_dtype_type(arr_or_dtype, condition) -> bool: if arr_or_dtype is None: return condition(type(None)) if isinstance(arr_or_dtype, np.dtype): return condition(arr_or_dtype.type) elif isinstance(arr_or_dtype, type): if issubclass(arr_or_dtype, ExtensionDtype): arr_or_dtype = arr_or_dtype.type return condition(np.dtype(arr_or_dtype).type) if hasattr(arr_or_dtype, 'dtype'): arr_or_dtype = arr_or_dtype.dtype elif is_list_like(arr_or_dtype): return condition(type(None)) try: tipo = pandas_dtype(arr_or_dtype).type except (TypeError, ValueError): if is_scalar(arr_or_dtype): return condition(type(None)) return False return condition(tipo) def infer_dtype_from_object(dtype) -> type: if isinstance(dtype, type) and issubclass(dtype, np.generic): return dtype elif isinstance(dtype, (np.dtype, ExtensionDtype)): try: _validate_date_like_dtype(dtype) except TypeError: pass if hasattr(dtype, 'numpy_dtype'): return dtype.numpy_dtype.type return dtype.type try: dtype = pandas_dtype(dtype) except TypeError: pass if isinstance(dtype, ExtensionDtype): return dtype.type elif isinstance(dtype, str): if dtype in ['datetimetz', 'datetime64tz']: return DatetimeTZDtype.type elif dtype in ['period']: raise NotImplementedError if dtype in ['datetime', 'timedelta']: dtype += '64' try: return infer_dtype_from_object(getattr(np, dtype)) except (AttributeError, TypeError): pass return infer_dtype_from_object(np.dtype(dtype)) def _validate_date_like_dtype(dtype) -> None: try: typ = np.datetime_data(dtype)[0] except ValueError as e: raise TypeError(e) from e if typ not in ['generic', 'ns']: raise ValueError(f'{dtype.name!r} is too specific of a frequency, try passing {dtype.type.__name__!r}') def validate_all_hashable(*args, error_name: str | None=None) -> None: if not all((is_hashable(arg) for arg in args)): if error_name: raise TypeError(f'{error_name} must be a hashable type') raise TypeError('All elements must be hashable') def pandas_dtype(dtype) -> DtypeObj: if isinstance(dtype, np.ndarray): return dtype.dtype elif isinstance(dtype, (np.dtype, ExtensionDtype)): return dtype result = registry.find(dtype) if result is not None: if isinstance(result, 
type): warnings.warn(f'Instantiating {result.__name__} without any arguments.Pass a {result.__name__} instance to silence this warning.', UserWarning, stacklevel=find_stack_level()) result = result() return result try: with warnings.catch_warnings(): warnings.simplefilter('always', DeprecationWarning) npdtype = np.dtype(dtype) except SyntaxError as err: raise TypeError(f"data type '{dtype}' not understood") from err if is_hashable(dtype) and dtype in [object, np.object_, 'object', 'O', 'object_']: return npdtype elif npdtype.kind == 'O': raise TypeError(f"dtype '{dtype}' not understood") return npdtype def is_all_strings(value: ArrayLike) -> bool: dtype = value.dtype if isinstance(dtype, np.dtype): if len(value) == 0: return dtype == np.dtype('object') else: return dtype == np.dtype('object') and lib.is_string_array(np.asarray(value), skipna=False) elif isinstance(dtype, CategoricalDtype): return dtype.categories.inferred_type == 'string' return dtype == 'string' __all__ = ['classes', 'DT64NS_DTYPE', 'ensure_float64', 'ensure_python_int', 'ensure_str', 'infer_dtype_from_object', 'INT64_DTYPE', 'is_1d_only_ea_dtype', 'is_all_strings', 'is_any_real_numeric_dtype', 'is_array_like', 'is_bool', 'is_bool_dtype', 'is_categorical_dtype', 'is_complex', 'is_complex_dtype', 'is_dataclass', 'is_datetime64_any_dtype', 'is_datetime64_dtype', 'is_datetime64_ns_dtype', 'is_datetime64tz_dtype', 'is_decimal', 'is_dict_like', 'is_dtype_equal', 'is_ea_or_datetimelike_dtype', 'is_extension_array_dtype', 'is_file_like', 'is_float_dtype', 'is_int64_dtype', 'is_integer_dtype', 'is_interval_dtype', 'is_iterator', 'is_named_tuple', 'is_nested_list_like', 'is_number', 'is_numeric_dtype', 'is_object_dtype', 'is_period_dtype', 'is_re', 'is_re_compilable', 'is_scipy_sparse', 'is_sequence', 'is_signed_integer_dtype', 'is_sparse', 'is_string_dtype', 'is_string_or_object_np_dtype', 'is_timedelta64_dtype', 'is_timedelta64_ns_dtype', 'is_unsigned_integer_dtype', 'needs_i8_conversion', 'pandas_dtype', 'TD64NS_DTYPE', 'validate_all_hashable'] # File: pandas-main/pandas/core/dtypes/concat.py """""" from __future__ import annotations from typing import TYPE_CHECKING, cast import numpy as np from pandas._libs import lib from pandas.core.dtypes.astype import astype_array from pandas.core.dtypes.cast import common_dtype_categorical_compat, find_common_type, np_find_common_type from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.generic import ABCCategoricalIndex, ABCSeries if TYPE_CHECKING: from collections.abc import Sequence from pandas._typing import ArrayLike, AxisInt, DtypeObj from pandas.core.arrays import Categorical, ExtensionArray def _is_nonempty(x: ArrayLike, axis: AxisInt) -> bool: if x.ndim <= axis: return True return x.shape[axis] > 0 def concat_compat(to_concat: Sequence[ArrayLike], axis: AxisInt=0, ea_compat_axis: bool=False) -> ArrayLike: if len(to_concat) and lib.dtypes_all_equal([obj.dtype for obj in to_concat]): obj = to_concat[0] if isinstance(obj, np.ndarray): to_concat_arrs = cast('Sequence[np.ndarray]', to_concat) return np.concatenate(to_concat_arrs, axis=axis) to_concat_eas = cast('Sequence[ExtensionArray]', to_concat) if ea_compat_axis: return obj._concat_same_type(to_concat_eas) elif axis == 0: return obj._concat_same_type(to_concat_eas) else: return obj._concat_same_type(to_concat_eas, axis=axis) non_empties = [x for x in to_concat if _is_nonempty(x, axis)] (any_ea, kinds, target_dtype) = _get_result_dtype(to_concat, non_empties) if target_dtype is not None: to_concat = 
[astype_array(arr, target_dtype, copy=False) for arr in to_concat] if not isinstance(to_concat[0], np.ndarray): to_concat_eas = cast('Sequence[ExtensionArray]', to_concat) cls = type(to_concat[0]) if ea_compat_axis or axis == 0: return cls._concat_same_type(to_concat_eas) else: return cls._concat_same_type(to_concat_eas, axis=axis) else: to_concat_arrs = cast('Sequence[np.ndarray]', to_concat) result = np.concatenate(to_concat_arrs, axis=axis) if not any_ea and 'b' in kinds and (result.dtype.kind in 'iuf'): result = result.astype(object, copy=False) return result def _get_result_dtype(to_concat: Sequence[ArrayLike], non_empties: Sequence[ArrayLike]) -> tuple[bool, set[str], DtypeObj | None]: target_dtype = None dtypes = {obj.dtype for obj in to_concat} kinds = {obj.dtype.kind for obj in to_concat} any_ea = any((not isinstance(x, np.ndarray) for x in to_concat)) if any_ea: if len(dtypes) != 1: target_dtype = find_common_type([x.dtype for x in to_concat]) target_dtype = common_dtype_categorical_compat(to_concat, target_dtype) elif not len(non_empties): if len(kinds) != 1: if not len(kinds - {'i', 'u', 'f'}) or not len(kinds - {'b', 'i', 'u'}): pass else: target_dtype = np.dtype(object) kinds = {'o'} else: target_dtype = np_find_common_type(*dtypes) return (any_ea, kinds, target_dtype) def union_categoricals(to_union, sort_categories: bool=False, ignore_order: bool=False) -> Categorical: from pandas import Categorical from pandas.core.arrays.categorical import recode_for_categories if len(to_union) == 0: raise ValueError('No Categoricals to union') def _maybe_unwrap(x): if isinstance(x, (ABCCategoricalIndex, ABCSeries)): return x._values elif isinstance(x, Categorical): return x else: raise TypeError('all components to combine must be Categorical') to_union = [_maybe_unwrap(x) for x in to_union] first = to_union[0] if not lib.dtypes_all_equal([obj.categories.dtype for obj in to_union]): raise TypeError('dtype of categories must be the same') ordered = False if all((first._categories_match_up_to_permutation(other) for other in to_union[1:])): categories = first.categories ordered = first.ordered all_codes = [first._encode_with_my_categories(x)._codes for x in to_union] new_codes = np.concatenate(all_codes) if sort_categories and (not ignore_order) and ordered: raise TypeError('Cannot use sort_categories=True with ordered Categoricals') if sort_categories and (not categories.is_monotonic_increasing): categories = categories.sort_values() indexer = categories.get_indexer(first.categories) from pandas.core.algorithms import take_nd new_codes = take_nd(indexer, new_codes, fill_value=-1) elif ignore_order or all((not c.ordered for c in to_union)): cats = first.categories.append([c.categories for c in to_union[1:]]) categories = cats.unique() if sort_categories: categories = categories.sort_values() new_codes = [recode_for_categories(c.codes, c.categories, categories) for c in to_union] new_codes = np.concatenate(new_codes) else: if all((c.ordered for c in to_union)): msg = 'to union ordered Categoricals, all categories must be the same' raise TypeError(msg) raise TypeError('Categorical.ordered must be the same') if ignore_order: ordered = False dtype = CategoricalDtype(categories=categories, ordered=ordered) return Categorical._simple_new(new_codes, dtype=dtype) # File: pandas-main/pandas/core/dtypes/dtypes.py """""" from __future__ import annotations from datetime import date, datetime, time, timedelta from decimal import Decimal import re from typing import TYPE_CHECKING, Any, cast import 
warnings import zoneinfo import numpy as np from pandas._config.config import get_option from pandas._libs import lib, missing as libmissing from pandas._libs.interval import Interval from pandas._libs.properties import cache_readonly from pandas._libs.tslibs import BaseOffset, NaT, NaTType, Period, Timedelta, Timestamp, timezones, to_offset, tz_compare from pandas._libs.tslibs.dtypes import PeriodDtypeBase, abbrev_to_npy_unit from pandas._libs.tslibs.offsets import BDay from pandas.compat import pa_version_under10p1 from pandas.errors import PerformanceWarning from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ExtensionDtype, StorageExtensionDtype, register_extension_dtype from pandas.core.dtypes.generic import ABCCategoricalIndex, ABCIndex, ABCRangeIndex from pandas.core.dtypes.inference import is_bool, is_list_like if not pa_version_under10p1: import pyarrow as pa if TYPE_CHECKING: from collections.abc import MutableMapping from datetime import tzinfo import pyarrow as pa from pandas._typing import Dtype, DtypeObj, IntervalClosedType, Ordered, Scalar, Self, npt, type_t from pandas import Categorical, CategoricalIndex, DatetimeIndex, Index, IntervalIndex, PeriodIndex from pandas.core.arrays import BaseMaskedArray, DatetimeArray, IntervalArray, NumpyExtensionArray, PeriodArray, SparseArray from pandas.core.arrays.arrow import ArrowExtensionArray str_type = str class PandasExtensionDtype(ExtensionDtype): type: Any kind: Any subdtype = None str: str_type num = 100 shape: tuple[int, ...] = () itemsize = 8 base: DtypeObj | None = None isbuiltin = 0 isnative = 0 _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} def __repr__(self) -> str_type: return str(self) def __hash__(self) -> int: raise NotImplementedError('sub-classes should implement an __hash__ method') def __getstate__(self) -> dict[str_type, Any]: return {k: getattr(self, k, None) for k in self._metadata} @classmethod def reset_cache(cls) -> None: cls._cache_dtypes = {} class CategoricalDtypeType(type): @register_extension_dtype class CategoricalDtype(PandasExtensionDtype, ExtensionDtype): name = 'category' type: type[CategoricalDtypeType] = CategoricalDtypeType kind: str_type = 'O' str = '|O08' base = np.dtype('O') _metadata = ('categories', 'ordered') _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} _supports_2d = False _can_fast_transpose = False def __init__(self, categories=None, ordered: Ordered=False) -> None: self._finalize(categories, ordered, fastpath=False) @classmethod def _from_fastpath(cls, categories=None, ordered: bool | None=None) -> CategoricalDtype: self = cls.__new__(cls) self._finalize(categories, ordered, fastpath=True) return self @classmethod def _from_categorical_dtype(cls, dtype: CategoricalDtype, categories=None, ordered: Ordered | None=None) -> CategoricalDtype: if categories is ordered is None: return dtype if categories is None: categories = dtype.categories if ordered is None: ordered = dtype.ordered return cls(categories, ordered) @classmethod def _from_values_or_dtype(cls, values=None, categories=None, ordered: bool | None=None, dtype: Dtype | None=None) -> CategoricalDtype: if dtype is not None: if isinstance(dtype, str): if dtype == 'category': if ordered is None and cls.is_dtype(values): ordered = values.dtype.ordered dtype = CategoricalDtype(categories, ordered) else: raise ValueError(f'Unknown dtype {dtype!r}') elif categories is not None or ordered is not None: raise ValueError('Cannot specify `categories` or `ordered` together with `dtype`.') 
elif not isinstance(dtype, CategoricalDtype): raise ValueError(f'Cannot not construct CategoricalDtype from {dtype}') elif cls.is_dtype(values): dtype = values.dtype._from_categorical_dtype(values.dtype, categories, ordered) else: dtype = CategoricalDtype(categories, ordered) return cast(CategoricalDtype, dtype) @classmethod def construct_from_string(cls, string: str_type) -> CategoricalDtype: if not isinstance(string, str): raise TypeError(f"'construct_from_string' expects a string, got {type(string)}") if string != cls.name: raise TypeError(f"Cannot construct a 'CategoricalDtype' from '{string}'") return cls(ordered=None) def _finalize(self, categories, ordered: Ordered, fastpath: bool=False) -> None: if ordered is not None: self.validate_ordered(ordered) if categories is not None: categories = self.validate_categories(categories, fastpath=fastpath) self._categories = categories self._ordered = ordered def __setstate__(self, state: MutableMapping[str_type, Any]) -> None: self._categories = state.pop('categories', None) self._ordered = state.pop('ordered', False) def __hash__(self) -> int: if self.categories is None: if self.ordered: return -1 else: return -2 return int(self._hash_categories) def __eq__(self, other: object) -> bool: if isinstance(other, str): return other == self.name elif other is self: return True elif not (hasattr(other, 'ordered') and hasattr(other, 'categories')): return False elif self.categories is None or other.categories is None: return self.categories is other.categories elif self.ordered or other.ordered: return self.ordered == other.ordered and self.categories.equals(other.categories) else: left = self.categories right = other.categories if not left.dtype == right.dtype: return False if len(left) != len(right): return False if self.categories.equals(other.categories): return True if left.dtype != object: indexer = left.get_indexer(right) return bool((indexer != -1).all()) return set(left) == set(right) def __repr__(self) -> str_type: if self.categories is None: data = 'None' dtype = 'None' else: data = self.categories._format_data(name=type(self).__name__) if isinstance(self.categories, ABCRangeIndex): data = str(self.categories._range) data = data.rstrip(', ') dtype = self.categories.dtype return f'CategoricalDtype(categories={data}, ordered={self.ordered}, categories_dtype={dtype})' @cache_readonly def _hash_categories(self) -> int: from pandas.core.util.hashing import combine_hash_arrays, hash_array, hash_tuples categories = self.categories ordered = self.ordered if len(categories) and isinstance(categories[0], tuple): cat_list = list(categories) cat_array = hash_tuples(cat_list) else: if categories.dtype == 'O' and len({type(x) for x in categories}) != 1: hashed = hash((tuple(categories), ordered)) return hashed if DatetimeTZDtype.is_dtype(categories.dtype): categories = categories.view('datetime64[ns]') cat_array = hash_array(np.asarray(categories), categorize=False) if ordered: cat_array = np.vstack([cat_array, np.arange(len(cat_array), dtype=cat_array.dtype)]) else: cat_array = cat_array.reshape(1, len(cat_array)) combined_hashed = combine_hash_arrays(iter(cat_array), num_items=len(cat_array)) return np.bitwise_xor.reduce(combined_hashed) @classmethod def construct_array_type(cls) -> type_t[Categorical]: from pandas import Categorical return Categorical @staticmethod def validate_ordered(ordered: Ordered) -> None: if not is_bool(ordered): raise TypeError("'ordered' must either be 'True' or 'False'") @staticmethod def validate_categories(categories, 
fastpath: bool=False) -> Index: from pandas.core.indexes.base import Index if not fastpath and (not is_list_like(categories)): raise TypeError(f"Parameter 'categories' must be list-like, was {categories!r}") if not isinstance(categories, ABCIndex): categories = Index._with_infer(categories, tupleize_cols=False) if not fastpath: if categories.hasnans: raise ValueError('Categorical categories cannot be null') if not categories.is_unique: raise ValueError('Categorical categories must be unique') if isinstance(categories, ABCCategoricalIndex): categories = categories.categories return categories def update_dtype(self, dtype: str_type | CategoricalDtype) -> CategoricalDtype: if isinstance(dtype, str) and dtype == 'category': return self elif not self.is_dtype(dtype): raise ValueError(f'a CategoricalDtype must be passed to perform an update, got {dtype!r}') else: dtype = cast(CategoricalDtype, dtype) if isinstance(dtype, CategoricalDtype) and dtype.categories is not None and (dtype.ordered is not None): return dtype new_categories = dtype.categories if dtype.categories is not None else self.categories new_ordered = dtype.ordered if dtype.ordered is not None else self.ordered return CategoricalDtype(new_categories, new_ordered) @property def categories(self) -> Index: return self._categories @property def ordered(self) -> Ordered: return self._ordered @property def _is_boolean(self) -> bool: from pandas.core.dtypes.common import is_bool_dtype return is_bool_dtype(self.categories) def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: if all((isinstance(x, CategoricalDtype) for x in dtypes)): first = dtypes[0] if all((first == other for other in dtypes[1:])): return first non_init_cats = [isinstance(x, CategoricalDtype) and x.categories is None for x in dtypes] if all(non_init_cats): return self elif any(non_init_cats): return None subtypes = (x.subtype if isinstance(x, SparseDtype) else x for x in dtypes) non_cat_dtypes = [x.categories.dtype if isinstance(x, CategoricalDtype) else x for x in subtypes] from pandas.core.dtypes.cast import find_common_type return find_common_type(non_cat_dtypes) @cache_readonly def index_class(self) -> type_t[CategoricalIndex]: from pandas import CategoricalIndex return CategoricalIndex @register_extension_dtype class DatetimeTZDtype(PandasExtensionDtype): type: type[Timestamp] = Timestamp kind: str_type = 'M' num = 101 _metadata = ('unit', 'tz') _match = re.compile('(datetime64|M8)\\[(?P<unit>.+), (?P<tz>.+)\\]') _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} _supports_2d = True _can_fast_transpose = True @property def na_value(self) -> NaTType: return NaT @cache_readonly def base(self) -> DtypeObj: return np.dtype(f'M8[{self.unit}]') @cache_readonly def str(self) -> str: return f'|M8[{self.unit}]' def __init__(self, unit: str_type | DatetimeTZDtype='ns', tz=None) -> None: if isinstance(unit, DatetimeTZDtype): (unit, tz) = (unit.unit, unit.tz) if unit != 'ns': if isinstance(unit, str) and tz is None: result = type(self).construct_from_string(unit) unit = result.unit tz = result.tz msg = f"Passing a dtype alias like 'datetime64[ns, {tz}]' to DatetimeTZDtype is no longer supported. Use 'DatetimeTZDtype.construct_from_string()' instead."
raise ValueError(msg) if unit not in ['s', 'ms', 'us', 'ns']: raise ValueError('DatetimeTZDtype only supports s, ms, us, ns units') if tz: tz = timezones.maybe_get_tz(tz) tz = timezones.tz_standardize(tz) elif tz is not None: raise zoneinfo.ZoneInfoNotFoundError(tz) if tz is None: raise TypeError("A 'tz' is required.") self._unit = unit self._tz = tz @cache_readonly def _creso(self) -> int: return abbrev_to_npy_unit(self.unit) @property def unit(self) -> str_type: return self._unit @property def tz(self) -> tzinfo: return self._tz @classmethod def construct_array_type(cls) -> type_t[DatetimeArray]: from pandas.core.arrays import DatetimeArray return DatetimeArray @classmethod def construct_from_string(cls, string: str_type) -> DatetimeTZDtype: if not isinstance(string, str): raise TypeError(f"'construct_from_string' expects a string, got {type(string)}") msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'" match = cls._match.match(string) if match: d = match.groupdict() try: return cls(unit=d['unit'], tz=d['tz']) except (KeyError, TypeError, ValueError) as err: raise TypeError(msg) from err raise TypeError(msg) def __str__(self) -> str_type: return f'datetime64[{self.unit}, {self.tz}]' @property def name(self) -> str_type: return str(self) def __hash__(self) -> int: return hash(str(self)) def __eq__(self, other: object) -> bool: if isinstance(other, str): if other.startswith('M8['): other = f'datetime64[{other[3:]}' return other == self.name return isinstance(other, DatetimeTZDtype) and self.unit == other.unit and tz_compare(self.tz, other.tz) def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> DatetimeArray: import pyarrow from pandas.core.arrays import DatetimeArray array = array.cast(pyarrow.timestamp(unit=self._unit), safe=True) if isinstance(array, pyarrow.Array): np_arr = array.to_numpy(zero_copy_only=False) else: np_arr = array.to_numpy() return DatetimeArray._simple_new(np_arr, dtype=self) def __setstate__(self, state) -> None: self._tz = state['tz'] self._unit = state['unit'] def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: if all((isinstance(t, DatetimeTZDtype) and t.tz == self.tz for t in dtypes)): np_dtype = np.max([cast(DatetimeTZDtype, t).base for t in [self, *dtypes]]) unit = np.datetime_data(np_dtype)[0] return type(self)(unit=unit, tz=self.tz) return super()._get_common_dtype(dtypes) @cache_readonly def index_class(self) -> type_t[DatetimeIndex]: from pandas import DatetimeIndex return DatetimeIndex @register_extension_dtype class PeriodDtype(PeriodDtypeBase, PandasExtensionDtype): type: type[Period] = Period kind: str_type = 'O' str = '|O08' base = np.dtype('O') num = 102 _metadata = ('freq',) _match = re.compile('(P|p)eriod\\[(?P<freq>.+)\\]') _cache_dtypes: dict[BaseOffset, int] = {} __hash__ = PeriodDtypeBase.__hash__ _freq: BaseOffset _supports_2d = True _can_fast_transpose = True def __new__(cls, freq) -> PeriodDtype: if isinstance(freq, PeriodDtype): return freq if not isinstance(freq, BaseOffset): freq = cls._parse_dtype_strict(freq) if isinstance(freq, BDay): warnings.warn("PeriodDtype[B] is deprecated and will be removed in a future version. 
Use a DatetimeIndex with freq='B' instead", FutureWarning, stacklevel=find_stack_level()) try: dtype_code = cls._cache_dtypes[freq] except KeyError: dtype_code = freq._period_dtype_code cls._cache_dtypes[freq] = dtype_code u = PeriodDtypeBase.__new__(cls, dtype_code, freq.n) u._freq = freq return u def __reduce__(self) -> tuple[type_t[Self], tuple[str_type]]: return (type(self), (self.name,)) @property def freq(self) -> BaseOffset: return self._freq @classmethod def _parse_dtype_strict(cls, freq: str_type) -> BaseOffset: if isinstance(freq, str): if freq.startswith(('Period[', 'period[')): m = cls._match.search(freq) if m is not None: freq = m.group('freq') freq_offset = to_offset(freq, is_period=True) if freq_offset is not None: return freq_offset raise TypeError(f'PeriodDtype argument should be string or BaseOffset, got {type(freq).__name__}') @classmethod def construct_from_string(cls, string: str_type) -> PeriodDtype: if isinstance(string, str) and string.startswith(('period[', 'Period[')) or isinstance(string, BaseOffset): try: return cls(freq=string) except ValueError: pass if isinstance(string, str): msg = f"Cannot construct a 'PeriodDtype' from '{string}'" else: msg = f"'construct_from_string' expects a string, got {type(string)}" raise TypeError(msg) def __str__(self) -> str_type: return self.name @property def name(self) -> str_type: return f'period[{self._freqstr}]' @property def na_value(self) -> NaTType: return NaT def __eq__(self, other: object) -> bool: if isinstance(other, str): return other[:1].lower() + other[1:] == self.name return super().__eq__(other) def __ne__(self, other: object) -> bool: return not self.__eq__(other) @classmethod def is_dtype(cls, dtype: object) -> bool: if isinstance(dtype, str): if dtype.startswith(('period[', 'Period[')): try: return cls._parse_dtype_strict(dtype) is not None except ValueError: return False else: return False return super().is_dtype(dtype) @classmethod def construct_array_type(cls) -> type_t[PeriodArray]: from pandas.core.arrays import PeriodArray return PeriodArray def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> PeriodArray: import pyarrow from pandas.core.arrays import PeriodArray from pandas.core.arrays.arrow._arrow_utils import pyarrow_array_to_numpy_and_mask if isinstance(array, pyarrow.Array): chunks = [array] else: chunks = array.chunks results = [] for arr in chunks: (data, mask) = pyarrow_array_to_numpy_and_mask(arr, dtype=np.dtype(np.int64)) parr = PeriodArray(data.copy(), dtype=self, copy=False) parr[~mask] = NaT results.append(parr) if not results: return PeriodArray(np.array([], dtype='int64'), dtype=self, copy=False) return PeriodArray._concat_same_type(results) @cache_readonly def index_class(self) -> type_t[PeriodIndex]: from pandas import PeriodIndex return PeriodIndex @register_extension_dtype class IntervalDtype(PandasExtensionDtype): name = 'interval' kind: str_type = 'O' str = '|O08' base = np.dtype('O') num = 103 _metadata = ('subtype', 'closed') _match = re.compile('(I|i)nterval\\[(?P<subtype>[^,]+(\\[.+\\])?)(, (?P<closed>(right|left|both|neither)))?\\]') _cache_dtypes: dict[str_type, PandasExtensionDtype] = {} _subtype: None | np.dtype _closed: IntervalClosedType | None def __init__(self, subtype=None, closed: IntervalClosedType | None=None) -> None: from pandas.core.dtypes.common import is_string_dtype, pandas_dtype if closed is not None and closed not in {'right', 'left', 'both', 'neither'}: raise ValueError("closed must be one of 'right', 'left', 'both', 'neither'") if isinstance(subtype, 
IntervalDtype): if closed is not None and closed != subtype.closed: raise ValueError("dtype.closed and 'closed' do not match. Try IntervalDtype(dtype.subtype, closed) instead.") self._subtype = subtype._subtype self._closed = subtype._closed elif subtype is None: self._subtype = None self._closed = closed elif isinstance(subtype, str) and subtype.lower() == 'interval': self._subtype = None self._closed = closed else: if isinstance(subtype, str): m = IntervalDtype._match.search(subtype) if m is not None: gd = m.groupdict() subtype = gd['subtype'] if gd.get('closed', None) is not None: if closed is not None: if closed != gd['closed']: raise ValueError("'closed' keyword does not match value specified in dtype string") closed = gd['closed'] try: subtype = pandas_dtype(subtype) except TypeError as err: raise TypeError('could not construct IntervalDtype') from err if CategoricalDtype.is_dtype(subtype) or is_string_dtype(subtype): msg = 'category, object, and string subtypes are not supported for IntervalDtype' raise TypeError(msg) self._subtype = subtype self._closed = closed @cache_readonly def _can_hold_na(self) -> bool: subtype = self._subtype if subtype is None: raise NotImplementedError('_can_hold_na is not defined for partially-initialized IntervalDtype') if subtype.kind in 'iu': return False return True @property def closed(self) -> IntervalClosedType: return self._closed @property def subtype(self): return self._subtype @classmethod def construct_array_type(cls) -> type[IntervalArray]: from pandas.core.arrays import IntervalArray return IntervalArray @classmethod def construct_from_string(cls, string: str_type) -> IntervalDtype: if not isinstance(string, str): raise TypeError(f"'construct_from_string' expects a string, got {type(string)}") if string.lower() == 'interval' or cls._match.search(string) is not None: return cls(string) msg = f"Cannot construct a 'IntervalDtype' from '{string}'.\n\nIncorrectly formatted string passed to constructor. 
Valid formats include Interval or Interval[dtype] where dtype is numeric, datetime, or timedelta" raise TypeError(msg) @property def type(self) -> type[Interval]: return Interval def __str__(self) -> str_type: if self.subtype is None: return 'interval' if self.closed is None: return f'interval[{self.subtype}]' return f'interval[{self.subtype}, {self.closed}]' def __hash__(self) -> int: return hash(str(self)) def __eq__(self, other: object) -> bool: if isinstance(other, str): return other.lower() in (self.name.lower(), str(self).lower()) elif not isinstance(other, IntervalDtype): return False elif self.subtype is None or other.subtype is None: return True elif self.closed != other.closed: return False else: return self.subtype == other.subtype def __setstate__(self, state) -> None: self._subtype = state['subtype'] self._closed = state.pop('closed', None) @classmethod def is_dtype(cls, dtype: object) -> bool: if isinstance(dtype, str): if dtype.lower().startswith('interval'): try: return cls.construct_from_string(dtype) is not None except (ValueError, TypeError): return False else: return False return super().is_dtype(dtype) def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> IntervalArray: import pyarrow from pandas.core.arrays import IntervalArray if isinstance(array, pyarrow.Array): chunks = [array] else: chunks = array.chunks results = [] for arr in chunks: if isinstance(arr, pyarrow.ExtensionArray): arr = arr.storage left = np.asarray(arr.field('left'), dtype=self.subtype) right = np.asarray(arr.field('right'), dtype=self.subtype) iarr = IntervalArray.from_arrays(left, right, closed=self.closed) results.append(iarr) if not results: return IntervalArray.from_arrays(np.array([], dtype=self.subtype), np.array([], dtype=self.subtype), closed=self.closed) return IntervalArray._concat_same_type(results) def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: if not all((isinstance(x, IntervalDtype) for x in dtypes)): return None closed = cast('IntervalDtype', dtypes[0]).closed if not all((cast('IntervalDtype', x).closed == closed for x in dtypes)): return np.dtype(object) from pandas.core.dtypes.cast import find_common_type common = find_common_type([cast('IntervalDtype', x).subtype for x in dtypes]) if common == object: return np.dtype(object) return IntervalDtype(common, closed=closed) @cache_readonly def index_class(self) -> type_t[IntervalIndex]: from pandas import IntervalIndex return IntervalIndex class NumpyEADtype(ExtensionDtype): _metadata = ('_dtype',) _supports_2d = False _can_fast_transpose = False def __init__(self, dtype: npt.DTypeLike | NumpyEADtype | None) -> None: if isinstance(dtype, NumpyEADtype): dtype = dtype.numpy_dtype self._dtype = np.dtype(dtype) def __repr__(self) -> str: return f'NumpyEADtype({self.name!r})' @property def numpy_dtype(self) -> np.dtype: return self._dtype @property def name(self) -> str: return self._dtype.name @property def type(self) -> type[np.generic]: return self._dtype.type @property def _is_numeric(self) -> bool: return self.kind in set('biufc') @property def _is_boolean(self) -> bool: return self.kind == 'b' @classmethod def construct_from_string(cls, string: str) -> NumpyEADtype: try: dtype = np.dtype(string) except TypeError as err: if not isinstance(string, str): msg = f"'construct_from_string' expects a string, got {type(string)}" else: msg = f"Cannot construct a 'NumpyEADtype' from '{string}'" raise TypeError(msg) from err return cls(dtype) @classmethod def construct_array_type(cls) -> 
type_t[NumpyExtensionArray]: from pandas.core.arrays import NumpyExtensionArray return NumpyExtensionArray @property def kind(self) -> str: return self._dtype.kind @property def itemsize(self) -> int: return self._dtype.itemsize class BaseMaskedDtype(ExtensionDtype): base = None type: type _internal_fill_value: Scalar @property def _truthy_value(self): if self.kind == 'f': return 1.0 if self.kind in 'iu': return 1 return True @property def _falsey_value(self): if self.kind == 'f': return 0.0 if self.kind in 'iu': return 0 return False @property def na_value(self) -> libmissing.NAType: return libmissing.NA @cache_readonly def numpy_dtype(self) -> np.dtype: return np.dtype(self.type) @cache_readonly def kind(self) -> str: return self.numpy_dtype.kind @cache_readonly def itemsize(self) -> int: return self.numpy_dtype.itemsize @classmethod def construct_array_type(cls) -> type_t[BaseMaskedArray]: raise NotImplementedError @classmethod def from_numpy_dtype(cls, dtype: np.dtype) -> BaseMaskedDtype: if dtype.kind == 'b': from pandas.core.arrays.boolean import BooleanDtype return BooleanDtype() elif dtype.kind in 'iu': from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE return NUMPY_INT_TO_DTYPE[dtype] elif dtype.kind == 'f': from pandas.core.arrays.floating import NUMPY_FLOAT_TO_DTYPE return NUMPY_FLOAT_TO_DTYPE[dtype] else: raise NotImplementedError(dtype) def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: from pandas.core.dtypes.cast import find_common_type new_dtype = find_common_type([dtype.numpy_dtype if isinstance(dtype, BaseMaskedDtype) else dtype for dtype in dtypes]) if not isinstance(new_dtype, np.dtype): return None try: return type(self).from_numpy_dtype(new_dtype) except (KeyError, NotImplementedError): return None @register_extension_dtype class SparseDtype(ExtensionDtype): _is_immutable = True _metadata = ('_dtype', '_fill_value', '_is_na_fill_value') def __init__(self, dtype: Dtype=np.float64, fill_value: Any=None) -> None: if isinstance(dtype, type(self)): if fill_value is None: fill_value = dtype.fill_value dtype = dtype.subtype from pandas.core.dtypes.common import is_string_dtype, pandas_dtype from pandas.core.dtypes.missing import na_value_for_dtype dtype = pandas_dtype(dtype) if is_string_dtype(dtype): dtype = np.dtype('object') if not isinstance(dtype, np.dtype): raise TypeError('SparseDtype subtype must be a numpy dtype') if fill_value is None: fill_value = na_value_for_dtype(dtype) self._dtype = dtype self._fill_value = fill_value self._check_fill_value() def __hash__(self) -> int: return super().__hash__() def __eq__(self, other: object) -> bool: if isinstance(other, str): try: other = self.construct_from_string(other) except TypeError: return False if isinstance(other, type(self)): subtype = self.subtype == other.subtype if self._is_na_fill_value or other._is_na_fill_value: fill_value = isinstance(self.fill_value, type(other.fill_value)) or isinstance(other.fill_value, type(self.fill_value)) else: with warnings.catch_warnings(): warnings.filterwarnings('ignore', 'elementwise comparison failed', category=DeprecationWarning) fill_value = self.fill_value == other.fill_value return subtype and fill_value return False @property def fill_value(self): return self._fill_value def _check_fill_value(self) -> None: if not lib.is_scalar(self._fill_value): raise ValueError(f'fill_value must be a scalar. 
Got {self._fill_value} instead') from pandas.core.dtypes.cast import can_hold_element from pandas.core.dtypes.missing import is_valid_na_for_dtype, isna from pandas.core.construction import ensure_wrapped_if_datetimelike val = self._fill_value if isna(val): if not is_valid_na_for_dtype(val, self.subtype): raise ValueError('fill_value must be a valid value for the SparseDtype.subtype') else: dummy = np.empty(0, dtype=self.subtype) dummy = ensure_wrapped_if_datetimelike(dummy) if not can_hold_element(dummy, val): raise ValueError('fill_value must be a valid value for the SparseDtype.subtype') @property def _is_na_fill_value(self) -> bool: from pandas import isna return isna(self.fill_value) @property def _is_numeric(self) -> bool: return not self.subtype == object @property def _is_boolean(self) -> bool: return self.subtype.kind == 'b' @property def kind(self) -> str: return self.subtype.kind @property def type(self): return self.subtype.type @property def subtype(self): return self._dtype @property def name(self) -> str: return f'Sparse[{self.subtype.name}, {self.fill_value!r}]' def __repr__(self) -> str: return self.name @classmethod def construct_array_type(cls) -> type_t[SparseArray]: from pandas.core.arrays.sparse.array import SparseArray return SparseArray @classmethod def construct_from_string(cls, string: str) -> SparseDtype: if not isinstance(string, str): raise TypeError(f"'construct_from_string' expects a string, got {type(string)}") msg = f"Cannot construct a 'SparseDtype' from '{string}'" if string.startswith('Sparse'): try: (sub_type, has_fill_value) = cls._parse_subtype(string) except ValueError as err: raise TypeError(msg) from err else: result = SparseDtype(sub_type) msg = f"Cannot construct a 'SparseDtype' from '{string}'.\n\nIt looks like the fill_value in the string is not the default for the dtype. Non-default fill_values are not supported. Use the 'SparseDtype()' constructor instead." 
if has_fill_value and str(result) != string: raise TypeError(msg) return result else: raise TypeError(msg) @staticmethod def _parse_subtype(dtype: str) -> tuple[str, bool]: xpr = re.compile('Sparse\\[(?P<subtype>[^,]*)(, )?(?P<fill_value>.*?)?\\]$') m = xpr.match(dtype) has_fill_value = False if m: subtype = m.groupdict()['subtype'] has_fill_value = bool(m.groupdict()['fill_value']) elif dtype == 'Sparse': subtype = 'float64' else: raise ValueError(f'Cannot parse {dtype}') return (subtype, has_fill_value) @classmethod def is_dtype(cls, dtype: object) -> bool: dtype = getattr(dtype, 'dtype', dtype) if isinstance(dtype, str) and dtype.startswith('Sparse'): (sub_type, _) = cls._parse_subtype(dtype) dtype = np.dtype(sub_type) elif isinstance(dtype, cls): return True return isinstance(dtype, np.dtype) or dtype == 'Sparse' def update_dtype(self, dtype) -> SparseDtype: from pandas.core.dtypes.astype import astype_array from pandas.core.dtypes.common import pandas_dtype cls = type(self) dtype = pandas_dtype(dtype) if not isinstance(dtype, cls): if not isinstance(dtype, np.dtype): raise TypeError('sparse arrays of extension dtypes not supported') fv_asarray = np.atleast_1d(np.array(self.fill_value)) fvarr = astype_array(fv_asarray, dtype) fill_value = fvarr[0] dtype = cls(dtype, fill_value=fill_value) return dtype @property def _subtype_with_str(self): if isinstance(self.fill_value, str): return type(self.fill_value) return self.subtype def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: from pandas.core.dtypes.cast import np_find_common_type if any((isinstance(x, ExtensionDtype) and (not isinstance(x, SparseDtype)) for x in dtypes)): return None fill_values = [x.fill_value for x in dtypes if isinstance(x, SparseDtype)] fill_value = fill_values[0] from pandas import isna if get_option('performance_warnings') and (not (len(set(fill_values)) == 1 or isna(fill_values).all())): warnings.warn(f"Concatenating sparse arrays with multiple fill values: '{fill_values}'. Picking the first and converting the rest.", PerformanceWarning, stacklevel=find_stack_level()) np_dtypes = (x.subtype if isinstance(x, SparseDtype) else x for x in dtypes) return SparseDtype(np_find_common_type(*np_dtypes), fill_value=fill_value) @register_extension_dtype class ArrowDtype(StorageExtensionDtype): _metadata = ('storage', 'pyarrow_dtype') def __init__(self, pyarrow_dtype: pa.DataType) -> None: super().__init__('pyarrow') if pa_version_under10p1: raise ImportError('pyarrow>=10.0.1 is required for ArrowDtype') if not isinstance(pyarrow_dtype, pa.DataType): raise ValueError(f'pyarrow_dtype ({pyarrow_dtype}) must be an instance of a pyarrow.DataType. 
Got {type(pyarrow_dtype)} instead.') self.pyarrow_dtype = pyarrow_dtype def __repr__(self) -> str: return self.name def __hash__(self) -> int: return hash(str(self)) def __eq__(self, other: object) -> bool: if not isinstance(other, type(self)): return super().__eq__(other) return self.pyarrow_dtype == other.pyarrow_dtype @property def type(self): pa_type = self.pyarrow_dtype if pa.types.is_integer(pa_type): return int elif pa.types.is_floating(pa_type): return float elif pa.types.is_string(pa_type) or pa.types.is_large_string(pa_type): return str elif pa.types.is_binary(pa_type) or pa.types.is_fixed_size_binary(pa_type) or pa.types.is_large_binary(pa_type): return bytes elif pa.types.is_boolean(pa_type): return bool elif pa.types.is_duration(pa_type): if pa_type.unit == 'ns': return Timedelta else: return timedelta elif pa.types.is_timestamp(pa_type): if pa_type.unit == 'ns': return Timestamp else: return datetime elif pa.types.is_date(pa_type): return date elif pa.types.is_time(pa_type): return time elif pa.types.is_decimal(pa_type): return Decimal elif pa.types.is_dictionary(pa_type): return CategoricalDtypeType elif pa.types.is_list(pa_type) or pa.types.is_large_list(pa_type): return list elif pa.types.is_fixed_size_list(pa_type): return list elif pa.types.is_map(pa_type): return list elif pa.types.is_struct(pa_type): return dict elif pa.types.is_null(pa_type): return type(pa_type) elif isinstance(pa_type, pa.ExtensionType): return type(self)(pa_type.storage_type).type raise NotImplementedError(pa_type) @property def name(self) -> str: return f'{self.pyarrow_dtype!s}[{self.storage}]' @cache_readonly def numpy_dtype(self) -> np.dtype: if pa.types.is_timestamp(self.pyarrow_dtype): return np.dtype(f'datetime64[{self.pyarrow_dtype.unit}]') if pa.types.is_duration(self.pyarrow_dtype): return np.dtype(f'timedelta64[{self.pyarrow_dtype.unit}]') if pa.types.is_string(self.pyarrow_dtype) or pa.types.is_large_string(self.pyarrow_dtype): return np.dtype(str) try: return np.dtype(self.pyarrow_dtype.to_pandas_dtype()) except (NotImplementedError, TypeError): return np.dtype(object) @cache_readonly def kind(self) -> str: if pa.types.is_timestamp(self.pyarrow_dtype): return 'M' return self.numpy_dtype.kind @cache_readonly def itemsize(self) -> int: return self.numpy_dtype.itemsize @classmethod def construct_array_type(cls) -> type_t[ArrowExtensionArray]: from pandas.core.arrays.arrow import ArrowExtensionArray return ArrowExtensionArray @classmethod def construct_from_string(cls, string: str) -> ArrowDtype: if not isinstance(string, str): raise TypeError(f"'construct_from_string' expects a string, got {type(string)}") if not string.endswith('[pyarrow]'): raise TypeError(f"'{string}' must end with '[pyarrow]'") if string == 'string[pyarrow]': raise TypeError('string[pyarrow] should be constructed by StringDtype') base_type = string[:-9] try: pa_dtype = pa.type_for_alias(base_type) except ValueError as err: has_parameters = re.search('[\\[\\(].*[\\]\\)]', base_type) if has_parameters: try: return cls._parse_temporal_dtype_string(base_type) except (NotImplementedError, ValueError): pass raise NotImplementedError(f'Passing pyarrow type specific parameters ({has_parameters.group()}) in the string is not supported. 
Please construct an ArrowDtype object with a pyarrow_dtype instance with specific parameters.') from err raise TypeError(f"'{base_type}' is not a valid pyarrow data type.") from err return cls(pa_dtype) @classmethod def _parse_temporal_dtype_string(cls, string: str) -> ArrowDtype: (head, tail) = string.split('[', 1) if not tail.endswith(']'): raise ValueError tail = tail[:-1] if head == 'timestamp': assert ',' in tail (unit, tz) = tail.split(',', 1) unit = unit.strip() tz = tz.strip() if tz.startswith('tz='): tz = tz[3:] pa_type = pa.timestamp(unit, tz=tz) dtype = cls(pa_type) return dtype raise NotImplementedError(string) @property def _is_numeric(self) -> bool: return pa.types.is_integer(self.pyarrow_dtype) or pa.types.is_floating(self.pyarrow_dtype) or pa.types.is_decimal(self.pyarrow_dtype) @property def _is_boolean(self) -> bool: return pa.types.is_boolean(self.pyarrow_dtype) def _get_common_dtype(self, dtypes: list[DtypeObj]) -> DtypeObj | None: from pandas.core.dtypes.cast import find_common_type null_dtype = type(self)(pa.null()) new_dtype = find_common_type([dtype.numpy_dtype if isinstance(dtype, ArrowDtype) else dtype for dtype in dtypes if dtype != null_dtype]) if not isinstance(new_dtype, np.dtype): return None try: pa_dtype = pa.from_numpy_dtype(new_dtype) return type(self)(pa_dtype) except NotImplementedError: return None def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> ArrowExtensionArray: array_class = self.construct_array_type() arr = array.cast(self.pyarrow_dtype, safe=True) return array_class(arr) # File: pandas-main/pandas/core/dtypes/generic.py """""" from __future__ import annotations from typing import TYPE_CHECKING, Type, cast if TYPE_CHECKING: from pandas import Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Index, IntervalIndex, MultiIndex, PeriodIndex, RangeIndex, Series, TimedeltaIndex from pandas.core.arrays import DatetimeArray, ExtensionArray, NumpyExtensionArray, PeriodArray, TimedeltaArray from pandas.core.generic import NDFrame def create_pandas_abc_type(name, attr, comp) -> type: def _check(inst) -> bool: return getattr(inst, attr, '_typ') in comp @classmethod def _instancecheck(cls, inst) -> bool: return _check(inst) and (not isinstance(inst, type)) @classmethod def _subclasscheck(cls, inst) -> bool: if not isinstance(inst, type): raise TypeError('issubclass() arg 1 must be a class') return _check(inst) dct = {'__instancecheck__': _instancecheck, '__subclasscheck__': _subclasscheck} meta = type('ABCBase', (type,), dct) return meta(name, (), dct) ABCRangeIndex = cast('Type[RangeIndex]', create_pandas_abc_type('ABCRangeIndex', '_typ', ('rangeindex',))) ABCMultiIndex = cast('Type[MultiIndex]', create_pandas_abc_type('ABCMultiIndex', '_typ', ('multiindex',))) ABCDatetimeIndex = cast('Type[DatetimeIndex]', create_pandas_abc_type('ABCDatetimeIndex', '_typ', ('datetimeindex',))) ABCTimedeltaIndex = cast('Type[TimedeltaIndex]', create_pandas_abc_type('ABCTimedeltaIndex', '_typ', ('timedeltaindex',))) ABCPeriodIndex = cast('Type[PeriodIndex]', create_pandas_abc_type('ABCPeriodIndex', '_typ', ('periodindex',))) ABCCategoricalIndex = cast('Type[CategoricalIndex]', create_pandas_abc_type('ABCCategoricalIndex', '_typ', ('categoricalindex',))) ABCIntervalIndex = cast('Type[IntervalIndex]', create_pandas_abc_type('ABCIntervalIndex', '_typ', ('intervalindex',))) ABCIndex = cast('Type[Index]', create_pandas_abc_type('ABCIndex', '_typ', {'index', 'rangeindex', 'multiindex', 'datetimeindex', 'timedeltaindex', 'periodindex', 'categoricalindex', 
'intervalindex'})) ABCNDFrame = cast('Type[NDFrame]', create_pandas_abc_type('ABCNDFrame', '_typ', ('series', 'dataframe'))) ABCSeries = cast('Type[Series]', create_pandas_abc_type('ABCSeries', '_typ', ('series',))) ABCDataFrame = cast('Type[DataFrame]', create_pandas_abc_type('ABCDataFrame', '_typ', ('dataframe',))) ABCCategorical = cast('Type[Categorical]', create_pandas_abc_type('ABCCategorical', '_typ', 'categorical')) ABCDatetimeArray = cast('Type[DatetimeArray]', create_pandas_abc_type('ABCDatetimeArray', '_typ', 'datetimearray')) ABCTimedeltaArray = cast('Type[TimedeltaArray]', create_pandas_abc_type('ABCTimedeltaArray', '_typ', 'timedeltaarray')) ABCPeriodArray = cast('Type[PeriodArray]', create_pandas_abc_type('ABCPeriodArray', '_typ', ('periodarray',))) ABCExtensionArray = cast('Type[ExtensionArray]', create_pandas_abc_type('ABCExtensionArray', '_typ', {'extension', 'categorical', 'periodarray', 'datetimearray', 'timedeltaarray'})) ABCNumpyExtensionArray = cast('Type[NumpyExtensionArray]', create_pandas_abc_type('ABCNumpyExtensionArray', '_typ', ('npy_extension',))) # File: pandas-main/pandas/core/dtypes/inference.py """""" from __future__ import annotations from collections import abc from numbers import Number import re from re import Pattern from typing import TYPE_CHECKING import numpy as np from pandas._libs import lib if TYPE_CHECKING: from collections.abc import Hashable from pandas._typing import TypeGuard is_bool = lib.is_bool is_integer = lib.is_integer is_float = lib.is_float is_complex = lib.is_complex is_scalar = lib.is_scalar is_decimal = lib.is_decimal is_list_like = lib.is_list_like is_iterator = lib.is_iterator def is_number(obj: object) -> TypeGuard[Number | np.number]: return isinstance(obj, (Number, np.number)) def iterable_not_string(obj: object) -> bool: return isinstance(obj, abc.Iterable) and (not isinstance(obj, str)) def is_file_like(obj: object) -> bool: if not (hasattr(obj, 'read') or hasattr(obj, 'write')): return False return bool(hasattr(obj, '__iter__')) def is_re(obj: object) -> TypeGuard[Pattern]: return isinstance(obj, Pattern) def is_re_compilable(obj: object) -> bool: try: re.compile(obj) except TypeError: return False else: return True def is_array_like(obj: object) -> bool: return is_list_like(obj) and hasattr(obj, 'dtype') def is_nested_list_like(obj: object) -> bool: return is_list_like(obj) and hasattr(obj, '__len__') and (len(obj) > 0) and all((is_list_like(item) for item in obj)) def is_dict_like(obj: object) -> bool: dict_like_attrs = ('__getitem__', 'keys', '__contains__') return all((hasattr(obj, attr) for attr in dict_like_attrs)) and (not isinstance(obj, type)) def is_named_tuple(obj: object) -> bool: return isinstance(obj, abc.Sequence) and hasattr(obj, '_fields') def is_hashable(obj: object) -> TypeGuard[Hashable]: try: hash(obj) except TypeError: return False else: return True def is_sequence(obj: object) -> bool: try: iter(obj) len(obj) return not isinstance(obj, (str, bytes)) except (TypeError, AttributeError): return False def is_dataclass(item: object) -> bool: try: import dataclasses return dataclasses.is_dataclass(item) and (not isinstance(item, type)) except ImportError: return False # File: pandas-main/pandas/core/dtypes/missing.py """""" from __future__ import annotations from decimal import Decimal from typing import TYPE_CHECKING, overload import warnings import numpy as np from pandas._libs import lib import pandas._libs.missing as libmissing from pandas._libs.tslibs import NaT, iNaT from pandas.core.dtypes.common 
import DT64NS_DTYPE, TD64NS_DTYPE, ensure_object, is_scalar, is_string_or_object_np_dtype from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCExtensionArray, ABCIndex, ABCMultiIndex, ABCSeries from pandas.core.dtypes.inference import is_list_like if TYPE_CHECKING: from re import Pattern from pandas._libs.missing import NAType from pandas._libs.tslibs import NaTType from pandas._typing import ArrayLike, DtypeObj, NDFrame, NDFrameT, Scalar, npt from pandas import Series from pandas.core.indexes.base import Index isposinf_scalar = libmissing.isposinf_scalar isneginf_scalar = libmissing.isneginf_scalar _dtype_object = np.dtype('object') _dtype_str = np.dtype(str) @overload def isna(obj: Scalar | Pattern | NAType | NaTType) -> bool: ... @overload def isna(obj: ArrayLike | Index | list) -> npt.NDArray[np.bool_]: ... @overload def isna(obj: NDFrameT) -> NDFrameT: ... @overload def isna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: ... @overload def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ... def isna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: return _isna(obj) isnull = isna def _isna(obj): if is_scalar(obj): return libmissing.checknull(obj) elif isinstance(obj, ABCMultiIndex): raise NotImplementedError('isna is not defined for MultiIndex') elif isinstance(obj, type): return False elif isinstance(obj, (np.ndarray, ABCExtensionArray)): return _isna_array(obj) elif isinstance(obj, ABCIndex): if not obj._can_hold_na: return obj.isna() return _isna_array(obj._values) elif isinstance(obj, ABCSeries): result = _isna_array(obj._values) result = obj._constructor(result, index=obj.index, name=obj.name, copy=False) return result elif isinstance(obj, ABCDataFrame): return obj.isna() elif isinstance(obj, list): return _isna_array(np.asarray(obj, dtype=object)) elif hasattr(obj, '__array__'): return _isna_array(np.asarray(obj)) else: return False def _isna_array(values: ArrayLike) -> npt.NDArray[np.bool_] | NDFrame: dtype = values.dtype result: npt.NDArray[np.bool_] | NDFrame if not isinstance(values, np.ndarray): result = values.isna() elif isinstance(values, np.rec.recarray): result = _isna_recarray_dtype(values) elif is_string_or_object_np_dtype(values.dtype): result = _isna_string_dtype(values) elif dtype.kind in 'mM': result = values.view('i8') == iNaT else: result = np.isnan(values) return result def _isna_string_dtype(values: np.ndarray) -> npt.NDArray[np.bool_]: dtype = values.dtype if dtype.kind in ('S', 'U'): result = np.zeros(values.shape, dtype=bool) elif values.ndim in {1, 2}: result = libmissing.isnaobj(values) else: result = libmissing.isnaobj(values.ravel()) result = result.reshape(values.shape) return result def _isna_recarray_dtype(values: np.rec.recarray) -> npt.NDArray[np.bool_]: result = np.zeros(values.shape, dtype=bool) for (i, record) in enumerate(values): record_as_array = np.array(record.tolist()) does_record_contain_nan = isna_all(record_as_array) result[i] = np.any(does_record_contain_nan) return result @overload def notna(obj: Scalar | Pattern | NAType | NaTType) -> bool: ... @overload def notna(obj: ArrayLike | Index | list) -> npt.NDArray[np.bool_]: ... @overload def notna(obj: NDFrameT) -> NDFrameT: ... @overload def notna(obj: NDFrameT | ArrayLike | Index | list) -> NDFrameT | npt.NDArray[np.bool_]: ... @overload def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: ... 
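# Illustrative usage (editor's sketch, not part of the pandas source): isna/notna
# dispatch on the input type -- scalars return a plain bool, ndarrays and other
# list-likes return a boolean ndarray, and Series/DataFrame return a like-shaped
# object; notna (defined below) is the elementwise inverse of isna. For example:
#   >>> import numpy as np, pandas as pd
#   >>> pd.isna(np.nan)
#   True
#   >>> pd.isna(np.array([1.0, np.nan]))
#   array([False,  True])
#   >>> s = pd.Series([1.0, None])
#   >>> bool(pd.notna(s).equals(~pd.isna(s)))
#   True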
def notna(obj: object) -> bool | npt.NDArray[np.bool_] | NDFrame: res = isna(obj) if isinstance(res, bool): return not res return ~res notnull = notna def array_equivalent(left, right, strict_nan: bool=False, dtype_equal: bool=False) -> bool: (left, right) = (np.asarray(left), np.asarray(right)) if left.shape != right.shape: return False if dtype_equal: if left.dtype.kind in 'fc': return _array_equivalent_float(left, right) elif left.dtype.kind in 'mM': return _array_equivalent_datetimelike(left, right) elif is_string_or_object_np_dtype(left.dtype): return _array_equivalent_object(left, right, strict_nan) else: return np.array_equal(left, right) if left.dtype.kind in 'OSU' or right.dtype.kind in 'OSU': return _array_equivalent_object(left, right, strict_nan) if left.dtype.kind in 'fc': if not (left.size and right.size): return True return ((left == right) | isna(left) & isna(right)).all() elif left.dtype.kind in 'mM' or right.dtype.kind in 'mM': if left.dtype != right.dtype: return False left = left.view('i8') right = right.view('i8') if (left.dtype.type is np.void or right.dtype.type is np.void) and left.dtype != right.dtype: return False return np.array_equal(left, right) def _array_equivalent_float(left: np.ndarray, right: np.ndarray) -> bool: return bool(((left == right) | np.isnan(left) & np.isnan(right)).all()) def _array_equivalent_datetimelike(left: np.ndarray, right: np.ndarray) -> bool: return np.array_equal(left.view('i8'), right.view('i8')) def _array_equivalent_object(left: np.ndarray, right: np.ndarray, strict_nan: bool) -> bool: left = ensure_object(left) right = ensure_object(right) mask: npt.NDArray[np.bool_] | None = None if strict_nan: mask = isna(left) & isna(right) if not mask.any(): mask = None try: if mask is None: return lib.array_equivalent_object(left, right) if not lib.array_equivalent_object(left[~mask], right[~mask]): return False left_remaining = left[mask] right_remaining = right[mask] except ValueError: left_remaining = left right_remaining = right for (left_value, right_value) in zip(left_remaining, right_remaining): if left_value is NaT and right_value is not NaT: return False elif left_value is libmissing.NA and right_value is not libmissing.NA: return False elif isinstance(left_value, float) and np.isnan(left_value): if not isinstance(right_value, float) or not np.isnan(right_value): return False else: with warnings.catch_warnings(): warnings.simplefilter('ignore', DeprecationWarning) try: if np.any(np.asarray(left_value != right_value)): return False except TypeError as err: if 'boolean value of NA is ambiguous' in str(err): return False raise except ValueError: return False return True def array_equals(left: ArrayLike, right: ArrayLike) -> bool: if left.dtype != right.dtype: return False elif isinstance(left, ABCExtensionArray): return left.equals(right) else: return array_equivalent(left, right, dtype_equal=True) def infer_fill_value(val): if not is_list_like(val): val = [val] val = np.asarray(val) if val.dtype.kind in 'mM': return np.array('NaT', dtype=val.dtype) elif val.dtype == object: dtype = lib.infer_dtype(ensure_object(val), skipna=False) if dtype in ['datetime', 'datetime64']: return np.array('NaT', dtype=DT64NS_DTYPE) elif dtype in ['timedelta', 'timedelta64']: return np.array('NaT', dtype=TD64NS_DTYPE) return np.array(np.nan, dtype=object) elif val.dtype.kind == 'U': return np.array(np.nan, dtype=val.dtype) return np.nan def construct_1d_array_from_inferred_fill_value(value: object, length: int) -> ArrayLike: from pandas.core.algorithms 
import take_nd from pandas.core.construction import sanitize_array from pandas.core.indexes.base import Index arr = sanitize_array(value, Index(range(1)), copy=False) taker = -1 * np.ones(length, dtype=np.intp) return take_nd(arr, taker) def maybe_fill(arr: np.ndarray) -> np.ndarray: if arr.dtype.kind not in 'iub': arr.fill(np.nan) return arr def na_value_for_dtype(dtype: DtypeObj, compat: bool=True): if isinstance(dtype, ExtensionDtype): return dtype.na_value elif dtype.kind in 'mM': unit = np.datetime_data(dtype)[0] return dtype.type('NaT', unit) elif dtype.kind in 'fc': return np.nan elif dtype.kind in 'iu': if compat: return 0 return np.nan elif dtype.kind == 'b': if compat: return False return np.nan return np.nan def remove_na_arraylike(arr: Series | Index | np.ndarray): if isinstance(arr.dtype, ExtensionDtype): return arr[notna(arr)] else: return arr[notna(np.asarray(arr))] def is_valid_na_for_dtype(obj, dtype: DtypeObj) -> bool: if not lib.is_scalar(obj) or not isna(obj): return False elif dtype.kind == 'M': if isinstance(dtype, np.dtype): return not isinstance(obj, (np.timedelta64, Decimal)) return not isinstance(obj, (np.timedelta64, np.datetime64, Decimal)) elif dtype.kind == 'm': return not isinstance(obj, (np.datetime64, Decimal)) elif dtype.kind in 'iufc': return obj is not NaT and (not isinstance(obj, (np.datetime64, np.timedelta64))) elif dtype.kind == 'b': return lib.is_float(obj) or obj is None or obj is libmissing.NA elif dtype == _dtype_str: return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal, float)) elif dtype == _dtype_object: return True elif isinstance(dtype, PeriodDtype): return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal)) elif isinstance(dtype, IntervalDtype): return lib.is_float(obj) or obj is None or obj is libmissing.NA elif isinstance(dtype, CategoricalDtype): return is_valid_na_for_dtype(obj, dtype.categories.dtype) return not isinstance(obj, (np.datetime64, np.timedelta64, Decimal)) def isna_all(arr: ArrayLike) -> bool: total_len = len(arr) chunk_len = max(total_len // 40, 1000) dtype = arr.dtype if lib.is_np_dtype(dtype, 'f'): checker = np.isnan elif lib.is_np_dtype(dtype, 'mM') or isinstance(dtype, (DatetimeTZDtype, PeriodDtype)): checker = lambda x: np.asarray(x.view('i8')) == iNaT else: checker = _isna_array return all((checker(arr[i:i + chunk_len]).all() for i in range(0, total_len, chunk_len))) # File: pandas-main/pandas/core/flags.py from __future__ import annotations from typing import TYPE_CHECKING import weakref if TYPE_CHECKING: from pandas.core.generic import NDFrame class Flags: _keys: set[str] = {'allows_duplicate_labels'} def __init__(self, obj: NDFrame, *, allows_duplicate_labels: bool) -> None: self._allows_duplicate_labels = allows_duplicate_labels self._obj = weakref.ref(obj) @property def allows_duplicate_labels(self) -> bool: return self._allows_duplicate_labels @allows_duplicate_labels.setter def allows_duplicate_labels(self, value: bool) -> None: value = bool(value) obj = self._obj() if obj is None: raise ValueError("This flag's object has been deleted.") if not value: for ax in obj.axes: ax._maybe_check_unique() self._allows_duplicate_labels = value def __getitem__(self, key: str): if key not in self._keys: raise KeyError(key) return getattr(self, key) def __setitem__(self, key: str, value) -> None: if key not in self._keys: raise ValueError(f'Unknown flag {key}. 
Must be one of {self._keys}') setattr(self, key, value) def __repr__(self) -> str: return f'<Flags(allows_duplicate_labels={self.allows_duplicate_labels})>' def __eq__(self, other: object) -> bool: if isinstance(other, type(self)): return self.allows_duplicate_labels == other.allows_duplicate_labels return False # File: pandas-main/pandas/core/frame.py """""" from __future__ import annotations import collections from collections import abc from collections.abc import Callable, Hashable, Iterable, Iterator, Mapping, Sequence import functools from io import StringIO import itertools import operator import sys from textwrap import dedent from typing import TYPE_CHECKING, Any, Literal, cast, overload import warnings import numpy as np from numpy import ma from pandas._config import get_option from pandas._libs import algos as libalgos, lib, properties from pandas._libs.hashtable import duplicated from pandas._libs.lib import is_range_indexer from pandas.compat import PYPY from pandas.compat._constants import REF_COUNT from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv from pandas.errors import ChainedAssignmentError, InvalidIndexError from pandas.errors.cow import _chained_assignment_method_msg, _chained_assignment_msg from pandas.util._decorators import Appender, Substitution, deprecate_nonkeyword_arguments, doc, set_module from pandas.util._exceptions import find_stack_level, rewrite_warning from pandas.util._validators import validate_ascending, validate_bool_kwarg, validate_percentile from pandas.core.dtypes.cast import LossySetitemError, can_hold_element, construct_1d_arraylike_from_scalar, construct_2d_arraylike_from_scalar, find_common_type, infer_dtype_from_scalar, invalidate_string_dtypes, maybe_downcast_to_dtype from pandas.core.dtypes.common import infer_dtype_from_object, is_1d_only_ea_dtype, is_array_like, is_bool_dtype, is_dataclass, is_dict_like, is_float, is_float_dtype, is_hashable, is_integer, is_integer_dtype, is_iterator, is_list_like, is_scalar, is_sequence, needs_i8_conversion, pandas_dtype from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ArrowDtype, BaseMaskedDtype, ExtensionDtype from pandas.core.dtypes.missing import isna, notna from pandas.core import algorithms, common as com, nanops, ops, roperator from pandas.core.accessor import Accessor from pandas.core.apply import reconstruct_and_relabel_result from pandas.core.array_algos.take import take_2d_multi from pandas.core.arraylike import OpsMixin from pandas.core.arrays import BaseMaskedArray, DatetimeArray, ExtensionArray, PeriodArray, TimedeltaArray from pandas.core.arrays.sparse import SparseFrameAccessor from pandas.core.construction import ensure_wrapped_if_datetimelike, sanitize_array, sanitize_masked_array from pandas.core.generic import NDFrame, make_doc from pandas.core.indexers import check_key_length from pandas.core.indexes.api import DatetimeIndex, Index, PeriodIndex, default_index, ensure_index, ensure_index_from_sequences from pandas.core.indexes.multi import MultiIndex, maybe_droplevels from pandas.core.indexing import check_bool_indexer, check_dict_or_set_indexers from pandas.core.internals import BlockManager from pandas.core.internals.construction import arrays_to_mgr, dataclasses_to_dicts, dict_to_mgr, ndarray_to_mgr, nested_data_to_arrays, rec_array_to_mgr, reorder_arrays, to_arrays, treat_as_nested from pandas.core.methods import selectn from pandas.core.reshape.melt import melt from pandas.core.series import Series from pandas.core.shared_docs import 
_shared_docs from pandas.core.sorting import get_group_index, lexsort_indexer, nargsort from pandas.io.common import get_handle from pandas.io.formats import console, format as fmt from pandas.io.formats.info import INFO_DOCSTRING, DataFrameInfo, frame_sub_kwargs import pandas.plotting if TYPE_CHECKING: import datetime from pandas._libs.internals import BlockValuesRefs from pandas._typing import AggFuncType, AnyAll, AnyArrayLike, ArrayLike, Axes, Axis, AxisInt, ColspaceArgType, CompressionOptions, CorrelationMethod, DropKeep, Dtype, DtypeObj, FilePath, FloatFormatType, FormattersType, Frequency, FromDictOrient, HashableT, HashableT2, IgnoreRaise, IndexKeyFunc, IndexLabel, JoinValidate, Level, ListLike, MergeHow, MergeValidate, MutableMappingT, NaPosition, NsmallestNlargestKeep, PythonFuncType, QuantileInterpolation, ReadBuffer, ReindexMethod, Renamer, Scalar, Self, SequenceNotStr, SortKind, StorageOptions, Suffixes, T, ToStataByteorder, ToTimestampHow, UpdateJoin, ValueKeyFunc, WriteBuffer, XMLParsers, npt from pandas.core.groupby.generic import DataFrameGroupBy from pandas.core.interchange.dataframe_protocol import DataFrame as DataFrameXchg from pandas.core.internals.managers import SingleBlockManager from pandas.io.formats.style import Styler _shared_doc_kwargs = {'axes': 'index, columns', 'klass': 'DataFrame', 'axes_single_arg': "{0 or 'index', 1 or 'columns'}", 'axis': "axis : {0 or 'index', 1 or 'columns'}, default 0\n If 0 or 'index': apply function to each column.\n If 1 or 'columns': apply function to each row.", 'inplace': '\n inplace : bool, default False\n Whether to modify the DataFrame rather than creating a new one.', 'optional_by': "\nby : str or list of str\n Name or list of names to sort by.\n\n - if `axis` is 0 or `'index'` then `by` may contain index\n levels and/or column labels.\n - if `axis` is 1 or `'columns'` then `by` may contain column\n levels and/or index labels.", 'optional_reindex': "\nlabels : array-like, optional\n New labels / index to conform the axis specified by 'axis' to.\nindex : array-like, optional\n New labels for the index. Preferably an Index object to avoid\n duplicating data.\ncolumns : array-like, optional\n New labels for the columns. Preferably an Index object to avoid\n duplicating data.\naxis : int or str, optional\n Axis to target. Can be either the axis name ('index', 'columns')\n or number (0, 1)."} _merge_doc = '\nMerge DataFrame or named Series objects with a database-style join.\n\nA named Series object is treated as a DataFrame with a single named column.\n\nThe join is done on columns or indexes. If joining columns on\ncolumns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes\non indexes or indexes on a column or columns, the index will be passed on.\nWhen performing a cross merge, no column specifications to merge on are\nallowed.\n\n.. warning::\n\n If both key columns contain rows where the key is a null value, those\n rows will be matched against each other. 
This is different from usual SQL\n join behaviour and can lead to unexpected results.\n\nParameters\n----------%s\nright : DataFrame or named Series\n Object to merge with.\nhow : {\'left\', \'right\', \'outer\', \'inner\', \'cross\'}, default \'inner\'\n Type of merge to be performed.\n\n * left: use only keys from left frame, similar to a SQL left outer join;\n preserve key order.\n * right: use only keys from right frame, similar to a SQL right outer join;\n preserve key order.\n * outer: use union of keys from both frames, similar to a SQL full outer\n join; sort keys lexicographically.\n * inner: use intersection of keys from both frames, similar to a SQL inner\n join; preserve the order of the left keys.\n * cross: creates the cartesian product from both frames, preserves the order\n of the left keys.\non : label or list\n Column or index level names to join on. These must be found in both\n DataFrames. If `on` is None and not merging on indexes then this defaults\n to the intersection of the columns in both DataFrames.\nleft_on : label or list, or array-like\n Column or index level names to join on in the left DataFrame. Can also\n be an array or list of arrays of the length of the left DataFrame.\n These arrays are treated as if they are columns.\nright_on : label or list, or array-like\n Column or index level names to join on in the right DataFrame. Can also\n be an array or list of arrays of the length of the right DataFrame.\n These arrays are treated as if they are columns.\nleft_index : bool, default False\n Use the index from the left DataFrame as the join key(s). If it is a\n MultiIndex, the number of keys in the other DataFrame (either the index\n or a number of columns) must match the number of levels.\nright_index : bool, default False\n Use the index from the right DataFrame as the join key. Same caveats as\n left_index.\nsort : bool, default False\n Sort the join keys lexicographically in the result DataFrame. If False,\n the order of the join keys depends on the join type (how keyword).\nsuffixes : list-like, default is ("_x", "_y")\n A length-2 sequence where each element is optionally a string\n indicating the suffix to add to overlapping column names in\n `left` and `right` respectively. Pass a value of `None` instead\n of a string to indicate that the column name from `left` or\n `right` should be left as-is, with no suffix. At least one of the\n values must not be None.\ncopy : bool, default False\n If False, avoid copy if possible.\n\n .. note::\n The `copy` keyword will change behavior in pandas 3.0.\n `Copy-on-Write\n `__\n will be enabled by default, which means that all methods with a\n `copy` keyword will use a lazy copy mechanism to defer the copy and\n ignore the `copy` keyword. The `copy` keyword will be removed in a\n future version of pandas.\n\n You can already get the future behavior and improvements through\n enabling copy on write ``pd.options.mode.copy_on_write = True``\n\n .. deprecated:: 3.0.0\nindicator : bool or str, default False\n If True, adds a column to the output DataFrame called "_merge" with\n information on the source of each row. The column can be given a different\n name by providing a string argument. 
The column will have a Categorical\n type with the value of "left_only" for observations whose merge key only\n appears in the left DataFrame, "right_only" for observations\n whose merge key only appears in the right DataFrame, and "both"\n if the observation\'s merge key is found in both DataFrames.\n\nvalidate : str, optional\n If specified, checks if merge is of specified type.\n\n * "one_to_one" or "1:1": check if merge keys are unique in both\n left and right datasets.\n * "one_to_many" or "1:m": check if merge keys are unique in left\n dataset.\n * "many_to_one" or "m:1": check if merge keys are unique in right\n dataset.\n * "many_to_many" or "m:m": allowed, but does not result in checks.\n\nReturns\n-------\nDataFrame\n A DataFrame of the two merged objects.\n\nSee Also\n--------\nmerge_ordered : Merge with optional filling/interpolation.\nmerge_asof : Merge on nearest keys.\nDataFrame.join : Similar method using indices.\n\nExamples\n--------\n>>> df1 = pd.DataFrame({\'lkey\': [\'foo\', \'bar\', \'baz\', \'foo\'],\n... \'value\': [1, 2, 3, 5]})\n>>> df2 = pd.DataFrame({\'rkey\': [\'foo\', \'bar\', \'baz\', \'foo\'],\n... \'value\': [5, 6, 7, 8]})\n>>> df1\n lkey value\n0 foo 1\n1 bar 2\n2 baz 3\n3 foo 5\n>>> df2\n rkey value\n0 foo 5\n1 bar 6\n2 baz 7\n3 foo 8\n\nMerge df1 and df2 on the lkey and rkey columns. The value columns have\nthe default suffixes, _x and _y, appended.\n\n>>> df1.merge(df2, left_on=\'lkey\', right_on=\'rkey\')\n lkey value_x rkey value_y\n0 foo 1 foo 5\n1 foo 1 foo 8\n2 bar 2 bar 6\n3 baz 3 baz 7\n4 foo 5 foo 5\n5 foo 5 foo 8\n\nMerge DataFrames df1 and df2 with specified left and right suffixes\nappended to any overlapping columns.\n\n>>> df1.merge(df2, left_on=\'lkey\', right_on=\'rkey\',\n... suffixes=(\'_left\', \'_right\'))\n lkey value_left rkey value_right\n0 foo 1 foo 5\n1 foo 1 foo 8\n2 bar 2 bar 6\n3 baz 3 baz 7\n4 foo 5 foo 5\n5 foo 5 foo 8\n\nMerge DataFrames df1 and df2, but raise an exception if the DataFrames have\nany overlapping columns.\n\n>>> df1.merge(df2, left_on=\'lkey\', right_on=\'rkey\', suffixes=(False, False))\nTraceback (most recent call last):\n...\nValueError: columns overlap but no suffix specified:\n Index([\'value\'], dtype=\'object\')\n\n>>> df1 = pd.DataFrame({\'a\': [\'foo\', \'bar\'], \'b\': [1, 2]})\n>>> df2 = pd.DataFrame({\'a\': [\'foo\', \'baz\'], \'c\': [3, 4]})\n>>> df1\n a b\n0 foo 1\n1 bar 2\n>>> df2\n a c\n0 foo 3\n1 baz 4\n\n>>> df1.merge(df2, how=\'inner\', on=\'a\')\n a b c\n0 foo 1 3\n\n>>> df1.merge(df2, how=\'left\', on=\'a\')\n a b c\n0 foo 1 3.0\n1 bar 2 NaN\n\n>>> df1 = pd.DataFrame({\'left\': [\'foo\', \'bar\']})\n>>> df2 = pd.DataFrame({\'right\': [7, 8]})\n>>> df1\n left\n0 foo\n1 bar\n>>> df2\n right\n0 7\n1 8\n\n>>> df1.merge(df2, how=\'cross\')\n left right\n0 foo 7\n1 foo 8\n2 bar 7\n3 bar 8\n' @set_module('pandas') class DataFrame(NDFrame, OpsMixin): _internal_names_set = {'columns', 'index'} | NDFrame._internal_names_set _typ = 'dataframe' _HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray) _accessors: set[str] = {'sparse'} _hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([]) _mgr: BlockManager __pandas_priority__ = 4000 @property def _constructor(self) -> type[DataFrame]: return DataFrame def _constructor_from_mgr(self, mgr, axes) -> DataFrame: df = DataFrame._from_mgr(mgr, axes=axes) if type(self) is DataFrame: return df elif type(self).__name__ == 'GeoDataFrame': return self._constructor(mgr) return self._constructor(df) _constructor_sliced: Callable[..., Series] = 
Series def _constructor_sliced_from_mgr(self, mgr, axes) -> Series: ser = Series._from_mgr(mgr, axes) ser._name = None if type(self) is DataFrame: return ser return self._constructor_sliced(ser) def __init__(self, data=None, index: Axes | None=None, columns: Axes | None=None, dtype: Dtype | None=None, copy: bool | None=None) -> None: allow_mgr = False if dtype is not None: dtype = self._validate_dtype(dtype) if isinstance(data, DataFrame): data = data._mgr allow_mgr = True if not copy: data = data.copy(deep=False) if isinstance(data, BlockManager): if not allow_mgr: warnings.warn(f'Passing a {type(data).__name__} to {type(self).__name__} is deprecated and will raise in a future version. Use public APIs instead.', DeprecationWarning, stacklevel=1) data = data.copy(deep=False) if index is None and columns is None and (dtype is None) and (not copy): NDFrame.__init__(self, data) return if isinstance(index, set): raise ValueError('index cannot be a set') if isinstance(columns, set): raise ValueError('columns cannot be a set') if copy is None: if isinstance(data, dict): copy = True elif not isinstance(data, (Index, DataFrame, Series)): copy = True else: copy = False if data is None: index = index if index is not None else default_index(0) columns = columns if columns is not None else default_index(0) dtype = dtype if dtype is not None else pandas_dtype(object) data = [] if isinstance(data, BlockManager): mgr = self._init_mgr(data, axes={'index': index, 'columns': columns}, dtype=dtype, copy=copy) elif isinstance(data, dict): mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy) elif isinstance(data, ma.MaskedArray): from numpy.ma import mrecords if isinstance(data, mrecords.MaskedRecords): raise TypeError('MaskedRecords are not supported. Pass {name: data[name] for name in data.dtype.names} instead') data = sanitize_masked_array(data) mgr = ndarray_to_mgr(data, index, columns, dtype=dtype, copy=copy) elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)): if data.dtype.names: data = cast(np.ndarray, data) mgr = rec_array_to_mgr(data, index, columns, dtype, copy) elif getattr(data, 'name', None) is not None: mgr = dict_to_mgr({data.name: data}, index, columns, dtype=dtype, copy=copy) else: mgr = ndarray_to_mgr(data, index, columns, dtype=dtype, copy=copy) elif is_list_like(data): if not isinstance(data, abc.Sequence): if hasattr(data, '__array__'): data = np.asarray(data) else: data = list(data) if len(data) > 0: if is_dataclass(data[0]): data = dataclasses_to_dicts(data) if not isinstance(data, np.ndarray) and treat_as_nested(data): if columns is not None: columns = ensure_index(columns) (arrays, columns, index) = nested_data_to_arrays(data, columns, index, dtype) mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype) else: mgr = ndarray_to_mgr(data, index, columns, dtype=dtype, copy=copy) else: mgr = dict_to_mgr({}, index, columns if columns is not None else default_index(0), dtype=dtype) else: if index is None or columns is None: raise ValueError('DataFrame constructor not properly called!') index = ensure_index(index) columns = ensure_index(columns) if not dtype: (dtype, _) = infer_dtype_from_scalar(data) if isinstance(dtype, ExtensionDtype): values = [construct_1d_arraylike_from_scalar(data, len(index), dtype) for _ in range(len(columns))] mgr = arrays_to_mgr(values, columns, index, dtype=None) else: arr2d = construct_2d_arraylike_from_scalar(data, len(index), len(columns), dtype, copy) mgr = ndarray_to_mgr(arr2d, index, columns, dtype=arr2d.dtype, copy=False) 
NDFrame.__init__(self, mgr) def __dataframe__(self, nan_as_null: bool=False, allow_copy: bool=True) -> DataFrameXchg: from pandas.core.interchange.dataframe import PandasDataFrameXchg return PandasDataFrameXchg(self, allow_copy=allow_copy) def __arrow_c_stream__(self, requested_schema=None): pa = import_optional_dependency('pyarrow', min_version='14.0.0') if requested_schema is not None: requested_schema = pa.Schema._import_from_c_capsule(requested_schema) table = pa.Table.from_pandas(self, schema=requested_schema) return table.__arrow_c_stream__() @property def axes(self) -> list[Index]: return [self.index, self.columns] @property def shape(self) -> tuple[int, int]: return (len(self.index), len(self.columns)) @property def _is_homogeneous_type(self) -> bool: return len({block.values.dtype for block in self._mgr.blocks}) <= 1 @property def _can_fast_transpose(self) -> bool: blocks = self._mgr.blocks if len(blocks) != 1: return False dtype = blocks[0].dtype return not is_1d_only_ea_dtype(dtype) @property def _values(self) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray: mgr = self._mgr blocks = mgr.blocks if len(blocks) != 1: return ensure_wrapped_if_datetimelike(self.values) arr = blocks[0].values if arr.ndim == 1: return self.values arr = cast('np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray', arr) return arr.T def _repr_fits_vertical_(self) -> bool: max_rows = get_option('display.max_rows') return len(self) <= max_rows def _repr_fits_horizontal_(self) -> bool: (width, height) = console.get_console_size() max_columns = get_option('display.max_columns') nb_columns = len(self.columns) if max_columns and nb_columns > max_columns or (width and nb_columns > width // 2): return False if width is None or not console.in_interactive_session(): return True if get_option('display.width') is not None or console.in_ipython_frontend(): max_rows = 1 else: max_rows = get_option('display.max_rows') buf = StringIO() d = self if max_rows is not None: d = d.iloc[:min(max_rows, len(d))] else: return True d.to_string(buf=buf) value = buf.getvalue() repr_width = max((len(line) for line in value.split('\n'))) return repr_width < width def _info_repr(self) -> bool: info_repr_option = get_option('display.large_repr') == 'info' return info_repr_option and (not (self._repr_fits_horizontal_() and self._repr_fits_vertical_())) def __repr__(self) -> str: if self._info_repr(): buf = StringIO() self.info(buf=buf) return buf.getvalue() repr_params = fmt.get_dataframe_repr_params() return self.to_string(**repr_params) def _repr_html_(self) -> str | None: if self._info_repr(): buf = StringIO() self.info(buf=buf) val = buf.getvalue().replace('<', '<', 1) val = val.replace('>', '>', 1) return f'
<pre>{val}</pre>
' if get_option('display.notebook_repr_html'): max_rows = get_option('display.max_rows') min_rows = get_option('display.min_rows') max_cols = get_option('display.max_columns') show_dimensions = get_option('display.show_dimensions') formatter = fmt.DataFrameFormatter(self, columns=None, col_space=None, na_rep='NaN', formatters=None, float_format=None, sparsify=None, justify=None, index_names=True, header=True, index=True, bold_rows=True, escape=True, max_rows=max_rows, min_rows=min_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal='.') return fmt.DataFrameRenderer(formatter).to_html(notebook=True) else: return None @overload def to_string(self, buf: None=..., *, columns: Axes | None=..., col_space: int | list[int] | dict[Hashable, int] | None=..., header: bool | SequenceNotStr[str]=..., index: bool=..., na_rep: str=..., formatters: fmt.FormattersType | None=..., float_format: fmt.FloatFormatType | None=..., sparsify: bool | None=..., index_names: bool=..., justify: str | None=..., max_rows: int | None=..., max_cols: int | None=..., show_dimensions: bool=..., decimal: str=..., line_width: int | None=..., min_rows: int | None=..., max_colwidth: int | None=..., encoding: str | None=...) -> str: ... @overload def to_string(self, buf: FilePath | WriteBuffer[str], *, columns: Axes | None=..., col_space: int | list[int] | dict[Hashable, int] | None=..., header: bool | SequenceNotStr[str]=..., index: bool=..., na_rep: str=..., formatters: fmt.FormattersType | None=..., float_format: fmt.FloatFormatType | None=..., sparsify: bool | None=..., index_names: bool=..., justify: str | None=..., max_rows: int | None=..., max_cols: int | None=..., show_dimensions: bool=..., decimal: str=..., line_width: int | None=..., min_rows: int | None=..., max_colwidth: int | None=..., encoding: str | None=...) -> None: ... @Substitution(header_type='bool or list of str', header='Write out the column names. If a list of columns is given, it is assumed to be aliases for the column names', col_space_type='int, list or dict of int', col_space='The minimum width of each column. If a list of ints is given every integers corresponds with one column. 
If a dict is given, the key references the column, while the value defines the space to use.') @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring) def to_string(self, buf: FilePath | WriteBuffer[str] | None=None, *, columns: Axes | None=None, col_space: int | list[int] | dict[Hashable, int] | None=None, header: bool | SequenceNotStr[str]=True, index: bool=True, na_rep: str='NaN', formatters: fmt.FormattersType | None=None, float_format: fmt.FloatFormatType | None=None, sparsify: bool | None=None, index_names: bool=True, justify: str | None=None, max_rows: int | None=None, max_cols: int | None=None, show_dimensions: bool=False, decimal: str='.', line_width: int | None=None, min_rows: int | None=None, max_colwidth: int | None=None, encoding: str | None=None) -> str | None: from pandas import option_context with option_context('display.max_colwidth', max_colwidth): formatter = fmt.DataFrameFormatter(self, columns=columns, col_space=col_space, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, justify=justify, index_names=index_names, header=header, index=index, min_rows=min_rows, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, decimal=decimal) return fmt.DataFrameRenderer(formatter).to_string(buf=buf, encoding=encoding, line_width=line_width) def _get_values_for_csv(self, *, float_format: FloatFormatType | None, date_format: str | None, decimal: str, na_rep: str, quoting) -> DataFrame: mgr = self._mgr.get_values_for_csv(float_format=float_format, date_format=date_format, decimal=decimal, na_rep=na_rep, quoting=quoting) return self._constructor_from_mgr(mgr, axes=mgr.axes) @property def style(self) -> Styler: from pandas.io.formats.style import Styler return Styler(self) _shared_docs['items'] = "\n Iterate over (column name, Series) pairs.\n\n Iterates over the DataFrame columns, returning a tuple with\n the column name and the content as a Series.\n\n Yields\n ------\n label : object\n The column names for the DataFrame being iterated over.\n content : Series\n The column entries belonging to each label, as a Series.\n\n See Also\n --------\n DataFrame.iterrows : Iterate over DataFrame rows as\n (index, Series) pairs.\n DataFrame.itertuples : Iterate over DataFrame rows as namedtuples\n of the values.\n\n Examples\n --------\n >>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],\n ... 'population': [1864, 22000, 80000]},\n ... index=['panda', 'polar', 'koala'])\n >>> df\n species population\n panda bear 1864\n polar bear 22000\n koala marsupial 80000\n >>> for label, content in df.items():\n ... print(f'label: {label}')\n ... 
print(f'content: {content}', sep='\\n')\n ...\n label: species\n content:\n panda bear\n polar bear\n koala marsupial\n Name: species, dtype: object\n label: population\n content:\n panda 1864\n polar 22000\n koala 80000\n Name: population, dtype: int64\n " @Appender(_shared_docs['items']) def items(self) -> Iterable[tuple[Hashable, Series]]: for (i, k) in enumerate(self.columns): yield (k, self._ixs(i, axis=1)) def iterrows(self) -> Iterable[tuple[Hashable, Series]]: columns = self.columns klass = self._constructor_sliced for (k, v) in zip(self.index, self.values): s = klass(v, index=columns, name=k).__finalize__(self) if self._mgr.is_single_block: s._mgr.add_references(self._mgr) yield (k, s) def itertuples(self, index: bool=True, name: str | None='Pandas') -> Iterable[tuple[Any, ...]]: arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, 'Index') arrays.extend((self.iloc[:, k] for k in range(len(self.columns)))) if name is not None: itertuple = collections.namedtuple(name, fields, rename=True) return map(itertuple._make, zip(*arrays)) return zip(*arrays) def __len__(self) -> int: return len(self.index) @overload def dot(self, other: Series) -> Series: ... @overload def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame: ... def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: if isinstance(other, (Series, DataFrame)): common = self.columns.union(other.index) if len(common) > len(self.columns) or len(common) > len(other.index): raise ValueError('matrices are not aligned') left = self.reindex(columns=common) right = other.reindex(index=common) lvals = left.values rvals = right._values else: left = self lvals = self.values rvals = np.asarray(other) if lvals.shape[1] != rvals.shape[0]: raise ValueError(f'Dot product shape mismatch, {lvals.shape} vs {rvals.shape}') if isinstance(other, DataFrame): common_type = find_common_type(list(self.dtypes) + list(other.dtypes)) return self._constructor(np.dot(lvals, rvals), index=left.index, columns=other.columns, copy=False, dtype=common_type) elif isinstance(other, Series): common_type = find_common_type(list(self.dtypes) + [other.dtypes]) return self._constructor_sliced(np.dot(lvals, rvals), index=left.index, copy=False, dtype=common_type) elif isinstance(rvals, (np.ndarray, Index)): result = np.dot(lvals, rvals) if result.ndim == 2: return self._constructor(result, index=left.index, copy=False) else: return self._constructor_sliced(result, index=left.index, copy=False) else: raise TypeError(f'unsupported type: {type(other)}') @overload def __matmul__(self, other: Series) -> Series: ... @overload def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: ... 
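# Illustrative usage sketch, not part of the pandas source above: DataFrame.dot
# aligns self.columns with the other operand's index before taking the matrix
# product, and the ``@`` operator (__matmul__) simply delegates to dot(). The
# frame names and data below are made up for illustration.
import pandas as pd

_df = pd.DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
_other = pd.DataFrame([[10, 0], [0, 10]], index=["a", "b"])
assert _df.dot(_other).equals(_df @ _other)  # __matmul__ -> dot()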
def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series: return self.dot(other) def __rmatmul__(self, other) -> DataFrame: try: return self.T.dot(np.transpose(other)).T except ValueError as err: if 'shape mismatch' not in str(err): raise msg = f'shapes {np.shape(other)} and {self.shape} not aligned' raise ValueError(msg) from err @classmethod def from_dict(cls, data: dict, orient: FromDictOrient='columns', dtype: Dtype | None=None, columns: Axes | None=None) -> DataFrame: index: list | Index | None = None orient = orient.lower() if orient == 'index': if len(data) > 0: if isinstance(next(iter(data.values())), (Series, dict)): data = _from_nested_dict(data) else: index = list(data.keys()) data = list(data.values()) elif orient in ('columns', 'tight'): if columns is not None: raise ValueError(f"cannot use columns parameter with orient='{orient}'") else: raise ValueError(f"Expected 'index', 'columns' or 'tight' for orient parameter. Got '{orient}' instead") if orient != 'tight': return cls(data, index=index, columns=columns, dtype=dtype) else: realdata = data['data'] def create_index(indexlist, namelist) -> Index: index: Index if len(namelist) > 1: index = MultiIndex.from_tuples(indexlist, names=namelist) else: index = Index(indexlist, name=namelist[0]) return index index = create_index(data['index'], data['index_names']) columns = create_index(data['columns'], data['column_names']) return cls(realdata, index=index, columns=columns, dtype=dtype) def to_numpy(self, dtype: npt.DTypeLike | None=None, copy: bool=False, na_value: object=lib.no_default) -> np.ndarray: if dtype is not None: dtype = np.dtype(dtype) result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value) if result.dtype is not dtype: result = np.asarray(result, dtype=dtype) return result @overload def to_dict(self, orient: Literal['dict', 'list', 'series', 'split', 'tight', 'index']=..., *, into: type[MutableMappingT] | MutableMappingT, index: bool=...) -> MutableMappingT: ... @overload def to_dict(self, orient: Literal['records'], *, into: type[MutableMappingT] | MutableMappingT, index: bool=...) -> list[MutableMappingT]: ... @overload def to_dict(self, orient: Literal['dict', 'list', 'series', 'split', 'tight', 'index']=..., *, into: type[dict]=..., index: bool=...) -> dict: ... @overload def to_dict(self, orient: Literal['records'], *, into: type[dict]=..., index: bool=...) -> list[dict]: ... def to_dict(self, orient: Literal['dict', 'list', 'series', 'split', 'tight', 'records', 'index']='dict', *, into: type[MutableMappingT] | MutableMappingT=dict, index: bool=True) -> MutableMappingT | list[MutableMappingT]: from pandas.core.methods.to_dict import to_dict return to_dict(self, orient, into=into, index=index) @classmethod def from_records(cls, data, index=None, exclude=None, columns=None, coerce_float: bool=False, nrows: int | None=None) -> DataFrame: if isinstance(data, DataFrame): raise TypeError('Passing a DataFrame to DataFrame.from_records is not supported. 
Use set_index and/or drop to modify the DataFrame instead.') result_index = None if columns is not None: columns = ensure_index(columns) def maybe_reorder(arrays: list[ArrayLike], arr_columns: Index, columns: Index, index) -> tuple[list[ArrayLike], Index, Index | None]: if len(arrays): length = len(arrays[0]) else: length = 0 result_index = None if len(arrays) == 0 and index is None and (length == 0): result_index = default_index(0) (arrays, arr_columns) = reorder_arrays(arrays, arr_columns, columns, length) return (arrays, arr_columns, result_index) if is_iterator(data): if nrows == 0: return cls() try: first_row = next(data) except StopIteration: return cls(index=index, columns=columns) dtype = None if hasattr(first_row, 'dtype') and first_row.dtype.names: dtype = first_row.dtype values = [first_row] if nrows is None: values += data else: values.extend(itertools.islice(data, nrows - 1)) if dtype is not None: data = np.array(values, dtype=dtype) else: data = values if isinstance(data, dict): if columns is None: columns = arr_columns = ensure_index(sorted(data)) arrays = [data[k] for k in columns] else: arrays = [] arr_columns_list = [] for (k, v) in data.items(): if k in columns: arr_columns_list.append(k) arrays.append(v) arr_columns = Index(arr_columns_list) (arrays, arr_columns, result_index) = maybe_reorder(arrays, arr_columns, columns, index) elif isinstance(data, np.ndarray): (arrays, columns) = to_arrays(data, columns) arr_columns = columns else: (arrays, arr_columns) = to_arrays(data, columns) if coerce_float: for (i, arr) in enumerate(arrays): if arr.dtype == object: arrays[i] = lib.maybe_convert_objects(arr, try_float=True) arr_columns = ensure_index(arr_columns) if columns is None: columns = arr_columns else: (arrays, arr_columns, result_index) = maybe_reorder(arrays, arr_columns, columns, index) if exclude is None: exclude = set() else: exclude = set(exclude) if index is not None: if isinstance(index, str) or not hasattr(index, '__iter__'): i = columns.get_loc(index) exclude.add(index) if len(arrays) > 0: result_index = Index(arrays[i], name=index) else: result_index = Index([], name=index) else: try: index_data = [arrays[arr_columns.get_loc(field)] for field in index] except (KeyError, TypeError): result_index = index else: result_index = ensure_index_from_sequences(index_data, names=index) exclude.update(index) if any(exclude): arr_exclude = (x for x in exclude if x in arr_columns) to_remove = {arr_columns.get_loc(col) for col in arr_exclude} arrays = [v for (i, v) in enumerate(arrays) if i not in to_remove] columns = columns.drop(exclude) mgr = arrays_to_mgr(arrays, columns, result_index) return cls._from_mgr(mgr, axes=mgr.axes) def to_records(self, index: bool=True, column_dtypes=None, index_dtypes=None) -> np.rec.recarray: if index: ix_vals = [np.asarray(self.index.get_level_values(i)) for i in range(self.index.nlevels)] arrays = ix_vals + [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] index_names = list(self.index.names) if isinstance(self.index, MultiIndex): index_names = com.fill_missing_names(index_names) elif index_names[0] is None: index_names = ['index'] names = [str(name) for name in itertools.chain(index_names, self.columns)] else: arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))] names = [str(c) for c in self.columns] index_names = [] index_len = len(index_names) formats = [] for (i, v) in enumerate(arrays): index_int = i if index_int < index_len: dtype_mapping = index_dtypes name = index_names[index_int] else: index_int 
-= index_len dtype_mapping = column_dtypes name = self.columns[index_int] if is_dict_like(dtype_mapping): if name in dtype_mapping: dtype_mapping = dtype_mapping[name] elif index_int in dtype_mapping: dtype_mapping = dtype_mapping[index_int] else: dtype_mapping = None if dtype_mapping is None: formats.append(v.dtype) elif isinstance(dtype_mapping, (type, np.dtype, str)): formats.append(dtype_mapping) else: element = 'row' if i < index_len else 'column' msg = f'Invalid dtype {dtype_mapping} specified for {element} {name}' raise ValueError(msg) return np.rec.fromarrays(arrays, dtype={'names': names, 'formats': formats}) @classmethod def _from_arrays(cls, arrays, columns, index, dtype: Dtype | None=None, verify_integrity: bool=True) -> Self: if dtype is not None: dtype = pandas_dtype(dtype) columns = ensure_index(columns) if len(columns) != len(arrays): raise ValueError('len(columns) must match len(arrays)') mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype, verify_integrity=verify_integrity) return cls._from_mgr(mgr, axes=mgr.axes) @doc(storage_options=_shared_docs['storage_options'], compression_options=_shared_docs['compression_options'] % 'path') def to_stata(self, path: FilePath | WriteBuffer[bytes], *, convert_dates: dict[Hashable, str] | None=None, write_index: bool=True, byteorder: ToStataByteorder | None=None, time_stamp: datetime.datetime | None=None, data_label: str | None=None, variable_labels: dict[Hashable, str] | None=None, version: int | None=114, convert_strl: Sequence[Hashable] | None=None, compression: CompressionOptions='infer', storage_options: StorageOptions | None=None, value_labels: dict[Hashable, dict[float, str]] | None=None) -> None: if version not in (114, 117, 118, 119, None): raise ValueError('Only formats 114, 117, 118 and 119 are supported.') if version == 114: if convert_strl is not None: raise ValueError('strl is not supported in format 114') from pandas.io.stata import StataWriter as statawriter elif version == 117: from pandas.io.stata import StataWriter117 as statawriter else: from pandas.io.stata import StataWriterUTF8 as statawriter kwargs: dict[str, Any] = {} if version is None or version >= 117: kwargs['convert_strl'] = convert_strl if version is None or version >= 118: kwargs['version'] = version writer = statawriter(path, self, convert_dates=convert_dates, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, write_index=write_index, variable_labels=variable_labels, compression=compression, storage_options=storage_options, value_labels=value_labels, **kwargs) writer.write_file() def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None: from pandas.io.feather_format import to_feather to_feather(self, path, **kwargs) @overload def to_markdown(self, buf: None=..., *, mode: str=..., index: bool=..., storage_options: StorageOptions | None=..., **kwargs) -> str: ... @overload def to_markdown(self, buf: FilePath | WriteBuffer[str], *, mode: str=..., index: bool=..., storage_options: StorageOptions | None=..., **kwargs) -> None: ... @overload def to_markdown(self, buf: FilePath | WriteBuffer[str] | None, *, mode: str=..., index: bool=..., storage_options: StorageOptions | None=..., **kwargs) -> str | None: ... 
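# Illustrative usage sketch, not part of the pandas source above: to_records()
# converts the frame to a NumPy record array; with index=True (the default) the
# index becomes the leading field. Example data is made up for illustration.
import pandas as pd

_df = pd.DataFrame({"x": [1, 2], "y": [0.5, 1.5]}, index=["r0", "r1"])
_rec = _df.to_records(index=True)
# _rec.dtype.names == ('index', 'x', 'y'); rows look like ('r0', 1, 0.5)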
def to_markdown(self, buf: FilePath | WriteBuffer[str] | None=None, *, mode: str='wt', index: bool=True, storage_options: StorageOptions | None=None, **kwargs) -> str | None: if 'showindex' in kwargs: raise ValueError("Pass 'index' instead of 'showindex") kwargs.setdefault('headers', 'keys') kwargs.setdefault('tablefmt', 'pipe') kwargs.setdefault('showindex', index) tabulate = import_optional_dependency('tabulate') result = tabulate.tabulate(self, **kwargs) if buf is None: return result with get_handle(buf, mode, storage_options=storage_options) as handles: handles.handle.write(result) return None @overload def to_parquet(self, path: None=..., *, engine: Literal['auto', 'pyarrow', 'fastparquet']=..., compression: str | None=..., index: bool | None=..., partition_cols: list[str] | None=..., storage_options: StorageOptions=..., **kwargs) -> bytes: ... @overload def to_parquet(self, path: FilePath | WriteBuffer[bytes], *, engine: Literal['auto', 'pyarrow', 'fastparquet']=..., compression: str | None=..., index: bool | None=..., partition_cols: list[str] | None=..., storage_options: StorageOptions=..., **kwargs) -> None: ... @doc(storage_options=_shared_docs['storage_options']) def to_parquet(self, path: FilePath | WriteBuffer[bytes] | None=None, *, engine: Literal['auto', 'pyarrow', 'fastparquet']='auto', compression: str | None='snappy', index: bool | None=None, partition_cols: list[str] | None=None, storage_options: StorageOptions | None=None, **kwargs) -> bytes | None: from pandas.io.parquet import to_parquet return to_parquet(self, path, engine, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, **kwargs) @overload def to_orc(self, path: None=..., *, engine: Literal['pyarrow']=..., index: bool | None=..., engine_kwargs: dict[str, Any] | None=...) -> bytes: ... @overload def to_orc(self, path: FilePath | WriteBuffer[bytes], *, engine: Literal['pyarrow']=..., index: bool | None=..., engine_kwargs: dict[str, Any] | None=...) -> None: ... @overload def to_orc(self, path: FilePath | WriteBuffer[bytes] | None, *, engine: Literal['pyarrow']=..., index: bool | None=..., engine_kwargs: dict[str, Any] | None=...) -> bytes | None: ... def to_orc(self, path: FilePath | WriteBuffer[bytes] | None=None, *, engine: Literal['pyarrow']='pyarrow', index: bool | None=None, engine_kwargs: dict[str, Any] | None=None) -> bytes | None: from pandas.io.orc import to_orc return to_orc(self, path, engine=engine, index=index, engine_kwargs=engine_kwargs) @overload def to_html(self, buf: FilePath | WriteBuffer[str], *, columns: Axes | None=..., col_space: ColspaceArgType | None=..., header: bool=..., index: bool=..., na_rep: str=..., formatters: FormattersType | None=..., float_format: FloatFormatType | None=..., sparsify: bool | None=..., index_names: bool=..., justify: str | None=..., max_rows: int | None=..., max_cols: int | None=..., show_dimensions: bool | str=..., decimal: str=..., bold_rows: bool=..., classes: str | list | tuple | None=..., escape: bool=..., notebook: bool=..., border: int | bool | None=..., table_id: str | None=..., render_links: bool=..., encoding: str | None=...) -> None: ... 
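# Illustrative usage sketch, not part of the pandas source above: to_markdown()
# delegates rendering to the optional "tabulate" dependency, so this snippet
# assumes tabulate is installed; with buf=None the table is returned as a string.
import pandas as pd

_df = pd.DataFrame({"animal": ["falcon", "dog"], "speed": [389.0, 40.0]})
print(_df.to_markdown(index=False))  # pipe-style Markdown table rendered by tabulate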
@overload def to_html(self, buf: None=..., *, columns: Axes | None=..., col_space: ColspaceArgType | None=..., header: bool=..., index: bool=..., na_rep: str=..., formatters: FormattersType | None=..., float_format: FloatFormatType | None=..., sparsify: bool | None=..., index_names: bool=..., justify: str | None=..., max_rows: int | None=..., max_cols: int | None=..., show_dimensions: bool | str=..., decimal: str=..., bold_rows: bool=..., classes: str | list | tuple | None=..., escape: bool=..., notebook: bool=..., border: int | bool | None=..., table_id: str | None=..., render_links: bool=..., encoding: str | None=...) -> str: ... @Substitution(header_type='bool', header='Whether to print column labels, default True', col_space_type='str or int, list or dict of int or str', col_space='The minimum width of each column in CSS length units. An int is assumed to be px units.') @Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring) def to_html(self, buf: FilePath | WriteBuffer[str] | None=None, *, columns: Axes | None=None, col_space: ColspaceArgType | None=None, header: bool=True, index: bool=True, na_rep: str='NaN', formatters: FormattersType | None=None, float_format: FloatFormatType | None=None, sparsify: bool | None=None, index_names: bool=True, justify: str | None=None, max_rows: int | None=None, max_cols: int | None=None, show_dimensions: bool | str=False, decimal: str='.', bold_rows: bool=True, classes: str | list | tuple | None=None, escape: bool=True, notebook: bool=False, border: int | bool | None=None, table_id: str | None=None, render_links: bool=False, encoding: str | None=None) -> str | None: if justify is not None and justify not in fmt.VALID_JUSTIFY_PARAMETERS: raise ValueError('Invalid value for justify parameter') formatter = fmt.DataFrameFormatter(self, columns=columns, col_space=col_space, na_rep=na_rep, header=header, index=index, formatters=formatters, float_format=float_format, bold_rows=bold_rows, sparsify=sparsify, justify=justify, index_names=index_names, escape=escape, decimal=decimal, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions) return fmt.DataFrameRenderer(formatter).to_html(buf=buf, classes=classes, notebook=notebook, border=border, encoding=encoding, table_id=table_id, render_links=render_links) @overload def to_xml(self, path_or_buffer: None=..., *, index: bool=..., root_name: str | None=..., row_name: str | None=..., na_rep: str | None=..., attr_cols: list[str] | None=..., elem_cols: list[str] | None=..., namespaces: dict[str | None, str] | None=..., prefix: str | None=..., encoding: str=..., xml_declaration: bool | None=..., pretty_print: bool | None=..., parser: XMLParsers | None=..., stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None=..., compression: CompressionOptions=..., storage_options: StorageOptions | None=...) -> str: ... @overload def to_xml(self, path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str], *, index: bool=..., root_name: str | None=..., row_name: str | None=..., na_rep: str | None=..., attr_cols: list[str] | None=..., elem_cols: list[str] | None=..., namespaces: dict[str | None, str] | None=..., prefix: str | None=..., encoding: str=..., xml_declaration: bool | None=..., pretty_print: bool | None=..., parser: XMLParsers | None=..., stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None=..., compression: CompressionOptions=..., storage_options: StorageOptions | None=...) -> None: ... 
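# Illustrative usage sketch, not part of the pandas source above: with buf=None,
# to_html() returns the rendered table as an HTML string; passing a path or
# file-like buffer writes the HTML out and returns None instead.
import pandas as pd

_df = pd.DataFrame({"a": [1, 2]})
_html = _df.to_html(index=False, border=0)
assert _html.lstrip().startswith("<table")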
@doc(storage_options=_shared_docs['storage_options'], compression_options=_shared_docs['compression_options'] % 'path_or_buffer') def to_xml(self, path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None=None, *, index: bool=True, root_name: str | None='data', row_name: str | None='row', na_rep: str | None=None, attr_cols: list[str] | None=None, elem_cols: list[str] | None=None, namespaces: dict[str | None, str] | None=None, prefix: str | None=None, encoding: str='utf-8', xml_declaration: bool | None=True, pretty_print: bool | None=True, parser: XMLParsers | None='lxml', stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None=None, compression: CompressionOptions='infer', storage_options: StorageOptions | None=None) -> str | None: from pandas.io.formats.xml import EtreeXMLFormatter, LxmlXMLFormatter lxml = import_optional_dependency('lxml.etree', errors='ignore') TreeBuilder: type[EtreeXMLFormatter | LxmlXMLFormatter] if parser == 'lxml': if lxml is not None: TreeBuilder = LxmlXMLFormatter else: raise ImportError('lxml not found, please install or use the etree parser.') elif parser == 'etree': TreeBuilder = EtreeXMLFormatter else: raise ValueError('Values for parser can only be lxml or etree.') xml_formatter = TreeBuilder(self, path_or_buffer=path_or_buffer, index=index, root_name=root_name, row_name=row_name, na_rep=na_rep, attr_cols=attr_cols, elem_cols=elem_cols, namespaces=namespaces, prefix=prefix, encoding=encoding, xml_declaration=xml_declaration, pretty_print=pretty_print, stylesheet=stylesheet, compression=compression, storage_options=storage_options) return xml_formatter.write_output() @doc(INFO_DOCSTRING, **frame_sub_kwargs) def info(self, verbose: bool | None=None, buf: WriteBuffer[str] | None=None, max_cols: int | None=None, memory_usage: bool | str | None=None, show_counts: bool | None=None) -> None: info = DataFrameInfo(data=self, memory_usage=memory_usage) info.render(buf=buf, max_cols=max_cols, verbose=verbose, show_counts=show_counts) def memory_usage(self, index: bool=True, deep: bool=False) -> Series: result = self._constructor_sliced([c.memory_usage(index=False, deep=deep) for (col, c) in self.items()], index=self.columns, dtype=np.intp) if index: index_memory_usage = self._constructor_sliced(self.index.memory_usage(deep=deep), index=['Index']) result = index_memory_usage._append(result) return result def transpose(self, *args, copy: bool | lib.NoDefault=lib.no_default) -> DataFrame: self._check_copy_deprecation(copy) nv.validate_transpose(args, {}) first_dtype = self.dtypes.iloc[0] if len(self.columns) else None if self._can_fast_transpose: new_vals = self._values.T result = self._constructor(new_vals, index=self.columns, columns=self.index, copy=False, dtype=new_vals.dtype) if len(self) > 0: result._mgr.add_references(self._mgr) elif self._is_homogeneous_type and first_dtype is not None and isinstance(first_dtype, ExtensionDtype): new_values: list if isinstance(first_dtype, BaseMaskedDtype): from pandas.core.arrays.masked import transpose_homogeneous_masked_arrays new_values = transpose_homogeneous_masked_arrays(cast(Sequence[BaseMaskedArray], self._iter_column_arrays())) elif isinstance(first_dtype, ArrowDtype): from pandas.core.arrays.arrow.array import ArrowExtensionArray, transpose_homogeneous_pyarrow new_values = transpose_homogeneous_pyarrow(cast(Sequence[ArrowExtensionArray], self._iter_column_arrays())) else: arr_typ = first_dtype.construct_array_type() values = self.values new_values = [arr_typ._from_sequence(row, dtype=first_dtype) 
for row in values] result = type(self)._from_arrays(new_values, index=self.columns, columns=self.index, verify_integrity=False) else: new_arr = self.values.T result = self._constructor(new_arr, index=self.columns, columns=self.index, dtype=new_arr.dtype, copy=False) return result.__finalize__(self, method='transpose') @property def T(self) -> DataFrame: return self.transpose() def _ixs(self, i: int, axis: AxisInt=0) -> Series: if axis == 0: new_mgr = self._mgr.fast_xs(i) result = self._constructor_sliced_from_mgr(new_mgr, axes=new_mgr.axes) result._name = self.index[i] return result.__finalize__(self) else: col_mgr = self._mgr.iget(i) return self._box_col_values(col_mgr, i) def _get_column_array(self, i: int) -> ArrayLike: return self._mgr.iget_values(i) def _iter_column_arrays(self) -> Iterator[ArrayLike]: for i in range(len(self.columns)): yield self._get_column_array(i) def __getitem__(self, key): check_dict_or_set_indexers(key) key = lib.item_from_zerodim(key) key = com.apply_if_callable(key, self) if is_hashable(key) and (not is_iterator(key)) and (not isinstance(key, slice)): is_mi = isinstance(self.columns, MultiIndex) if not is_mi and (self.columns.is_unique and key in self.columns or key in self.columns.drop_duplicates(keep=False)): return self._get_item(key) elif is_mi and self.columns.is_unique and (key in self.columns): return self._getitem_multilevel(key) if isinstance(key, slice): return self._getitem_slice(key) if isinstance(key, DataFrame): return self.where(key) if com.is_bool_indexer(key): return self._getitem_bool_array(key) is_single_key = isinstance(key, tuple) or not is_list_like(key) if is_single_key: if self.columns.nlevels > 1: return self._getitem_multilevel(key) indexer = self.columns.get_loc(key) if is_integer(indexer): indexer = [indexer] else: if is_iterator(key): key = list(key) indexer = self.columns._get_indexer_strict(key, 'columns')[1] if getattr(indexer, 'dtype', None) == bool: indexer = np.where(indexer)[0] if isinstance(indexer, slice): return self._slice(indexer, axis=1) data = self.take(indexer, axis=1) if is_single_key: if data.shape[1] == 1 and (not isinstance(self.columns, MultiIndex)): return data._get_item(key) return data def _getitem_bool_array(self, key): if isinstance(key, Series) and (not key.index.equals(self.index)): warnings.warn('Boolean Series key will be reindexed to match DataFrame index.', UserWarning, stacklevel=find_stack_level()) elif len(key) != len(self.index): raise ValueError(f'Item wrong length {len(key)} instead of {len(self.index)}.') key = check_bool_indexer(self.index, key) if key.all(): return self.copy(deep=False) indexer = key.nonzero()[0] return self.take(indexer, axis=0) def _getitem_multilevel(self, key): loc = self.columns.get_loc(key) if isinstance(loc, (slice, np.ndarray)): new_columns = self.columns[loc] result_columns = maybe_droplevels(new_columns, key) result = self.iloc[:, loc] result.columns = result_columns if len(result.columns) == 1: top = result.columns[0] if isinstance(top, tuple): top = top[0] if top == '': result = result[''] if isinstance(result, Series): result = self._constructor_sliced(result, index=self.index, name=key) return result else: return self._ixs(loc, axis=1) def _get_value(self, index, col, takeable: bool=False) -> Scalar: if takeable: series = self._ixs(col, axis=1) return series._values[index] series = self._get_item(col) if not isinstance(self.index, MultiIndex): row = self.index.get_loc(index) return series._values[row] loc = self.index._engine.get_loc(index) return 
series._values[loc] def isetitem(self, loc, value) -> None: if isinstance(value, DataFrame): if is_integer(loc): loc = [loc] if len(loc) != len(value.columns): raise ValueError(f'Got {len(loc)} positions but value has {len(value.columns)} columns.') for (i, idx) in enumerate(loc): (arraylike, refs) = self._sanitize_column(value.iloc[:, i]) self._iset_item_mgr(idx, arraylike, inplace=False, refs=refs) return (arraylike, refs) = self._sanitize_column(value) self._iset_item_mgr(loc, arraylike, inplace=False, refs=refs) def __setitem__(self, key, value) -> None: if not PYPY: if sys.getrefcount(self) <= 3: warnings.warn(_chained_assignment_msg, ChainedAssignmentError, stacklevel=2) key = com.apply_if_callable(key, self) if isinstance(key, slice): slc = self.index._convert_slice_indexer(key, kind='getitem') return self._setitem_slice(slc, value) if isinstance(key, DataFrame) or getattr(key, 'ndim', None) == 2: self._setitem_frame(key, value) elif isinstance(key, (Series, np.ndarray, list, Index)): self._setitem_array(key, value) elif isinstance(value, DataFrame): self._set_item_frame_value(key, value) elif is_list_like(value) and (not self.columns.is_unique) and (1 < len(self.columns.get_indexer_for([key])) == len(value)): self._setitem_array([key], value) else: self._set_item(key, value) def _setitem_slice(self, key: slice, value) -> None: self.iloc[key] = value def _setitem_array(self, key, value) -> None: if com.is_bool_indexer(key): if len(key) != len(self.index): raise ValueError(f'Item wrong length {len(key)} instead of {len(self.index)}!') key = check_bool_indexer(self.index, key) indexer = key.nonzero()[0] if isinstance(value, DataFrame): value = value.reindex(self.index.take(indexer)) self.iloc[indexer] = value elif isinstance(value, DataFrame): check_key_length(self.columns, key, value) for (k1, k2) in zip(key, value.columns): self[k1] = value[k2] elif not is_list_like(value): for col in key: self[col] = value elif isinstance(value, np.ndarray) and value.ndim == 2: self._iset_not_inplace(key, value) elif np.ndim(value) > 1: value = DataFrame(value).values self._setitem_array(key, value) else: self._iset_not_inplace(key, value) def _iset_not_inplace(self, key, value) -> None: def igetitem(obj, i: int): if isinstance(obj, np.ndarray): return obj[..., i] else: return obj[i] if self.columns.is_unique: if np.shape(value)[-1] != len(key): raise ValueError('Columns must be same length as key') for (i, col) in enumerate(key): self[col] = igetitem(value, i) else: ilocs = self.columns.get_indexer_non_unique(key)[0] if (ilocs < 0).any(): raise NotImplementedError if np.shape(value)[-1] != len(ilocs): raise ValueError('Columns must be same length as key') assert np.ndim(value) <= 2 orig_columns = self.columns try: self.columns = Index(range(len(self.columns))) for (i, iloc) in enumerate(ilocs): self[iloc] = igetitem(value, i) finally: self.columns = orig_columns def _setitem_frame(self, key, value) -> None: if isinstance(key, np.ndarray): if key.shape != self.shape: raise ValueError('Array conditional must be same shape as self') key = self._constructor(key, **self._construct_axes_dict(), copy=False) if key.size and (not all((is_bool_dtype(dtype) for dtype in key.dtypes))): raise TypeError('Must pass DataFrame or 2-d ndarray with boolean values only') self._where(-key, value, inplace=True) def _set_item_frame_value(self, key, value: DataFrame) -> None: self._ensure_valid_index(value) if key in self.columns: loc = self.columns.get_loc(key) cols = self.columns[loc] len_cols = 1 if is_scalar(cols) or 
isinstance(cols, tuple) else len(cols) if len_cols != len(value.columns): raise ValueError('Columns must be same length as key') if isinstance(self.columns, MultiIndex) and isinstance(loc, (slice, Series, np.ndarray, Index)): cols_droplevel = maybe_droplevels(cols, key) if len(cols_droplevel) and (not cols_droplevel.equals(value.columns)): value = value.reindex(cols_droplevel, axis=1) for (col, col_droplevel) in zip(cols, cols_droplevel): self[col] = value[col_droplevel] return if is_scalar(cols): self[cols] = value[value.columns[0]] return locs: np.ndarray | list if isinstance(loc, slice): locs = np.arange(loc.start, loc.stop, loc.step) elif is_scalar(loc): locs = [loc] else: locs = loc.nonzero()[0] return self.isetitem(locs, value) if len(value.columns) > 1: raise ValueError(f'Cannot set a DataFrame with multiple columns to the single column {key}') elif len(value.columns) == 0: raise ValueError(f'Cannot set a DataFrame without columns to the column {key}') self[key] = value[value.columns[0]] def _iset_item_mgr(self, loc: int | slice | np.ndarray, value, inplace: bool=False, refs: BlockValuesRefs | None=None) -> None: self._mgr.iset(loc, value, inplace=inplace, refs=refs) def _set_item_mgr(self, key, value: ArrayLike, refs: BlockValuesRefs | None=None) -> None: try: loc = self._info_axis.get_loc(key) except KeyError: self._mgr.insert(len(self._info_axis), key, value, refs) else: self._iset_item_mgr(loc, value, refs=refs) def _iset_item(self, loc: int, value: Series, inplace: bool=True) -> None: self._iset_item_mgr(loc, value._values, inplace=inplace, refs=value._references) def _set_item(self, key, value) -> None: (value, refs) = self._sanitize_column(value) if key in self.columns and value.ndim == 1 and (not isinstance(value.dtype, ExtensionDtype)): if not self.columns.is_unique or isinstance(self.columns, MultiIndex): existing_piece = self[key] if isinstance(existing_piece, DataFrame): value = np.tile(value, (len(existing_piece.columns), 1)).T refs = None self._set_item_mgr(key, value, refs) def _set_value(self, index: IndexLabel, col, value: Scalar, takeable: bool=False) -> None: try: if takeable: icol = col iindex = cast(int, index) else: icol = self.columns.get_loc(col) iindex = self.index.get_loc(index) self._mgr.column_setitem(icol, iindex, value, inplace_only=True) except (KeyError, TypeError, ValueError, LossySetitemError): if takeable: self.iloc[index, col] = value else: self.loc[index, col] = value except InvalidIndexError as ii_err: raise InvalidIndexError(f'You can only assign a scalar value not a {type(value)}') from ii_err def _ensure_valid_index(self, value) -> None: if not len(self.index) and is_list_like(value) and len(value): if not isinstance(value, DataFrame): try: value = Series(value) except (ValueError, NotImplementedError, TypeError) as err: raise ValueError('Cannot set a frame with no defined index and a value that cannot be converted to a Series') from err index_copy = value.index.copy() if self.index.name is not None: index_copy.name = self.index.name self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan) def _box_col_values(self, values: SingleBlockManager, loc: int) -> Series: name = self.columns[loc] obj = self._constructor_sliced_from_mgr(values, axes=values.axes) obj._name = name return obj.__finalize__(self) def _get_item(self, item: Hashable) -> Series: loc = self.columns.get_loc(item) return self._ixs(loc, axis=1) @overload def query(self, expr: str, *, inplace: Literal[False]=..., **kwargs) -> DataFrame: ... 
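# Illustrative usage sketch, not part of the pandas source above: __setitem__
# dispatches by key type -- a single label goes through _set_item, a list-like
# key through _setitem_array, and a boolean mask selects rows to overwrite.
# Example data is made up for illustration.
import pandas as pd

_df = pd.DataFrame({"a": [1, 2, 3]})
_df["b"] = _df["a"] * 10   # single column label
_df[["c", "d"]] = 0        # list-like key with a scalar: one new column per label
_df[_df["a"] > 1] = -1     # boolean Series key: rows where a > 1 are set to -1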
@overload def query(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... @overload def query(self, expr: str, *, inplace: bool=..., **kwargs) -> DataFrame | None: ... def query(self, expr: str, *, inplace: bool=False, **kwargs) -> DataFrame | None: inplace = validate_bool_kwarg(inplace, 'inplace') if not isinstance(expr, str): msg = f'expr must be a string to be evaluated, {type(expr)} given' raise ValueError(msg) kwargs['level'] = kwargs.pop('level', 0) + 1 kwargs['target'] = None res = self.eval(expr, **kwargs) try: result = self.loc[res] except ValueError: result = self[res] if inplace: self._update_inplace(result) return None else: return result @overload def eval(self, expr: str, *, inplace: Literal[False]=..., **kwargs) -> Any: ... @overload def eval(self, expr: str, *, inplace: Literal[True], **kwargs) -> None: ... def eval(self, expr: str, *, inplace: bool=False, **kwargs) -> Any | None: from pandas.core.computation.eval import eval as _eval inplace = validate_bool_kwarg(inplace, 'inplace') kwargs['level'] = kwargs.pop('level', 0) + 1 index_resolvers = self._get_index_resolvers() column_resolvers = self._get_cleaned_column_resolvers() resolvers = (column_resolvers, index_resolvers) if 'target' not in kwargs: kwargs['target'] = self kwargs['resolvers'] = tuple(kwargs.get('resolvers', ())) + resolvers return _eval(expr, inplace=inplace, **kwargs) def select_dtypes(self, include=None, exclude=None) -> DataFrame: if not is_list_like(include): include = (include,) if include is not None else () if not is_list_like(exclude): exclude = (exclude,) if exclude is not None else () selection = (frozenset(include), frozenset(exclude)) if not any(selection): raise ValueError('at least one of include or exclude must be nonempty') def check_int_infer_dtype(dtypes): converted_dtypes: list[type] = [] for dtype in dtypes: if isinstance(dtype, str) and dtype == 'int' or dtype is int: converted_dtypes.append(np.int32) converted_dtypes.append(np.int64) elif dtype == 'float' or dtype is float: converted_dtypes.extend([np.float64, np.float32]) else: converted_dtypes.append(infer_dtype_from_object(dtype)) return frozenset(converted_dtypes) include = check_int_infer_dtype(include) exclude = check_int_infer_dtype(exclude) for dtypes in (include, exclude): invalidate_string_dtypes(dtypes) if not include.isdisjoint(exclude): raise ValueError(f'include and exclude overlap on {include & exclude}') def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool: dtype = dtype if not isinstance(dtype, ArrowDtype) else dtype.numpy_dtype return issubclass(dtype.type, tuple(dtypes_set)) or (np.number in dtypes_set and getattr(dtype, '_is_numeric', False) and (not is_bool_dtype(dtype))) def predicate(arr: ArrayLike) -> bool: dtype = arr.dtype if include: if not dtype_predicate(dtype, include): return False if exclude: if dtype_predicate(dtype, exclude): return False return True mgr = self._mgr._get_data_subset(predicate).copy(deep=False) return self._constructor_from_mgr(mgr, axes=mgr.axes).__finalize__(self) def insert(self, loc: int, column: Hashable, value: object, allow_duplicates: bool | lib.NoDefault=lib.no_default) -> None: if allow_duplicates is lib.no_default: allow_duplicates = False if allow_duplicates and (not self.flags.allows_duplicate_labels): raise ValueError("Cannot specify 'allow_duplicates=True' when 'self.flags.allows_duplicate_labels' is False.") if not allow_duplicates and column in self.columns: raise ValueError(f'cannot insert {column}, already exists') if not is_integer(loc): raise 
TypeError('loc must be int') loc = int(loc) if isinstance(value, DataFrame) and len(value.columns) > 1: raise ValueError(f'Expected a one-dimensional object, got a DataFrame with {len(value.columns)} columns instead.') elif isinstance(value, DataFrame): value = value.iloc[:, 0] (value, refs) = self._sanitize_column(value) self._mgr.insert(loc, column, value, refs=refs) def assign(self, **kwargs) -> DataFrame: data = self.copy(deep=False) for (k, v) in kwargs.items(): data[k] = com.apply_if_callable(v, data) return data def _sanitize_column(self, value) -> tuple[ArrayLike, BlockValuesRefs | None]: self._ensure_valid_index(value) assert not isinstance(value, DataFrame) if is_dict_like(value): if not isinstance(value, Series): value = Series(value) return _reindex_for_setitem(value, self.index) if is_list_like(value): com.require_length_match(value, self.index) return (sanitize_array(value, self.index, copy=True, allow_2d=True), None) @property def _series(self): return {item: self._ixs(idx, axis=1) for (idx, item) in enumerate(self.columns)} def _reindex_multi(self, axes: dict[str, Index], fill_value) -> DataFrame: (new_index, row_indexer) = self.index.reindex(axes['index']) (new_columns, col_indexer) = self.columns.reindex(axes['columns']) if row_indexer is not None and col_indexer is not None: indexer = (row_indexer, col_indexer) new_values = take_2d_multi(self.values, indexer, fill_value=fill_value) return self._constructor(new_values, index=new_index, columns=new_columns, copy=False) else: return self._reindex_with_indexers({0: [new_index, row_indexer], 1: [new_columns, col_indexer]}, fill_value=fill_value) @Appender('\n Examples\n --------\n >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})\n\n Change the row labels.\n\n >>> df.set_axis([\'a\', \'b\', \'c\'], axis=\'index\')\n A B\n a 1 4\n b 2 5\n c 3 6\n\n Change the column labels.\n\n >>> df.set_axis([\'I\', \'II\'], axis=\'columns\')\n I II\n 0 1 4\n 1 2 5\n 2 3 6\n ') @Substitution(klass=_shared_doc_kwargs['klass'], axes_single_arg=_shared_doc_kwargs['axes_single_arg'], extended_summary_sub=' column or', axis_description_sub=', and 1 identifies the columns', see_also_sub=' or columns') @Appender(NDFrame.set_axis.__doc__) def set_axis(self, labels, *, axis: Axis=0, copy: bool | lib.NoDefault=lib.no_default) -> DataFrame: return super().set_axis(labels, axis=axis, copy=copy) @doc(NDFrame.reindex, klass=_shared_doc_kwargs['klass'], optional_reindex=_shared_doc_kwargs['optional_reindex']) def reindex(self, labels=None, *, index=None, columns=None, axis: Axis | None=None, method: ReindexMethod | None=None, copy: bool | lib.NoDefault=lib.no_default, level: Level | None=None, fill_value: Scalar | None=np.nan, limit: int | None=None, tolerance=None) -> DataFrame: return super().reindex(labels=labels, index=index, columns=columns, axis=axis, method=method, level=level, fill_value=fill_value, limit=limit, tolerance=tolerance, copy=copy) @overload def drop(self, labels: IndexLabel | ListLike=..., *, axis: Axis=..., index: IndexLabel | ListLike=..., columns: IndexLabel | ListLike=..., level: Level=..., inplace: Literal[True], errors: IgnoreRaise=...) -> None: ... @overload def drop(self, labels: IndexLabel | ListLike=..., *, axis: Axis=..., index: IndexLabel | ListLike=..., columns: IndexLabel | ListLike=..., level: Level=..., inplace: Literal[False]=..., errors: IgnoreRaise=...) -> DataFrame: ... 
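# Illustrative usage sketch, not part of the pandas source above: select_dtypes()
# filters columns by dtype, assign() returns a copy with new or overwritten
# columns, and insert() adds a column in place at a given position.
import pandas as pd

_df = pd.DataFrame({"i": [1, 2], "f": [1.0, 2.0], "s": ["a", "b"]})
_numeric = _df.select_dtypes(include="number")           # keeps "i" and "f"
_with_ratio = _df.assign(ratio=lambda d: d["f"] / d["i"])
_df.insert(0, "flag", [True, False])                     # in place, at position 0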
@overload def drop(self, labels: IndexLabel | ListLike=..., *, axis: Axis=..., index: IndexLabel | ListLike=..., columns: IndexLabel | ListLike=..., level: Level=..., inplace: bool=..., errors: IgnoreRaise=...) -> DataFrame | None: ... def drop(self, labels: IndexLabel | ListLike=None, *, axis: Axis=0, index: IndexLabel | ListLike=None, columns: IndexLabel | ListLike=None, level: Level | None=None, inplace: bool=False, errors: IgnoreRaise='raise') -> DataFrame | None: return super().drop(labels=labels, axis=axis, index=index, columns=columns, level=level, inplace=inplace, errors=errors) @overload def rename(self, mapper: Renamer | None=..., *, index: Renamer | None=..., columns: Renamer | None=..., axis: Axis | None=..., copy: bool | lib.NoDefault=lib.no_default, inplace: Literal[True], level: Level=..., errors: IgnoreRaise=...) -> None: ... @overload def rename(self, mapper: Renamer | None=..., *, index: Renamer | None=..., columns: Renamer | None=..., axis: Axis | None=..., copy: bool | lib.NoDefault=lib.no_default, inplace: Literal[False]=..., level: Level=..., errors: IgnoreRaise=...) -> DataFrame: ... @overload def rename(self, mapper: Renamer | None=..., *, index: Renamer | None=..., columns: Renamer | None=..., axis: Axis | None=..., copy: bool | lib.NoDefault=lib.no_default, inplace: bool=..., level: Level=..., errors: IgnoreRaise=...) -> DataFrame | None: ... def rename(self, mapper: Renamer | None=None, *, index: Renamer | None=None, columns: Renamer | None=None, axis: Axis | None=None, copy: bool | lib.NoDefault=lib.no_default, inplace: bool=False, level: Level | None=None, errors: IgnoreRaise='ignore') -> DataFrame | None: self._check_copy_deprecation(copy) return super()._rename(mapper=mapper, index=index, columns=columns, axis=axis, inplace=inplace, level=level, errors=errors) def pop(self, item: Hashable) -> Series: return super().pop(item=item) @overload def _replace_columnwise(self, mapping: dict[Hashable, tuple[Any, Any]], inplace: Literal[True], regex) -> None: ... @overload def _replace_columnwise(self, mapping: dict[Hashable, tuple[Any, Any]], inplace: Literal[False], regex) -> Self: ... 
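# Illustrative usage sketch for DataFrame.drop, DataFrame.rename and DataFrame.pop defined
# above; the frame and label names are assumed for demonstration.
import pandas as pd

df = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6], "z": [7, 8, 9]}, index=["r0", "r1", "r2"])

# drop removes labels along an axis; index=/columns= are keyword equivalents of axis=0/1.
without_row = df.drop(index="r1")
without_col = df.drop(columns=["z"])

# rename maps old labels to new ones; errors="raise" turns unknown labels into a KeyError.
renamed = df.rename(columns={"x": "x_new"}, errors="raise")

# pop removes a single column from df in place and returns it as a Series.
z = df.pop("z")
print(without_row.index.tolist(), without_col.columns.tolist(), renamed.columns.tolist(), z.name)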
def _replace_columnwise(self, mapping: dict[Hashable, tuple[Any, Any]], inplace: bool, regex) -> Self | None: res = self if inplace else self.copy(deep=False) ax = self.columns for (i, ax_value) in enumerate(ax): if ax_value in mapping: ser = self.iloc[:, i] (target, value) = mapping[ax_value] newobj = ser.replace(target, value, regex=regex) res._iset_item(i, newobj, inplace=inplace) if inplace: return None return res.__finalize__(self) @doc(NDFrame.shift, klass=_shared_doc_kwargs['klass']) def shift(self, periods: int | Sequence[int]=1, freq: Frequency | None=None, axis: Axis=0, fill_value: Hashable=lib.no_default, suffix: str | None=None) -> DataFrame: if freq is not None and fill_value is not lib.no_default: raise ValueError("Passing a 'freq' together with a 'fill_value' is not allowed.") if self.empty: return self.copy() axis = self._get_axis_number(axis) if is_list_like(periods): periods = cast(Sequence, periods) if axis == 1: raise ValueError('If `periods` contains multiple shifts, `axis` cannot be 1.') if len(periods) == 0: raise ValueError('If `periods` is an iterable, it cannot be empty.') from pandas.core.reshape.concat import concat shifted_dataframes = [] for period in periods: if not is_integer(period): raise TypeError(f'Periods must be integer, but {period} is {type(period)}.') period = cast(int, period) shifted_dataframes.append(super().shift(periods=period, freq=freq, axis=axis, fill_value=fill_value).add_suffix(f'{suffix}_{period}' if suffix else f'_{period}')) return concat(shifted_dataframes, axis=1) elif suffix: raise ValueError('Cannot specify `suffix` if `periods` is an int.') periods = cast(int, periods) ncols = len(self.columns) if axis == 1 and periods != 0 and (ncols > 0) and (freq is None): if fill_value is lib.no_default: label = self.columns[0] if periods > 0: result = self.iloc[:, :-periods] for col in range(min(ncols, abs(periods))): filler = self.iloc[:, 0].shift(len(self)) result.insert(0, label, filler, allow_duplicates=True) else: result = self.iloc[:, -periods:] for col in range(min(ncols, abs(periods))): filler = self.iloc[:, -1].shift(len(self)) result.insert(len(result.columns), label, filler, allow_duplicates=True) result.columns = self.columns.copy() return result elif len(self._mgr.blocks) > 1 or not can_hold_element(self._mgr.blocks[0].values, fill_value): nper = abs(periods) nper = min(nper, ncols) if periods > 0: indexer = np.array([-1] * nper + list(range(ncols - periods)), dtype=np.intp) else: indexer = np.array(list(range(nper, ncols)) + [-1] * nper, dtype=np.intp) mgr = self._mgr.reindex_indexer(self.columns, indexer, axis=0, fill_value=fill_value, allow_dups=True) res_df = self._constructor_from_mgr(mgr, axes=mgr.axes) return res_df.__finalize__(self, method='shift') else: return self.T.shift(periods=periods, fill_value=fill_value).T return super().shift(periods=periods, freq=freq, axis=axis, fill_value=fill_value) @overload def set_index(self, keys, *, drop: bool=..., append: bool=..., inplace: Literal[False]=..., verify_integrity: bool=...) -> DataFrame: ... @overload def set_index(self, keys, *, drop: bool=..., append: bool=..., inplace: Literal[True], verify_integrity: bool=...) -> None: ... 
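# Illustrative sketch for DataFrame.shift as implemented above, including the list-of-periods
# form that concatenates one shifted copy per period; the data and suffix are assumed.
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]})

one = df.shift(1)                          # rows move down by one, first row becomes NaN
filled = df.shift(1, fill_value=0)         # fill the vacated positions instead of NaN
several = df.shift([1, 2], suffix="_lag")  # columns a_lag_1, b_lag_1, a_lag_2, b_lag_2
sideways = df.shift(1, axis=1)             # shift along the columns instead of the rows

print(one.iloc[0].isna().all(), filled.iloc[0].tolist(), several.columns.tolist(), sideways.columns.tolist())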
def set_index(self, keys, *, drop: bool=True, append: bool=False, inplace: bool=False, verify_integrity: bool=False) -> DataFrame | None: inplace = validate_bool_kwarg(inplace, 'inplace') self._check_inplace_and_allows_duplicate_labels(inplace) if not isinstance(keys, list): keys = [keys] err_msg = 'The parameter "keys" may be a column key, one-dimensional array, or a list containing only valid column keys and one-dimensional arrays.' missing: list[Hashable] = [] for col in keys: if isinstance(col, (Index, Series, np.ndarray, list, abc.Iterator)): if getattr(col, 'ndim', 1) != 1: raise ValueError(err_msg) else: try: found = col in self.columns except TypeError as err: raise TypeError(f'{err_msg}. Received column of type {type(col)}') from err else: if not found: missing.append(col) if missing: raise KeyError(f'None of {missing} are in the columns') if inplace: frame = self else: frame = self.copy(deep=False) arrays: list[Index] = [] names: list[Hashable] = [] if append: names = list(self.index.names) if isinstance(self.index, MultiIndex): arrays.extend((self.index._get_level_values(i) for i in range(self.index.nlevels))) else: arrays.append(self.index) to_remove: set[Hashable] = set() for col in keys: if isinstance(col, MultiIndex): arrays.extend((col._get_level_values(n) for n in range(col.nlevels))) names.extend(col.names) elif isinstance(col, (Index, Series)): arrays.append(col) names.append(col.name) elif isinstance(col, (list, np.ndarray)): arrays.append(col) names.append(None) elif isinstance(col, abc.Iterator): arrays.append(list(col)) names.append(None) else: arrays.append(frame[col]) names.append(col) if drop: to_remove.add(col) if len(arrays[-1]) != len(self): raise ValueError(f'Length mismatch: Expected {len(self)} rows, received array of length {len(arrays[-1])}') index = ensure_index_from_sequences(arrays, names) if verify_integrity and (not index.is_unique): duplicates = index[index.duplicated()].unique() raise ValueError(f'Index has duplicate keys: {duplicates}') for c in to_remove: del frame[c] index._cleanup() frame.index = index if not inplace: return frame return None @overload def reset_index(self, level: IndexLabel=..., *, drop: bool=..., inplace: Literal[False]=..., col_level: Hashable=..., col_fill: Hashable=..., allow_duplicates: bool | lib.NoDefault=..., names: Hashable | Sequence[Hashable] | None=None) -> DataFrame: ... @overload def reset_index(self, level: IndexLabel=..., *, drop: bool=..., inplace: Literal[True], col_level: Hashable=..., col_fill: Hashable=..., allow_duplicates: bool | lib.NoDefault=..., names: Hashable | Sequence[Hashable] | None=None) -> None: ... @overload def reset_index(self, level: IndexLabel=..., *, drop: bool=..., inplace: bool=..., col_level: Hashable=..., col_fill: Hashable=..., allow_duplicates: bool | lib.NoDefault=..., names: Hashable | Sequence[Hashable] | None=None) -> DataFrame | None: ... 
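# Illustrative sketch for DataFrame.set_index defined above; column names and values are
# assumed. A single key, a list of keys (producing a MultiIndex), or arrays can be used;
# drop=False keeps the key columns and verify_integrity raises on duplicate index entries.
import pandas as pd

df = pd.DataFrame({"year": [2020, 2020, 2021], "city": ["NY", "SF", "NY"], "val": [1, 2, 3]})

by_year = df.set_index("year")
by_year_city = df.set_index(["year", "city"], drop=False)
try:
    df.set_index("year", verify_integrity=True)   # 2020 appears twice -> ValueError
except ValueError as err:
    print("duplicate keys rejected:", err)

print(by_year.index.name, by_year_city.index.nlevels, by_year_city.columns.tolist())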
def reset_index(self, level: IndexLabel | None=None, *, drop: bool=False, inplace: bool=False, col_level: Hashable=0, col_fill: Hashable='', allow_duplicates: bool | lib.NoDefault=lib.no_default, names: Hashable | Sequence[Hashable] | None=None) -> DataFrame | None: inplace = validate_bool_kwarg(inplace, 'inplace') self._check_inplace_and_allows_duplicate_labels(inplace) if inplace: new_obj = self else: new_obj = self.copy(deep=False) if allow_duplicates is not lib.no_default: allow_duplicates = validate_bool_kwarg(allow_duplicates, 'allow_duplicates') new_index = default_index(len(new_obj)) if level is not None: if not isinstance(level, (tuple, list)): level = [level] level = [self.index._get_level_number(lev) for lev in level] if len(level) < self.index.nlevels: new_index = self.index.droplevel(level) if not drop: to_insert: Iterable[tuple[Any, Any | None]] default = 'index' if 'index' not in self else 'level_0' names = self.index._get_default_index_names(names, default) if isinstance(self.index, MultiIndex): to_insert = zip(reversed(self.index.levels), reversed(self.index.codes)) else: to_insert = ((self.index, None),) multi_col = isinstance(self.columns, MultiIndex) for (j, (lev, lab)) in enumerate(to_insert, start=1): i = self.index.nlevels - j if level is not None and i not in level: continue name = names[i] if multi_col: col_name = list(name) if isinstance(name, tuple) else [name] if col_fill is None: if len(col_name) not in (1, self.columns.nlevels): raise ValueError(f'col_fill=None is incompatible with incomplete column name {name}') col_fill = col_name[0] lev_num = self.columns._get_level_number(col_level) name_lst = [col_fill] * lev_num + col_name missing = self.columns.nlevels - len(name_lst) name_lst += [col_fill] * missing name = tuple(name_lst) level_values = lev._values if level_values.dtype == np.object_: level_values = lib.maybe_convert_objects(level_values) if lab is not None: level_values = algorithms.take(level_values, lab, allow_fill=True, fill_value=lev._na_value) new_obj.insert(0, name, level_values, allow_duplicates=allow_duplicates) new_obj.index = new_index if not inplace: return new_obj return None @doc(NDFrame.isna, klass=_shared_doc_kwargs['klass']) def isna(self) -> DataFrame: res_mgr = self._mgr.isna(func=isna) result = self._constructor_from_mgr(res_mgr, axes=res_mgr.axes) return result.__finalize__(self, method='isna') @doc(NDFrame.isna, klass=_shared_doc_kwargs['klass']) def isnull(self) -> DataFrame: return self.isna() @doc(NDFrame.notna, klass=_shared_doc_kwargs['klass']) def notna(self) -> DataFrame: return ~self.isna() @doc(NDFrame.notna, klass=_shared_doc_kwargs['klass']) def notnull(self) -> DataFrame: return ~self.isna() @overload def dropna(self, *, axis: Axis=..., how: AnyAll | lib.NoDefault=..., thresh: int | lib.NoDefault=..., subset: IndexLabel=..., inplace: Literal[False]=..., ignore_index: bool=...) -> DataFrame: ... @overload def dropna(self, *, axis: Axis=..., how: AnyAll | lib.NoDefault=..., thresh: int | lib.NoDefault=..., subset: IndexLabel=..., inplace: Literal[True], ignore_index: bool=...) -> None: ... 
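# Illustrative sketch for DataFrame.reset_index defined above; the MultiIndex levels and the
# replacement names are assumed example values.
import pandas as pd

idx = pd.MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1)], names=["grp", "num"])
df = pd.DataFrame({"val": [10, 20, 30]}, index=idx)

full = df.reset_index()                               # both levels become columns grp and num
partial = df.reset_index(level="num")                 # only the num level moves into the columns
dropped = df.reset_index(drop=True)                   # discard the index, use a RangeIndex
renamed = df.reset_index(names=["group", "number"])   # rename the inserted columns

print(full.columns.tolist(), partial.index.name, dropped.index.tolist(), renamed.columns.tolist())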
def dropna(self, *, axis: Axis=0, how: AnyAll | lib.NoDefault=lib.no_default, thresh: int | lib.NoDefault=lib.no_default, subset: IndexLabel | AnyArrayLike | None=None, inplace: bool=False, ignore_index: bool=False) -> DataFrame | None: if how is not lib.no_default and thresh is not lib.no_default: raise TypeError('You cannot set both the how and thresh arguments at the same time.') if how is lib.no_default: how = 'any' inplace = validate_bool_kwarg(inplace, 'inplace') if isinstance(axis, (tuple, list)): raise TypeError('supplying multiple axes to axis is no longer supported.') axis = self._get_axis_number(axis) agg_axis = 1 - axis agg_obj = self if subset is not None: if not is_list_like(subset): subset = [cast(Hashable, subset)] ax = self._get_axis(agg_axis) indices = ax.get_indexer_for(subset) check = indices == -1 if check.any(): raise KeyError(np.array(subset)[check].tolist()) agg_obj = self.take(indices, axis=agg_axis) if thresh is not lib.no_default: count = agg_obj.count(axis=agg_axis) mask = count >= thresh elif how == 'any': mask = notna(agg_obj).all(axis=agg_axis, bool_only=False) elif how == 'all': mask = notna(agg_obj).any(axis=agg_axis, bool_only=False) else: raise ValueError(f'invalid how option: {how}') if np.all(mask): result = self.copy(deep=False) else: result = self.loc(axis=axis)[mask] if ignore_index: result.index = default_index(len(result)) if not inplace: return result self._update_inplace(result) return None @overload def drop_duplicates(self, subset: Hashable | Iterable[Hashable] | None=..., *, keep: DropKeep=..., inplace: Literal[True], ignore_index: bool=...) -> None: ... @overload def drop_duplicates(self, subset: Hashable | Iterable[Hashable] | None=..., *, keep: DropKeep=..., inplace: Literal[False]=..., ignore_index: bool=...) -> DataFrame: ... @overload def drop_duplicates(self, subset: Hashable | Iterable[Hashable] | None=..., *, keep: DropKeep=..., inplace: bool=..., ignore_index: bool=...) -> DataFrame | None: ... 
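# Illustrative sketch for DataFrame.dropna defined above, showing how/thresh/subset/axis on an
# assumed frame with missing values (how and thresh cannot be combined in one call).
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1.0, np.nan, 3.0, np.nan],
                   "b": [np.nan, np.nan, 3.0, 4.0],
                   "c": [1.0, 2.0, 3.0, 4.0]})

any_na_dropped = df.dropna()            # how="any": keep only rows with no NaN at all
all_na_dropped = df.dropna(how="all")   # drop rows that are entirely NaN (none here)
thresholded = df.dropna(thresh=2)       # keep rows with at least 2 non-NaN values
by_subset = df.dropna(subset=["a"])     # only consider column "a" when deciding
by_columns = df.dropna(axis=1)          # drop columns containing any NaN

print(len(any_na_dropped), len(all_na_dropped), len(thresholded), len(by_subset), by_columns.columns.tolist())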
def drop_duplicates(self, subset: Hashable | Iterable[Hashable] | None=None, *, keep: DropKeep='first', inplace: bool=False, ignore_index: bool=False) -> DataFrame | None: if self.empty: return self.copy(deep=False) inplace = validate_bool_kwarg(inplace, 'inplace') ignore_index = validate_bool_kwarg(ignore_index, 'ignore_index') result = self[-self.duplicated(subset, keep=keep)] if ignore_index: result.index = default_index(len(result)) if inplace: self._update_inplace(result) return None else: return result def duplicated(self, subset: Hashable | Iterable[Hashable] | None=None, keep: DropKeep='first') -> Series: if self.empty: return self._constructor_sliced(dtype=bool) def f(vals) -> tuple[np.ndarray, int]: (labels, shape) = algorithms.factorize(vals, size_hint=len(self)) return (labels.astype('i8'), len(shape)) if subset is None: subset = self.columns elif not np.iterable(subset) or isinstance(subset, str) or (isinstance(subset, tuple) and subset in self.columns): subset = (subset,) subset = cast(Sequence, subset) diff = set(subset) - set(self.columns) if diff: raise KeyError(Index(diff)) if len(subset) == 1 and self.columns.is_unique: result = self[next(iter(subset))].duplicated(keep) result.name = None else: vals = (col.values for (name, col) in self.items() if name in subset) (labels, shape) = map(list, zip(*map(f, vals))) ids = get_group_index(labels, tuple(shape), sort=False, xnull=False) result = self._constructor_sliced(duplicated(ids, keep), index=self.index) return result.__finalize__(self, method='duplicated') @overload def sort_values(self, by: IndexLabel, *, axis: Axis=..., ascending=..., inplace: Literal[False]=..., kind: SortKind=..., na_position: NaPosition=..., ignore_index: bool=..., key: ValueKeyFunc=...) -> DataFrame: ... @overload def sort_values(self, by: IndexLabel, *, axis: Axis=..., ascending=..., inplace: Literal[True], kind: SortKind=..., na_position: str=..., ignore_index: bool=..., key: ValueKeyFunc=...) -> None: ... 
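# Illustrative sketch for DataFrame.duplicated and DataFrame.drop_duplicates defined above;
# the data and the subset column are assumed.
import pandas as pd

df = pd.DataFrame({"brand": ["a", "a", "a", "b"],
                   "style": ["cup", "cup", "pack", "pack"],
                   "rating": [4, 4, 5, 5]})

dup_mask = df.duplicated()                        # True for later exact-duplicate rows
first_kept = df.drop_duplicates()                 # keep the first of each duplicate group
last_kept = df.drop_duplicates(keep="last")       # keep the last occurrence instead
by_brand = df.drop_duplicates(subset=["brand"], ignore_index=True)  # dedupe on one column

print(dup_mask.tolist(), len(first_kept), len(last_kept), by_brand.index.tolist())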
def sort_values(self, by: IndexLabel, *, axis: Axis=0, ascending: bool | list[bool] | tuple[bool, ...]=True, inplace: bool=False, kind: SortKind='quicksort', na_position: str='last', ignore_index: bool=False, key: ValueKeyFunc | None=None) -> DataFrame | None: inplace = validate_bool_kwarg(inplace, 'inplace') axis = self._get_axis_number(axis) ascending = validate_ascending(ascending) if not isinstance(by, list): by = [by] if is_sequence(ascending) and len(by) != len(ascending): raise ValueError(f'Length of ascending ({len(ascending)}) != length of by ({len(by)})') if len(by) > 1: keys = (self._get_label_or_level_values(x, axis=axis) for x in by) if key is not None: keys_data = [Series(k, name=name) for (k, name) in zip(keys, by)] else: keys_data = list(keys) indexer = lexsort_indexer(keys_data, orders=ascending, na_position=na_position, key=key) elif len(by): k = self._get_label_or_level_values(by[0], axis=axis) if key is not None: k = Series(k, name=by[0]) if isinstance(ascending, (tuple, list)): ascending = ascending[0] indexer = nargsort(k, kind=kind, ascending=ascending, na_position=na_position, key=key) elif inplace: return self._update_inplace(self) else: return self.copy(deep=False) if is_range_indexer(indexer, len(indexer)): result = self.copy(deep=False) if ignore_index: result.index = default_index(len(result)) if inplace: return self._update_inplace(result) else: return result new_data = self._mgr.take(indexer, axis=self._get_block_manager_axis(axis), verify=False) if ignore_index: new_data.set_axis(self._get_block_manager_axis(axis), default_index(len(indexer))) result = self._constructor_from_mgr(new_data, axes=new_data.axes) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method='sort_values') @overload def sort_index(self, *, axis: Axis=..., level: IndexLabel=..., ascending: bool | Sequence[bool]=..., inplace: Literal[True], kind: SortKind=..., na_position: NaPosition=..., sort_remaining: bool=..., ignore_index: bool=..., key: IndexKeyFunc=...) -> None: ... @overload def sort_index(self, *, axis: Axis=..., level: IndexLabel=..., ascending: bool | Sequence[bool]=..., inplace: Literal[False]=..., kind: SortKind=..., na_position: NaPosition=..., sort_remaining: bool=..., ignore_index: bool=..., key: IndexKeyFunc=...) -> DataFrame: ... @overload def sort_index(self, *, axis: Axis=..., level: IndexLabel=..., ascending: bool | Sequence[bool]=..., inplace: bool=..., kind: SortKind=..., na_position: NaPosition=..., sort_remaining: bool=..., ignore_index: bool=..., key: IndexKeyFunc=...) -> DataFrame | None: ... 
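# Illustrative sketch for DataFrame.sort_values defined above, including per-column ascending
# flags and a key function; the data is assumed.
import pandas as pd

df = pd.DataFrame({"grp": ["B", "a", "B", "a"], "val": [3, 1, 2, 4]})

by_val = df.sort_values("val")
multi = df.sort_values(["grp", "val"], ascending=[True, False])       # one flag per sort key
case_insensitive = df.sort_values("grp", key=lambda s: s.str.lower())  # key applied per column
na_first = df.sort_values("val", na_position="first")                  # NaNs (if any) go first

print(by_val["val"].tolist(), multi["val"].tolist(), case_insensitive["grp"].tolist(), na_first["val"].tolist())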
def sort_index(self, *, axis: Axis=0, level: IndexLabel | None=None, ascending: bool | Sequence[bool]=True, inplace: bool=False, kind: SortKind='quicksort', na_position: NaPosition='last', sort_remaining: bool=True, ignore_index: bool=False, key: IndexKeyFunc | None=None) -> DataFrame | None: return super().sort_index(axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ignore_index=ignore_index, key=key) def value_counts(self, subset: IndexLabel | None=None, normalize: bool=False, sort: bool=True, ascending: bool=False, dropna: bool=True) -> Series: if subset is None: subset = self.columns.tolist() name = 'proportion' if normalize else 'count' counts = self.groupby(subset, dropna=dropna, observed=False)._grouper.size() counts.name = name if sort: counts = counts.sort_values(ascending=ascending) if normalize: counts /= counts.sum() if is_list_like(subset) and len(subset) == 1: counts.index = MultiIndex.from_arrays([counts.index], names=[counts.index.name]) return counts def nlargest(self, n: int, columns: IndexLabel, keep: NsmallestNlargestKeep='first') -> DataFrame: return selectn.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest() def nsmallest(self, n: int, columns: IndexLabel, keep: NsmallestNlargestKeep='first') -> DataFrame: return selectn.SelectNFrame(self, n=n, keep=keep, columns=columns).nsmallest() def swaplevel(self, i: Axis=-2, j: Axis=-1, axis: Axis=0) -> DataFrame: result = self.copy(deep=False) axis = self._get_axis_number(axis) if not isinstance(result._get_axis(axis), MultiIndex): raise TypeError('Can only swap levels on a hierarchical axis.') if axis == 0: assert isinstance(result.index, MultiIndex) result.index = result.index.swaplevel(i, j) else: assert isinstance(result.columns, MultiIndex) result.columns = result.columns.swaplevel(i, j) return result def reorder_levels(self, order: Sequence[int | str], axis: Axis=0) -> DataFrame: axis = self._get_axis_number(axis) if not isinstance(self._get_axis(axis), MultiIndex): raise TypeError('Can only reorder levels on a hierarchical axis.') result = self.copy(deep=False) if axis == 0: assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) else: assert isinstance(result.columns, MultiIndex) result.columns = result.columns.reorder_levels(order) return result def _cmp_method(self, other, op): axis: Literal[1] = 1 (self, other) = self._align_for_op(other, axis, flex=False, level=None) new_data = self._dispatch_frame_op(other, op, axis=axis) return self._construct_result(new_data) def _arith_method(self, other, op): if self._should_reindex_frame_op(other, op, 1, None, None): return self._arith_method_with_reindex(other, op) axis: Literal[1] = 1 other = ops.maybe_prepare_scalar_for_op(other, (self.shape[axis],)) (self, other) = self._align_for_op(other, axis, flex=True, level=None) with np.errstate(all='ignore'): new_data = self._dispatch_frame_op(other, op, axis=axis) return self._construct_result(new_data) _logical_method = _arith_method def _dispatch_frame_op(self, right, func: Callable, axis: AxisInt | None=None) -> DataFrame: array_op = ops.get_array_op(func) right = lib.item_from_zerodim(right) if not is_list_like(right): bm = self._mgr.apply(array_op, right=right) return self._constructor_from_mgr(bm, axes=bm.axes) elif isinstance(right, DataFrame): assert self.index.equals(right.index) assert self.columns.equals(right.columns) bm = self._mgr.operate_blockwise(right._mgr, array_op) return 
self._constructor_from_mgr(bm, axes=bm.axes) elif isinstance(right, Series) and axis == 1: assert right.index.equals(self.columns) right = right._values assert not isinstance(right, np.ndarray) arrays = [array_op(_left, _right) for (_left, _right) in zip(self._iter_column_arrays(), right)] elif isinstance(right, Series): assert right.index.equals(self.index) right = right._values arrays = [array_op(left, right) for left in self._iter_column_arrays()] else: raise NotImplementedError(right) return type(self)._from_arrays(arrays, self.columns, self.index, verify_integrity=False) def _combine_frame(self, other: DataFrame, func, fill_value=None): if fill_value is None: _arith_op = func else: def _arith_op(left, right): (left, right) = ops.fill_binop(left, right, fill_value) return func(left, right) new_data = self._dispatch_frame_op(other, _arith_op) return new_data def _arith_method_with_reindex(self, right: DataFrame, op) -> DataFrame: left = self (cols, lcol_indexer, rcol_indexer) = left.columns.join(right.columns, how='inner', return_indexers=True) new_left = left if lcol_indexer is None else left.iloc[:, lcol_indexer] new_right = right if rcol_indexer is None else right.iloc[:, rcol_indexer] result = op(new_left, new_right) join_columns = left.columns.join(right.columns, how='outer') if result.columns.has_duplicates: (indexer, _) = result.columns.get_indexer_non_unique(join_columns) indexer = algorithms.unique1d(indexer) result = result._reindex_with_indexers({1: [join_columns, indexer]}, allow_dups=True) else: result = result.reindex(join_columns, axis=1) return result def _should_reindex_frame_op(self, right, op, axis: int, fill_value, level) -> bool: if op is operator.pow or op is roperator.rpow: return False if not isinstance(right, DataFrame): return False if fill_value is None and level is None and (axis == 1): left_uniques = self.columns.unique() right_uniques = right.columns.unique() cols = left_uniques.intersection(right_uniques) if len(cols) and (not (len(cols) == len(left_uniques) and len(cols) == len(right_uniques))): return True return False def _align_for_op(self, other, axis: AxisInt, flex: bool | None=False, level: Level | None=None): (left, right) = (self, other) def to_series(right): msg = 'Unable to coerce to Series, length must be {req_len}: given {given_len}' dtype = None if getattr(right, 'dtype', None) == object: dtype = object if axis == 0: if len(left.index) != len(right): raise ValueError(msg.format(req_len=len(left.index), given_len=len(right))) right = left._constructor_sliced(right, index=left.index, dtype=dtype) else: if len(left.columns) != len(right): raise ValueError(msg.format(req_len=len(left.columns), given_len=len(right))) right = left._constructor_sliced(right, index=left.columns, dtype=dtype) return right if isinstance(right, np.ndarray): if right.ndim == 1: right = to_series(right) elif right.ndim == 2: dtype = None if right.dtype == object: dtype = object if right.shape == left.shape: right = left._constructor(right, index=left.index, columns=left.columns, dtype=dtype) elif right.shape[0] == left.shape[0] and right.shape[1] == 1: right = np.broadcast_to(right, left.shape) right = left._constructor(right, index=left.index, columns=left.columns, dtype=dtype) elif right.shape[1] == left.shape[1] and right.shape[0] == 1: right = to_series(right[0, :]) else: raise ValueError(f'Unable to coerce to DataFrame, shape must be {left.shape}: given {right.shape}') elif right.ndim > 2: raise ValueError(f'Unable to coerce to Series/DataFrame, dimension must be <= 
2: {right.shape}') elif is_list_like(right) and (not isinstance(right, (Series, DataFrame))): if any((is_array_like(el) for el in right)): raise ValueError(f'Unable to coerce list of {type(right[0])} to Series/DataFrame') right = to_series(right) if flex is not None and isinstance(right, DataFrame): if not left._indexed_same(right): if flex: (left, right) = left.align(right, join='outer', level=level) else: raise ValueError('Can only compare identically-labeled (both index and columns) DataFrame objects') elif isinstance(right, Series): axis = axis if axis is not None else 1 if not flex: if not left.axes[axis].equals(right.index): raise ValueError('Operands are not aligned. Do `left, right = left.align(right, axis=1)` before operating.') (left, right) = left.align(right, join='outer', axis=axis, level=level) right = left._maybe_align_series_as_frame(right, axis) return (left, right) def _maybe_align_series_as_frame(self, series: Series, axis: AxisInt): rvalues = series._values if not isinstance(rvalues, np.ndarray): if rvalues.dtype in ('datetime64[ns]', 'timedelta64[ns]'): rvalues = np.asarray(rvalues) else: return series if axis == 0: rvalues = rvalues.reshape(-1, 1) else: rvalues = rvalues.reshape(1, -1) rvalues = np.broadcast_to(rvalues, self.shape) return self._constructor(rvalues, index=self.index, columns=self.columns, dtype=rvalues.dtype) def _flex_arith_method(self, other, op, *, axis: Axis='columns', level=None, fill_value=None): axis = self._get_axis_number(axis) if axis is not None else 1 if self._should_reindex_frame_op(other, op, axis, fill_value, level): return self._arith_method_with_reindex(other, op) if isinstance(other, Series) and fill_value is not None: raise NotImplementedError(f'fill_value {fill_value} not supported.') other = ops.maybe_prepare_scalar_for_op(other, self.shape) (self, other) = self._align_for_op(other, axis, flex=True, level=level) with np.errstate(all='ignore'): if isinstance(other, DataFrame): new_data = self._combine_frame(other, op, fill_value) elif isinstance(other, Series): new_data = self._dispatch_frame_op(other, op, axis=axis) else: if fill_value is not None: self = self.fillna(fill_value) new_data = self._dispatch_frame_op(other, op) return self._construct_result(new_data) def _construct_result(self, result) -> DataFrame: out = self._constructor(result, copy=False).__finalize__(self) out.columns = self.columns out.index = self.index return out def __divmod__(self, other) -> tuple[DataFrame, DataFrame]: div = self // other mod = self - div * other return (div, mod) def __rdivmod__(self, other) -> tuple[DataFrame, DataFrame]: div = other // self mod = other - div * self return (div, mod) def _flex_cmp_method(self, other, op, *, axis: Axis='columns', level=None): axis = self._get_axis_number(axis) if axis is not None else 1 (self, other) = self._align_for_op(other, axis, flex=True, level=level) new_data = self._dispatch_frame_op(other, op, axis=axis) return self._construct_result(new_data) @Appender(ops.make_flex_doc('eq', 'dataframe')) def eq(self, other, axis: Axis='columns', level=None) -> DataFrame: return self._flex_cmp_method(other, operator.eq, axis=axis, level=level) @Appender(ops.make_flex_doc('ne', 'dataframe')) def ne(self, other, axis: Axis='columns', level=None) -> DataFrame: return self._flex_cmp_method(other, operator.ne, axis=axis, level=level) @Appender(ops.make_flex_doc('le', 'dataframe')) def le(self, other, axis: Axis='columns', level=None) -> DataFrame: return self._flex_cmp_method(other, operator.le, axis=axis, 
level=level) @Appender(ops.make_flex_doc('lt', 'dataframe')) def lt(self, other, axis: Axis='columns', level=None) -> DataFrame: return self._flex_cmp_method(other, operator.lt, axis=axis, level=level) @Appender(ops.make_flex_doc('ge', 'dataframe')) def ge(self, other, axis: Axis='columns', level=None) -> DataFrame: return self._flex_cmp_method(other, operator.ge, axis=axis, level=level) @Appender(ops.make_flex_doc('gt', 'dataframe')) def gt(self, other, axis: Axis='columns', level=None) -> DataFrame: return self._flex_cmp_method(other, operator.gt, axis=axis, level=level) @Appender(ops.make_flex_doc('add', 'dataframe')) def add(self, other, axis: Axis='columns', level=None, fill_value=None) -> DataFrame: return self._flex_arith_method(other, operator.add, level=level, fill_value=fill_value, axis=axis) @Appender(ops.make_flex_doc('radd', 'dataframe')) def radd(self, other, axis: Axis='columns', level=None, fill_value=None) -> DataFrame: return self._flex_arith_method(other, roperator.radd, level=level, fill_value=fill_value, axis=axis) @Appender(ops.make_flex_doc('sub', 'dataframe')) def sub(self, other, axis: Axis='columns', level=None, fill_value=None) -> DataFrame: return self._flex_arith_method(other, operator.sub, level=level, fill_value=fill_value, axis=axis) subtract = sub @Appender(ops.make_flex_doc('rsub', 'dataframe')) def rsub(self, other, axis: Axis='columns', level=None, fill_value=None) -> DataFrame: return self._flex_arith_method(other, roperator.rsub, level=level, fill_value=fill_value, axis=axis) @Appender(ops.make_flex_doc('mul', 'dataframe')) def mul(self, other, axis: Axis='columns', level=None, fill_value=None) -> DataFrame: return self._flex_arith_method(other, operator.mul, level=level, fill_value=fill_value, axis=axis) multiply = mul @Appender(ops.make_flex_doc('rmul', 'dataframe')) def rmul(self, other, axis: Axis='columns', level=None, fill_value=None) -> DataFrame: return self._flex_arith_method(other, roperator.rmul, level=level, fill_value=fill_value, axis=axis) @Appender(ops.make_flex_doc('truediv', 'dataframe')) def truediv(self, other, axis: Axis='columns', level=None, fill_value=None) -> DataFrame: return self._flex_arith_method(other, operator.truediv, level=level, fill_value=fill_value, axis=axis) div = truediv divide = truediv @Appender(ops.make_flex_doc('rtruediv', 'dataframe')) def rtruediv(self, other, axis: Axis='columns', level=None, fill_value=None) -> DataFrame: return self._flex_arith_method(other, roperator.rtruediv, level=level, fill_value=fill_value, axis=axis) rdiv = rtruediv @Appender(ops.make_flex_doc('floordiv', 'dataframe')) def floordiv(self, other, axis: Axis='columns', level=None, fill_value=None) -> DataFrame: return self._flex_arith_method(other, operator.floordiv, level=level, fill_value=fill_value, axis=axis) @Appender(ops.make_flex_doc('rfloordiv', 'dataframe')) def rfloordiv(self, other, axis: Axis='columns', level=None, fill_value=None) -> DataFrame: return self._flex_arith_method(other, roperator.rfloordiv, level=level, fill_value=fill_value, axis=axis) @Appender(ops.make_flex_doc('mod', 'dataframe')) def mod(self, other, axis: Axis='columns', level=None, fill_value=None) -> DataFrame: return self._flex_arith_method(other, operator.mod, level=level, fill_value=fill_value, axis=axis) @Appender(ops.make_flex_doc('rmod', 'dataframe')) def rmod(self, other, axis: Axis='columns', level=None, fill_value=None) -> DataFrame: return self._flex_arith_method(other, roperator.rmod, level=level, fill_value=fill_value, axis=axis) 
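# Illustrative sketch for the flexible arithmetic/comparison wrappers defined above
# (add/sub/mul/truediv/eq/ge/... with axis, level and fill_value); the frames, labels and
# offsets are assumed example values.
import pandas as pd

left = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
right = pd.DataFrame({"a": [10, 20], "c": [30, 40]})

# Frame-with-frame ops align on both axes; fill_value substitutes for entries that exist on
# only one side (columns "b" and "c" here), while pairs missing on both sides stay NaN.
summed = left.add(right, fill_value=0)

# Frame-with-Series ops broadcast along the chosen axis.
row_offsets = pd.Series([100, 200], index=["a", "b"])
per_column = left.add(row_offsets, axis="columns")   # match the Series to the columns
per_row = left.sub(pd.Series([1, 2]), axis="index")  # match the Series to the row index

# The flexible comparison methods mirror the operators but allow axis/level control.
mask = left.ge(2)

print(summed.columns.tolist(), per_column.loc[0, "a"], per_row.loc[1, "b"], mask.values.tolist())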
@Appender(ops.make_flex_doc('pow', 'dataframe')) def pow(self, other, axis: Axis='columns', level=None, fill_value=None) -> DataFrame: return self._flex_arith_method(other, operator.pow, level=level, fill_value=fill_value, axis=axis) @Appender(ops.make_flex_doc('rpow', 'dataframe')) def rpow(self, other, axis: Axis='columns', level=None, fill_value=None) -> DataFrame: return self._flex_arith_method(other, roperator.rpow, level=level, fill_value=fill_value, axis=axis) @doc(_shared_docs['compare'], dedent('\n Returns\n -------\n DataFrame\n DataFrame that shows the differences stacked side by side.\n\n The resulting index will be a MultiIndex with \'self\' and \'other\'\n stacked alternately at the inner level.\n\n Raises\n ------\n ValueError\n When the two DataFrames don\'t have identical labels or shape.\n\n See Also\n --------\n Series.compare : Compare with another Series and show differences.\n DataFrame.equals : Test whether two objects contain the same elements.\n\n Notes\n -----\n Matching NaNs will not appear as a difference.\n\n Can only compare identically-labeled\n (i.e. same shape, identical row and column labels) DataFrames\n\n Examples\n --------\n >>> df = pd.DataFrame(\n ... {{\n ... "col1": ["a", "a", "b", "b", "a"],\n ... "col2": [1.0, 2.0, 3.0, np.nan, 5.0],\n ... "col3": [1.0, 2.0, 3.0, 4.0, 5.0]\n ... }},\n ... columns=["col1", "col2", "col3"],\n ... )\n >>> df\n col1 col2 col3\n 0 a 1.0 1.0\n 1 a 2.0 2.0\n 2 b 3.0 3.0\n 3 b NaN 4.0\n 4 a 5.0 5.0\n\n >>> df2 = df.copy()\n >>> df2.loc[0, \'col1\'] = \'c\'\n >>> df2.loc[2, \'col3\'] = 4.0\n >>> df2\n col1 col2 col3\n 0 c 1.0 1.0\n 1 a 2.0 2.0\n 2 b 3.0 4.0\n 3 b NaN 4.0\n 4 a 5.0 5.0\n\n Align the differences on columns\n\n >>> df.compare(df2)\n col1 col3\n self other self other\n 0 a c NaN NaN\n 2 NaN NaN 3.0 4.0\n\n Assign result_names\n\n >>> df.compare(df2, result_names=("left", "right"))\n col1 col3\n left right left right\n 0 a c NaN NaN\n 2 NaN NaN 3.0 4.0\n\n Stack the differences on rows\n\n >>> df.compare(df2, align_axis=0)\n col1 col3\n 0 self a NaN\n other c NaN\n 2 self NaN 3.0\n other NaN 4.0\n\n Keep the equal values\n\n >>> df.compare(df2, keep_equal=True)\n col1 col3\n self other self other\n 0 a c 1.0 1.0\n 2 b b 3.0 4.0\n\n Keep all original rows and columns\n\n >>> df.compare(df2, keep_shape=True)\n col1 col2 col3\n self other self other self other\n 0 a c NaN NaN NaN NaN\n 1 NaN NaN NaN NaN NaN NaN\n 2 NaN NaN NaN NaN 3.0 4.0\n 3 NaN NaN NaN NaN NaN NaN\n 4 NaN NaN NaN NaN NaN NaN\n\n Keep all original rows and columns and also all original values\n\n >>> df.compare(df2, keep_shape=True, keep_equal=True)\n col1 col2 col3\n self other self other self other\n 0 a c 1.0 1.0 1.0 1.0\n 1 a a 2.0 2.0 2.0 2.0\n 2 b b 3.0 3.0 3.0 4.0\n 3 b b NaN NaN 4.0 4.0\n 4 a a 5.0 5.0 5.0 5.0\n '), klass=_shared_doc_kwargs['klass']) def compare(self, other: DataFrame, align_axis: Axis=1, keep_shape: bool=False, keep_equal: bool=False, result_names: Suffixes=('self', 'other')) -> DataFrame: return super().compare(other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, result_names=result_names) def combine(self, other: DataFrame, func: Callable[[Series, Series], Series | Hashable], fill_value=None, overwrite: bool=True) -> DataFrame: other_idxlen = len(other.index) (this, other) = self.align(other) new_index = this.index if other.empty and len(new_index) == len(self.index): return self.copy() if self.empty and len(other) == other_idxlen: return other.copy() new_columns = 
this.columns.union(other.columns) do_fill = fill_value is not None result = {} for col in new_columns: series = this[col] other_series = other[col] this_dtype = series.dtype other_dtype = other_series.dtype this_mask = isna(series) other_mask = isna(other_series) if not overwrite and other_mask.all(): result[col] = this[col].copy() continue if do_fill: series = series.copy() other_series = other_series.copy() series[this_mask] = fill_value other_series[other_mask] = fill_value if col not in self.columns: new_dtype = other_dtype try: series = series.astype(new_dtype) except ValueError: pass else: new_dtype = find_common_type([this_dtype, other_dtype]) series = series.astype(new_dtype) other_series = other_series.astype(new_dtype) arr = func(series, other_series) if isinstance(new_dtype, np.dtype): arr = maybe_downcast_to_dtype(arr, new_dtype) result[col] = arr frame_result = self._constructor(result, index=new_index, columns=new_columns) return frame_result.__finalize__(self, method='combine') def combine_first(self, other: DataFrame) -> DataFrame: from pandas.core.computation import expressions def combiner(x: Series, y: Series): mask = x.isna()._values x_values = x._values y_values = y._values if y.name not in self.columns: return y_values return expressions.where(mask, y_values, x_values) if len(other) == 0: combined = self.reindex(self.columns.append(other.columns.difference(self.columns)), axis=1) combined = combined.astype(other.dtypes) else: combined = self.combine(other, combiner, overwrite=False) dtypes = {col: find_common_type([self.dtypes[col], other.dtypes[col]]) for col in self.columns.intersection(other.columns) if combined.dtypes[col] != self.dtypes[col]} if dtypes: combined = combined.astype(dtypes) return combined.__finalize__(self, method='combine_first') def update(self, other, join: UpdateJoin='left', overwrite: bool=True, filter_func=None, errors: IgnoreRaise='ignore') -> None: if not PYPY: if sys.getrefcount(self) <= REF_COUNT: warnings.warn(_chained_assignment_method_msg, ChainedAssignmentError, stacklevel=2) if join != 'left': raise NotImplementedError('Only left join is supported') if errors not in ['ignore', 'raise']: raise ValueError("The parameter errors must be either 'ignore' or 'raise'") if not isinstance(other, DataFrame): other = DataFrame(other) if other.index.has_duplicates: raise ValueError('Update not allowed with duplicate indexes on other.') index_intersection = other.index.intersection(self.index) if index_intersection.empty: raise ValueError('Update not allowed when the index on `other` has no intersection with this dataframe.') other = other.reindex(index_intersection) this_data = self.loc[index_intersection] for col in self.columns.intersection(other.columns): this = this_data[col] that = other[col] if filter_func is not None: mask = ~filter_func(this) | isna(that) else: if errors == 'raise': mask_this = notna(that) mask_that = notna(this) if any(mask_this & mask_that): raise ValueError('Data overlaps.') if overwrite: mask = isna(that) else: mask = notna(this) if mask.all(): continue self.loc[index_intersection, col] = this.where(mask, that) @Appender(dedent('\n Examples\n --------\n >>> df = pd.DataFrame({\'Animal\': [\'Falcon\', \'Falcon\',\n ... \'Parrot\', \'Parrot\'],\n ... 
\'Max Speed\': [380., 370., 24., 26.]})\n >>> df\n Animal Max Speed\n 0 Falcon 380.0\n 1 Falcon 370.0\n 2 Parrot 24.0\n 3 Parrot 26.0\n >>> df.groupby([\'Animal\']).mean()\n Max Speed\n Animal\n Falcon 375.0\n Parrot 25.0\n\n **Hierarchical Indexes**\n\n We can groupby different levels of a hierarchical index\n using the `level` parameter:\n\n >>> arrays = [[\'Falcon\', \'Falcon\', \'Parrot\', \'Parrot\'],\n ... [\'Captive\', \'Wild\', \'Captive\', \'Wild\']]\n >>> index = pd.MultiIndex.from_arrays(arrays, names=(\'Animal\', \'Type\'))\n >>> df = pd.DataFrame({\'Max Speed\': [390., 350., 30., 20.]},\n ... index=index)\n >>> df\n Max Speed\n Animal Type\n Falcon Captive 390.0\n Wild 350.0\n Parrot Captive 30.0\n Wild 20.0\n >>> df.groupby(level=0).mean()\n Max Speed\n Animal\n Falcon 370.0\n Parrot 25.0\n >>> df.groupby(level="Type").mean()\n Max Speed\n Type\n Captive 210.0\n Wild 185.0\n\n We can also choose to include NA in group keys or not by setting\n `dropna` parameter, the default setting is `True`.\n\n >>> arr = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]\n >>> df = pd.DataFrame(arr, columns=["a", "b", "c"])\n\n >>> df.groupby(by=["b"]).sum()\n a c\n b\n 1.0 2 3\n 2.0 2 5\n\n >>> df.groupby(by=["b"], dropna=False).sum()\n a c\n b\n 1.0 2 3\n 2.0 2 5\n NaN 1 4\n\n >>> arr = [["a", 12, 12], [None, 12.3, 33.], ["b", 12.3, 123], ["a", 1, 1]]\n >>> df = pd.DataFrame(arr, columns=["a", "b", "c"])\n\n >>> df.groupby(by="a").sum()\n b c\n a\n a 13.0 13.0\n b 12.3 123.0\n\n >>> df.groupby(by="a", dropna=False).sum()\n b c\n a\n a 13.0 13.0\n b 12.3 123.0\n NaN 12.3 33.0\n\n When using ``.apply()``, use ``group_keys`` to include or exclude the\n group keys. The ``group_keys`` argument defaults to ``True`` (include).\n\n >>> df = pd.DataFrame({\'Animal\': [\'Falcon\', \'Falcon\',\n ... \'Parrot\', \'Parrot\'],\n ... \'Max Speed\': [380., 370., 24., 26.]})\n >>> df.groupby("Animal", group_keys=True)[[\'Max Speed\']].apply(lambda x: x)\n Max Speed\n Animal\n Falcon 0 380.0\n 1 370.0\n Parrot 2 24.0\n 3 26.0\n\n >>> df.groupby("Animal", group_keys=False)[[\'Max Speed\']].apply(lambda x: x)\n Max Speed\n 0 380.0\n 1 370.0\n 2 24.0\n 3 26.0\n ')) @Appender(_shared_docs['groupby'] % _shared_doc_kwargs) def groupby(self, by=None, level: IndexLabel | None=None, as_index: bool=True, sort: bool=True, group_keys: bool=True, observed: bool=True, dropna: bool=True) -> DataFrameGroupBy: from pandas.core.groupby.generic import DataFrameGroupBy if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") return DataFrameGroupBy(obj=self, keys=by, level=level, as_index=as_index, sort=sort, group_keys=group_keys, observed=observed, dropna=dropna) _shared_docs['pivot'] = '\n Return reshaped DataFrame organized by given index / column values.\n\n Reshape data (produce a "pivot" table) based on column values. Uses\n unique values from specified `index` / `columns` to form axes of the\n resulting DataFrame. This function does not support data\n aggregation, multiple values will result in a MultiIndex in the\n columns. See the :ref:`User Guide ` for more on reshaping.\n\n Parameters\n ----------%s\n columns : str or object or a list of str\n Column to use to make new frame\'s columns.\n index : str or object or a list of str, optional\n Column to use to make new frame\'s index. If not given, uses existing index.\n values : str, object or a list of the previous, optional\n Column(s) to use for populating new frame\'s values. 
If not\n specified, all remaining columns will be used and the result will\n have hierarchically indexed columns.\n\n Returns\n -------\n DataFrame\n Returns reshaped DataFrame.\n\n Raises\n ------\n ValueError:\n When there are any `index`, `columns` combinations with multiple\n values. `DataFrame.pivot_table` when you need to aggregate.\n\n See Also\n --------\n DataFrame.pivot_table : Generalization of pivot that can handle\n duplicate values for one index/column pair.\n DataFrame.unstack : Pivot based on the index values instead of a\n column.\n wide_to_long : Wide panel to long format. Less flexible but more\n user-friendly than melt.\n\n Notes\n -----\n For finer-tuned control, see hierarchical indexing documentation along\n with the related stack/unstack methods.\n\n Reference :ref:`the user guide ` for more examples.\n\n Examples\n --------\n >>> df = pd.DataFrame({\'foo\': [\'one\', \'one\', \'one\', \'two\', \'two\',\n ... \'two\'],\n ... \'bar\': [\'A\', \'B\', \'C\', \'A\', \'B\', \'C\'],\n ... \'baz\': [1, 2, 3, 4, 5, 6],\n ... \'zoo\': [\'x\', \'y\', \'z\', \'q\', \'w\', \'t\']})\n >>> df\n foo bar baz zoo\n 0 one A 1 x\n 1 one B 2 y\n 2 one C 3 z\n 3 two A 4 q\n 4 two B 5 w\n 5 two C 6 t\n\n >>> df.pivot(index=\'foo\', columns=\'bar\', values=\'baz\')\n bar A B C\n foo\n one 1 2 3\n two 4 5 6\n\n >>> df.pivot(index=\'foo\', columns=\'bar\')[\'baz\']\n bar A B C\n foo\n one 1 2 3\n two 4 5 6\n\n >>> df.pivot(index=\'foo\', columns=\'bar\', values=[\'baz\', \'zoo\'])\n baz zoo\n bar A B C A B C\n foo\n one 1 2 3 x y z\n two 4 5 6 q w t\n\n You could also assign a list of column names or a list of index names.\n\n >>> df = pd.DataFrame({\n ... "lev1": [1, 1, 1, 2, 2, 2],\n ... "lev2": [1, 1, 2, 1, 1, 2],\n ... "lev3": [1, 2, 1, 2, 1, 2],\n ... "lev4": [1, 2, 3, 4, 5, 6],\n ... "values": [0, 1, 2, 3, 4, 5]})\n >>> df\n lev1 lev2 lev3 lev4 values\n 0 1 1 1 1 0\n 1 1 1 2 2 1\n 2 1 2 1 3 2\n 3 2 1 2 4 3\n 4 2 1 1 5 4\n 5 2 2 2 6 5\n\n >>> df.pivot(index="lev1", columns=["lev2", "lev3"], values="values")\n lev2 1 2\n lev3 1 2 1 2\n lev1\n 1 0.0 1.0 2.0 NaN\n 2 4.0 3.0 NaN 5.0\n\n >>> df.pivot(index=["lev1", "lev2"], columns=["lev3"], values="values")\n lev3 1 2\n lev1 lev2\n 1 1 0.0 1.0\n 2 2.0 NaN\n 2 1 4.0 3.0\n 2 NaN 5.0\n\n A ValueError is raised if there are any duplicates.\n\n >>> df = pd.DataFrame({"foo": [\'one\', \'one\', \'two\', \'two\'],\n ... "bar": [\'A\', \'A\', \'B\', \'C\'],\n ... "baz": [1, 2, 3, 4]})\n >>> df\n foo bar baz\n 0 one A 1\n 1 one A 2\n 2 two B 3\n 3 two C 4\n\n Notice that the first two rows are the same for our `index`\n and `columns` arguments.\n\n >>> df.pivot(index=\'foo\', columns=\'bar\', values=\'baz\')\n Traceback (most recent call last):\n ...\n ValueError: Index contains duplicate entries, cannot reshape\n ' @Substitution('') @Appender(_shared_docs['pivot']) def pivot(self, *, columns, index=lib.no_default, values=lib.no_default) -> DataFrame: from pandas.core.reshape.pivot import pivot return pivot(self, index=index, columns=columns, values=values) _shared_docs['pivot_table'] = '\n Create a spreadsheet-style pivot table as a DataFrame.\n\n The levels in the pivot table will be stored in MultiIndex objects\n (hierarchical indexes) on the index and columns of the result DataFrame.\n\n Parameters\n ----------%s\n values : list-like or scalar, optional\n Column or columns to aggregate.\n index : column, Grouper, array, or list of the previous\n Keys to group by on the pivot table index. 
If a list is passed,\n it can contain any of the other types (except list). If an array is\n passed, it must be the same length as the data and will be used in\n the same manner as column values.\n columns : column, Grouper, array, or list of the previous\n Keys to group by on the pivot table column. If a list is passed,\n it can contain any of the other types (except list). If an array is\n passed, it must be the same length as the data and will be used in\n the same manner as column values.\n aggfunc : function, list of functions, dict, default "mean"\n If a list of functions is passed, the resulting pivot table will have\n hierarchical columns whose top level are the function names\n (inferred from the function objects themselves).\n If a dict is passed, the key is column to aggregate and the value is\n function or list of functions. If ``margin=True``, aggfunc will be\n used to calculate the partial aggregates.\n fill_value : scalar, default None\n Value to replace missing values with (in the resulting pivot table,\n after aggregation).\n margins : bool, default False\n If ``margins=True``, special ``All`` columns and rows\n will be added with partial group aggregates across the categories\n on the rows and columns.\n dropna : bool, default True\n Do not include columns whose entries are all NaN. If True,\n rows with a NaN value in any column will be omitted before\n computing margins.\n margins_name : str, default \'All\'\n Name of the row / column that will contain the totals\n when margins is True.\n observed : bool, default False\n This only applies if any of the groupers are Categoricals.\n If True: only show observed values for categorical groupers.\n If False: show all values for categorical groupers.\n\n .. versionchanged:: 3.0.0\n\n The default value is now ``True``.\n\n sort : bool, default True\n Specifies if the result should be sorted.\n\n .. versionadded:: 1.3.0\n\n **kwargs : dict\n Optional keyword arguments to pass to ``aggfunc``.\n\n .. versionadded:: 3.0.0\n\n Returns\n -------\n DataFrame\n An Excel style pivot table.\n\n See Also\n --------\n DataFrame.pivot : Pivot without aggregation that can handle\n non-numeric data.\n DataFrame.melt: Unpivot a DataFrame from wide to long format,\n optionally leaving identifiers set.\n wide_to_long : Wide panel to long format. Less flexible but more\n user-friendly than melt.\n\n Notes\n -----\n Reference :ref:`the user guide ` for more examples.\n\n Examples\n --------\n >>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",\n ... "bar", "bar", "bar", "bar"],\n ... "B": ["one", "one", "one", "two", "two",\n ... "one", "one", "two", "two"],\n ... "C": ["small", "large", "large", "small",\n ... "small", "large", "small", "small",\n ... "large"],\n ... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],\n ... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]})\n >>> df\n A B C D E\n 0 foo one small 1 2\n 1 foo one large 2 4\n 2 foo one large 2 5\n 3 foo two small 3 5\n 4 foo two small 3 6\n 5 bar one large 4 6\n 6 bar one small 5 8\n 7 bar two small 6 9\n 8 bar two large 7 9\n\n This first example aggregates values by taking the sum.\n\n >>> table = pd.pivot_table(df, values=\'D\', index=[\'A\', \'B\'],\n ... columns=[\'C\'], aggfunc="sum")\n >>> table\n C large small\n A B\n bar one 4.0 5.0\n two 7.0 6.0\n foo one 4.0 1.0\n two NaN 6.0\n\n We can also fill missing values using the `fill_value` parameter.\n\n >>> table = pd.pivot_table(df, values=\'D\', index=[\'A\', \'B\'],\n ... 
columns=[\'C\'], aggfunc="sum", fill_value=0)\n >>> table\n C large small\n A B\n bar one 4 5\n two 7 6\n foo one 4 1\n two 0 6\n\n The next example aggregates by taking the mean across multiple columns.\n\n >>> table = pd.pivot_table(df, values=[\'D\', \'E\'], index=[\'A\', \'C\'],\n ... aggfunc={\'D\': "mean", \'E\': "mean"})\n >>> table\n D E\n A C\n bar large 5.500000 7.500000\n small 5.500000 8.500000\n foo large 2.000000 4.500000\n small 2.333333 4.333333\n\n We can also calculate multiple types of aggregations for any given\n value column.\n\n >>> table = pd.pivot_table(df, values=[\'D\', \'E\'], index=[\'A\', \'C\'],\n ... aggfunc={\'D\': "mean",\n ... \'E\': ["min", "max", "mean"]})\n >>> table\n D E\n mean max mean min\n A C\n bar large 5.500000 9 7.500000 6\n small 5.500000 9 8.500000 8\n foo large 2.000000 5 4.500000 4\n small 2.333333 6 4.333333 2\n ' @Substitution('') @Appender(_shared_docs['pivot_table']) def pivot_table(self, values=None, index=None, columns=None, aggfunc: AggFuncType='mean', fill_value=None, margins: bool=False, dropna: bool=True, margins_name: Level='All', observed: bool=True, sort: bool=True, **kwargs) -> DataFrame: from pandas.core.reshape.pivot import pivot_table return pivot_table(self, values=values, index=index, columns=columns, aggfunc=aggfunc, fill_value=fill_value, margins=margins, dropna=dropna, margins_name=margins_name, observed=observed, sort=sort, **kwargs) def stack(self, level: IndexLabel=-1, dropna: bool | lib.NoDefault=lib.no_default, sort: bool | lib.NoDefault=lib.no_default, future_stack: bool=True): if not future_stack: from pandas.core.reshape.reshape import stack, stack_multiple warnings.warn("The previous implementation of stack is deprecated and will be removed in a future version of pandas. See the What's New notes for pandas 2.1.0 for details. Do not specify the future_stack argument to adopt the new implementation and silence this warning.", FutureWarning, stacklevel=find_stack_level()) if dropna is lib.no_default: dropna = True if sort is lib.no_default: sort = True if isinstance(level, (tuple, list)): result = stack_multiple(self, level, dropna=dropna, sort=sort) else: result = stack(self, level, dropna=dropna, sort=sort) else: from pandas.core.reshape.reshape import stack_v3 if dropna is not lib.no_default: raise ValueError('dropna must be unspecified as the new implementation does not introduce rows of NA values. This argument will be removed in a future version of pandas.') if sort is not lib.no_default: raise ValueError('Cannot specify sort, this argument will be removed in a future version of pandas. Sort the result using .sort_index instead.') if isinstance(level, (tuple, list)) and (not all((lev in self.columns.names for lev in level))) and (not all((isinstance(lev, int) for lev in level))): raise ValueError('level should contain all level names or all level numbers, not a mixture of the two.') if not isinstance(level, (tuple, list)): level = [level] level = [self.columns._get_level_number(lev) for lev in level] result = stack_v3(self, level) return result.__finalize__(self, method='stack') def explode(self, column: IndexLabel, ignore_index: bool=False) -> DataFrame: if not self.columns.is_unique: duplicate_cols = self.columns[self.columns.duplicated()].tolist() raise ValueError(f'DataFrame columns must be unique. 
Duplicate columns: {duplicate_cols}') columns: list[Hashable] if is_scalar(column) or isinstance(column, tuple): columns = [column] elif isinstance(column, list) and all((is_scalar(c) or isinstance(c, tuple) for c in column)): if not column: raise ValueError('column must be nonempty') if len(column) > len(set(column)): raise ValueError('column must be unique') columns = column else: raise ValueError('column must be a scalar, tuple, or list thereof') df = self.reset_index(drop=True) if len(columns) == 1: result = df[columns[0]].explode() else: mylen = lambda x: len(x) if is_list_like(x) and len(x) > 0 else 1 counts0 = self[columns[0]].apply(mylen) for c in columns[1:]: if not all(counts0 == self[c].apply(mylen)): raise ValueError('columns must have matching element counts') result = DataFrame({c: df[c].explode() for c in columns}) result = df.drop(columns, axis=1).join(result) if ignore_index: result.index = default_index(len(result)) else: result.index = self.index.take(result.index) result = result.reindex(columns=self.columns) return result.__finalize__(self, method='explode') def unstack(self, level: IndexLabel=-1, fill_value=None, sort: bool=True) -> DataFrame | Series: from pandas.core.reshape.reshape import unstack result = unstack(self, level, fill_value, sort) return result.__finalize__(self, method='unstack') def melt(self, id_vars=None, value_vars=None, var_name=None, value_name: Hashable='value', col_level: Level | None=None, ignore_index: bool=True) -> DataFrame: return melt(self, id_vars=id_vars, value_vars=value_vars, var_name=var_name, value_name=value_name, col_level=col_level, ignore_index=ignore_index).__finalize__(self, method='melt') @doc(Series.diff, klass='DataFrame', extra_params="axis : {0 or 'index', 1 or 'columns'}, default 0\n Take difference over rows (0) or columns (1).\n", other_klass='Series', examples=dedent("\n Difference with previous row\n\n >>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],\n ... 'b': [1, 1, 2, 3, 5, 8],\n ... 
'c': [1, 4, 9, 16, 25, 36]})\n >>> df\n a b c\n 0 1 1 1\n 1 2 1 4\n 2 3 2 9\n 3 4 3 16\n 4 5 5 25\n 5 6 8 36\n\n >>> df.diff()\n a b c\n 0 NaN NaN NaN\n 1 1.0 0.0 3.0\n 2 1.0 1.0 5.0\n 3 1.0 1.0 7.0\n 4 1.0 2.0 9.0\n 5 1.0 3.0 11.0\n\n Difference with previous column\n\n >>> df.diff(axis=1)\n a b c\n 0 NaN 0 0\n 1 NaN -1 3\n 2 NaN -1 7\n 3 NaN -1 13\n 4 NaN 0 20\n 5 NaN 2 28\n\n Difference with 3rd previous row\n\n >>> df.diff(periods=3)\n a b c\n 0 NaN NaN NaN\n 1 NaN NaN NaN\n 2 NaN NaN NaN\n 3 3.0 2.0 15.0\n 4 3.0 4.0 21.0\n 5 3.0 6.0 27.0\n\n Difference with following row\n\n >>> df.diff(periods=-1)\n a b c\n 0 -1.0 0.0 -3.0\n 1 -1.0 -1.0 -5.0\n 2 -1.0 -1.0 -7.0\n 3 -1.0 -2.0 -9.0\n 4 -1.0 -3.0 -11.0\n 5 NaN NaN NaN\n\n Overflow in input dtype\n\n >>> df = pd.DataFrame({'a': [1, 0]}, dtype=np.uint8)\n >>> df.diff()\n a\n 0 NaN\n 1 255.0")) def diff(self, periods: int=1, axis: Axis=0) -> DataFrame: if not lib.is_integer(periods): if not (is_float(periods) and periods.is_integer()): raise ValueError('periods must be an integer') periods = int(periods) axis = self._get_axis_number(axis) if axis == 1: if periods != 0: return self - self.shift(periods, axis=axis) axis = 0 new_data = self._mgr.diff(n=periods) res_df = self._constructor_from_mgr(new_data, axes=new_data.axes) return res_df.__finalize__(self, 'diff') def _gotitem(self, key: IndexLabel, ndim: int, subset: DataFrame | Series | None=None) -> DataFrame | Series: if subset is None: subset = self elif subset.ndim == 1: return subset return subset[key] _agg_see_also_doc = dedent('\n See Also\n --------\n DataFrame.apply : Perform any type of operations.\n DataFrame.transform : Perform transformation type operations.\n DataFrame.groupby : Perform operations over groups.\n DataFrame.resample : Perform operations over resampled bins.\n DataFrame.rolling : Perform operations over rolling window.\n DataFrame.expanding : Perform operations over expanding window.\n core.window.ewm.ExponentialMovingWindow : Perform operation over exponential\n weighted window.\n ') _agg_examples_doc = dedent('\n Examples\n --------\n >>> df = pd.DataFrame([[1, 2, 3],\n ... [4, 5, 6],\n ... [7, 8, 9],\n ... [np.nan, np.nan, np.nan]],\n ... 
columns=[\'A\', \'B\', \'C\'])\n\n Aggregate these functions over the rows.\n\n >>> df.agg([\'sum\', \'min\'])\n A B C\n sum 12.0 15.0 18.0\n min 1.0 2.0 3.0\n\n Different aggregations per column.\n\n >>> df.agg({\'A\' : [\'sum\', \'min\'], \'B\' : [\'min\', \'max\']})\n A B\n sum 12.0 NaN\n min 1.0 2.0\n max NaN 8.0\n\n Aggregate different functions over the columns and rename the index of the resulting\n DataFrame.\n\n >>> df.agg(x=(\'A\', \'max\'), y=(\'B\', \'min\'), z=(\'C\', \'mean\'))\n A B C\n x 7.0 NaN NaN\n y NaN 2.0 NaN\n z NaN NaN 6.0\n\n Aggregate over the columns.\n\n >>> df.agg("mean", axis="columns")\n 0 2.0\n 1 5.0\n 2 8.0\n 3 NaN\n dtype: float64\n ') @doc(_shared_docs['aggregate'], klass=_shared_doc_kwargs['klass'], axis=_shared_doc_kwargs['axis'], see_also=_agg_see_also_doc, examples=_agg_examples_doc) def aggregate(self, func=None, axis: Axis=0, *args, **kwargs): from pandas.core.apply import frame_apply axis = self._get_axis_number(axis) op = frame_apply(self, func=func, axis=axis, args=args, kwargs=kwargs) result = op.agg() result = reconstruct_and_relabel_result(result, func, **kwargs) return result agg = aggregate @doc(_shared_docs['transform'], klass=_shared_doc_kwargs['klass'], axis=_shared_doc_kwargs['axis']) def transform(self, func: AggFuncType, axis: Axis=0, *args, **kwargs) -> DataFrame: from pandas.core.apply import frame_apply op = frame_apply(self, func=func, axis=axis, args=args, kwargs=kwargs) result = op.transform() assert isinstance(result, DataFrame) return result def apply(self, func: AggFuncType, axis: Axis=0, raw: bool=False, result_type: Literal['expand', 'reduce', 'broadcast'] | None=None, args=(), by_row: Literal[False, 'compat']='compat', engine: Literal['python', 'numba']='python', engine_kwargs: dict[str, bool] | None=None, **kwargs): from pandas.core.apply import frame_apply op = frame_apply(self, func=func, axis=axis, raw=raw, result_type=result_type, by_row=by_row, engine=engine, engine_kwargs=engine_kwargs, args=args, kwargs=kwargs) return op.apply().__finalize__(self, method='apply') def map(self, func: PythonFuncType, na_action: Literal['ignore'] | None=None, **kwargs) -> DataFrame: if na_action not in {'ignore', None}: raise ValueError(f"na_action must be 'ignore' or None. 
Got {na_action!r}") if self.empty: return self.copy() func = functools.partial(func, **kwargs) def infer(x): return x._map_values(func, na_action=na_action) return self.apply(infer).__finalize__(self, 'map') def _append(self, other, ignore_index: bool=False, verify_integrity: bool=False, sort: bool=False) -> DataFrame: if isinstance(other, (Series, dict)): if isinstance(other, dict): if not ignore_index: raise TypeError('Can only append a dict if ignore_index=True') other = Series(other) if other.name is None and (not ignore_index): raise TypeError('Can only append a Series if ignore_index=True or if the Series has a name') index = Index([other.name], name=self.index.names if isinstance(self.index, MultiIndex) else self.index.name) row_df = other.to_frame().T other = row_df.infer_objects().rename_axis(index.names) elif isinstance(other, list): if not other: pass elif not isinstance(other[0], DataFrame): other = DataFrame(other) if self.index.name is not None and (not ignore_index): other.index.name = self.index.name from pandas.core.reshape.concat import concat if isinstance(other, (list, tuple)): to_concat = [self, *other] else: to_concat = [self, other] result = concat(to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity, sort=sort) return result.__finalize__(self, method='append') def join(self, other: DataFrame | Series | Iterable[DataFrame | Series], on: IndexLabel | None=None, how: MergeHow='left', lsuffix: str='', rsuffix: str='', sort: bool=False, validate: JoinValidate | None=None) -> DataFrame: from pandas.core.reshape.concat import concat from pandas.core.reshape.merge import merge if isinstance(other, Series): if other.name is None: raise ValueError('Other Series must have a name') other = DataFrame({other.name: other}) if isinstance(other, DataFrame): if how == 'cross': return merge(self, other, how=how, on=on, suffixes=(lsuffix, rsuffix), sort=sort, validate=validate) return merge(self, other, left_on=on, how=how, left_index=on is None, right_index=True, suffixes=(lsuffix, rsuffix), sort=sort, validate=validate) else: if on is not None: raise ValueError('Joining multiple DataFrames only supported for joining on index') if rsuffix or lsuffix: raise ValueError('Suffixes not supported when joining multiple DataFrames') frames = [cast('DataFrame | Series', self)] + list(other) can_concat = all((df.index.is_unique for df in frames)) if can_concat: if how == 'left': res = concat(frames, axis=1, join='outer', verify_integrity=True, sort=sort) return res.reindex(self.index) else: return concat(frames, axis=1, join=how, verify_integrity=True, sort=sort) joined = frames[0] for frame in frames[1:]: joined = merge(joined, frame, how=how, left_index=True, right_index=True, validate=validate) return joined @Substitution('') @Appender(_merge_doc, indents=2) def merge(self, right: DataFrame | Series, how: MergeHow='inner', on: IndexLabel | AnyArrayLike | None=None, left_on: IndexLabel | AnyArrayLike | None=None, right_on: IndexLabel | AnyArrayLike | None=None, left_index: bool=False, right_index: bool=False, sort: bool=False, suffixes: Suffixes=('_x', '_y'), copy: bool | lib.NoDefault=lib.no_default, indicator: str | bool=False, validate: MergeValidate | None=None) -> DataFrame: self._check_copy_deprecation(copy) from pandas.core.reshape.merge import merge return merge(self, right, how=how, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, indicator=indicator, validate=validate) def round(self, 
decimals: int | dict[IndexLabel, int] | Series=0, *args, **kwargs) -> DataFrame: from pandas.core.reshape.concat import concat def _dict_round(df: DataFrame, decimals) -> Iterator[Series]: for (col, vals) in df.items(): try: yield _series_round(vals, decimals[col]) except KeyError: yield vals def _series_round(ser: Series, decimals: int) -> Series: if is_integer_dtype(ser.dtype) or is_float_dtype(ser.dtype): return ser.round(decimals) return ser nv.validate_round(args, kwargs) if isinstance(decimals, (dict, Series)): if isinstance(decimals, Series) and (not decimals.index.is_unique): raise ValueError('Index of decimals must be unique') if is_dict_like(decimals) and (not all((is_integer(value) for (_, value) in decimals.items()))): raise TypeError('Values in decimals must be integers') new_cols = list(_dict_round(self, decimals)) elif is_integer(decimals): new_mgr = self._mgr.round(decimals=decimals) return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__(self, method='round') else: raise TypeError('decimals must be an integer, a dict-like or a Series') if new_cols is not None and len(new_cols) > 0: return self._constructor(concat(new_cols, axis=1), index=self.index, columns=self.columns).__finalize__(self, method='round') else: return self.copy(deep=False) def corr(self, method: CorrelationMethod='pearson', min_periods: int=1, numeric_only: bool=False) -> DataFrame: data = self._get_numeric_data() if numeric_only else self cols = data.columns idx = cols.copy() mat = data.to_numpy(dtype=float, na_value=np.nan, copy=False) if method == 'pearson': correl = libalgos.nancorr(mat, minp=min_periods) elif method == 'spearman': correl = libalgos.nancorr_spearman(mat, minp=min_periods) elif method == 'kendall' or callable(method): if min_periods is None: min_periods = 1 mat = mat.T corrf = nanops.get_corr_func(method) K = len(cols) correl = np.empty((K, K), dtype=float) mask = np.isfinite(mat) for (i, ac) in enumerate(mat): for (j, bc) in enumerate(mat): if i > j: continue valid = mask[i] & mask[j] if valid.sum() < min_periods: c = np.nan elif i == j: c = 1.0 elif not valid.all(): c = corrf(ac[valid], bc[valid]) else: c = corrf(ac, bc) correl[i, j] = c correl[j, i] = c else: raise ValueError(f"method must be either 'pearson', 'spearman', 'kendall', or a callable, '{method}' was supplied") result = self._constructor(correl, index=idx, columns=cols, copy=False) return result.__finalize__(self, method='corr') def cov(self, min_periods: int | None=None, ddof: int | None=1, numeric_only: bool=False) -> DataFrame: data = self._get_numeric_data() if numeric_only else self cols = data.columns idx = cols.copy() mat = data.to_numpy(dtype=float, na_value=np.nan, copy=False) if notna(mat).all(): if min_periods is not None and min_periods > len(mat): base_cov = np.empty((mat.shape[1], mat.shape[1])) base_cov.fill(np.nan) else: base_cov = np.cov(mat.T, ddof=ddof) base_cov = base_cov.reshape((len(cols), len(cols))) else: base_cov = libalgos.nancorr(mat, cov=True, minp=min_periods) result = self._constructor(base_cov, index=idx, columns=cols, copy=False) return result.__finalize__(self, method='cov') def corrwith(self, other: DataFrame | Series, axis: Axis=0, drop: bool=False, method: CorrelationMethod='pearson', numeric_only: bool=False, min_periods: int | None=None) -> Series: axis = self._get_axis_number(axis) this = self._get_numeric_data() if numeric_only else self if isinstance(other, Series): return this.apply(lambda x: other.corr(x, method=method, min_periods=min_periods), axis=axis) if 
numeric_only: other = other._get_numeric_data() (left, right) = this.align(other, join='inner') if axis == 1: left = left.T right = right.T if method == 'pearson': left = left + right * 0 right = right + left * 0 ldem = left - left.mean(numeric_only=numeric_only) rdem = right - right.mean(numeric_only=numeric_only) num = (ldem * rdem).sum() dom = (left.count() - 1) * left.std(numeric_only=numeric_only) * right.std(numeric_only=numeric_only) correl = num / dom elif method in ['kendall', 'spearman'] or callable(method): def c(x): return nanops.nancorr(x[0], x[1], method=method) correl = self._constructor_sliced(map(c, zip(left.values.T, right.values.T)), index=left.columns, copy=False) else: raise ValueError(f"Invalid method {method} was passed, valid methods are: 'pearson', 'kendall', 'spearman', or callable") if not drop: raxis: AxisInt = 1 if axis == 0 else 0 result_index = this._get_axis(raxis).union(other._get_axis(raxis)) idx_diff = result_index.difference(correl.index) if len(idx_diff) > 0: correl = correl._append(Series([np.nan] * len(idx_diff), index=idx_diff)) return correl def count(self, axis: Axis=0, numeric_only: bool=False) -> Series: axis = self._get_axis_number(axis) if numeric_only: frame = self._get_numeric_data() else: frame = self if len(frame._get_axis(axis)) == 0: result = self._constructor_sliced(0, index=frame._get_agg_axis(axis)) else: result = notna(frame).sum(axis=axis) return result.astype('int64').__finalize__(self, method='count') def _reduce(self, op, name: str, *, axis: Axis=0, skipna: bool=True, numeric_only: bool=False, filter_type=None, **kwds): assert filter_type is None or filter_type == 'bool', filter_type out_dtype = 'bool' if filter_type == 'bool' else None if axis is not None: axis = self._get_axis_number(axis) def func(values: np.ndarray): return op(values, axis=axis, skipna=skipna, **kwds) def blk_func(values, axis: Axis=1): if isinstance(values, ExtensionArray): if not is_1d_only_ea_dtype(values.dtype): return values._reduce(name, axis=1, skipna=skipna, **kwds) return values._reduce(name, skipna=skipna, keepdims=True, **kwds) else: return op(values, axis=axis, skipna=skipna, **kwds) def _get_data() -> DataFrame: if filter_type is None: data = self._get_numeric_data() else: assert filter_type == 'bool' data = self._get_bool_data() return data df = self if numeric_only: df = _get_data() if axis is None: dtype = find_common_type([block.values.dtype for block in df._mgr.blocks]) if isinstance(dtype, ExtensionDtype): df = df.astype(dtype) arr = concat_compat(list(df._iter_column_arrays())) return arr._reduce(name, skipna=skipna, keepdims=False, **kwds) return func(df.values) elif axis == 1: if len(df.index) == 0: result = df._reduce(op, name, axis=0, skipna=skipna, numeric_only=False, filter_type=filter_type, **kwds).iloc[:0] result.index = df.index return result if df.shape[1] and name != 'kurt': dtype = find_common_type([block.values.dtype for block in df._mgr.blocks]) if isinstance(dtype, ExtensionDtype): name = {'argmax': 'idxmax', 'argmin': 'idxmin'}.get(name, name) df = df.astype(dtype) arr = concat_compat(list(df._iter_column_arrays())) (nrows, ncols) = df.shape row_index = np.tile(np.arange(nrows), ncols) col_index = np.repeat(np.arange(ncols), nrows) ser = Series(arr, index=col_index, copy=False) with rewrite_warning(target_message=f'The behavior of SeriesGroupBy.{name} with all-NA values', target_category=FutureWarning, new_message=f'The behavior of {type(self).__name__}.{name} with all-NA values, or any-NA and skipna=False, is deprecated. 
In a future version this will raise ValueError'): result = ser.groupby(row_index).agg(name, **kwds) result.index = df.index if not skipna and name not in ('any', 'all'): mask = df.isna().to_numpy(dtype=np.bool_).any(axis=1) other = -1 if name in ('idxmax', 'idxmin') else lib.no_default result = result.mask(mask, other) return result df = df.T res = df._mgr.reduce(blk_func) out = df._constructor_from_mgr(res, axes=res.axes).iloc[0] if out_dtype is not None and out.dtype != 'boolean': out = out.astype(out_dtype) elif (df._mgr.get_dtypes() == object).any() and name not in ['any', 'all']: out = out.astype(object) elif len(self) == 0 and out.dtype == object and (name in ('sum', 'prod')): out = out.astype(np.float64) return out def _reduce_axis1(self, name: str, func, skipna: bool) -> Series: if name == 'all': result = np.ones(len(self), dtype=bool) ufunc = np.logical_and elif name == 'any': result = np.zeros(len(self), dtype=bool) ufunc = np.logical_or else: raise NotImplementedError(name) for blocks in self._mgr.blocks: middle = func(blocks.values, axis=0, skipna=skipna) result = ufunc(result, middle) res_ser = self._constructor_sliced(result, index=self.index, copy=False) return res_ser @overload def any(self, *, axis: Axis=..., bool_only: bool=..., skipna: bool=..., **kwargs) -> Series: ... @overload def any(self, *, axis: None, bool_only: bool=..., skipna: bool=..., **kwargs) -> bool: ... @overload def any(self, *, axis: Axis | None, bool_only: bool=..., skipna: bool=..., **kwargs) -> Series | bool: ... @doc(make_doc('any', ndim=1)) def any(self, *, axis: Axis | None=0, bool_only: bool=False, skipna: bool=True, **kwargs) -> Series | bool: result = self._logical_func('any', nanops.nanany, axis, bool_only, skipna, **kwargs) if isinstance(result, Series): result = result.__finalize__(self, method='any') return result @overload def all(self, *, axis: Axis=..., bool_only: bool=..., skipna: bool=..., **kwargs) -> Series: ... @overload def all(self, *, axis: None, bool_only: bool=..., skipna: bool=..., **kwargs) -> bool: ... @overload def all(self, *, axis: Axis | None, bool_only: bool=..., skipna: bool=..., **kwargs) -> Series | bool: ... @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='all') @doc(make_doc('all', ndim=1)) def all(self, axis: Axis | None=0, bool_only: bool=False, skipna: bool=True, **kwargs) -> Series | bool: result = self._logical_func('all', nanops.nanall, axis, bool_only, skipna, **kwargs) if isinstance(result, Series): result = result.__finalize__(self, method='all') return result @overload def min(self, *, axis: Axis=..., skipna: bool=..., numeric_only: bool=..., **kwargs) -> Series: ... @overload def min(self, *, axis: None, skipna: bool=..., numeric_only: bool=..., **kwargs) -> Any: ... @overload def min(self, *, axis: Axis | None, skipna: bool=..., numeric_only: bool=..., **kwargs) -> Series | Any: ... @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='min') @doc(make_doc('min', ndim=2)) def min(self, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs) -> Series | Any: result = super().min(axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs) if isinstance(result, Series): result = result.__finalize__(self, method='min') return result @overload def max(self, *, axis: Axis=..., skipna: bool=..., numeric_only: bool=..., **kwargs) -> Series: ... @overload def max(self, *, axis: None, skipna: bool=..., numeric_only: bool=..., **kwargs) -> Any: ... 
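# Hedged usage sketch for the min/max reductions in this block (toy frame,
# illustrative only): the three @overload stubs narrow the return type --
# a Series for a concrete axis, a scalar (typed Any) for ``axis=None``.
# >>> import pandas as pd
# >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
# >>> df.max()          # column-wise -> Series([2, 4], index=["a", "b"])
# >>> df.max(axis=1)    # row-wise    -> Series([3, 4])
# The @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'])
# decorators used on these reductions mean everything except ``self`` should
# be passed by keyword, e.g. ``df.min(axis=1)`` rather than ``df.min(1)``.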
@overload def max(self, *, axis: Axis | None, skipna: bool=..., numeric_only: bool=..., **kwargs) -> Series | Any: ... @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='max') @doc(make_doc('max', ndim=2)) def max(self, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs) -> Series | Any: result = super().max(axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs) if isinstance(result, Series): result = result.__finalize__(self, method='max') return result @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='sum') def sum(self, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, min_count: int=0, **kwargs) -> Series: result = super().sum(axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count, **kwargs) if isinstance(result, Series): result = result.__finalize__(self, method='sum') return result @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='prod') def prod(self, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, min_count: int=0, **kwargs) -> Series: result = super().prod(axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count, **kwargs) if isinstance(result, Series): result = result.__finalize__(self, method='prod') return result @overload def mean(self, *, axis: Axis=..., skipna: bool=..., numeric_only: bool=..., **kwargs) -> Series: ... @overload def mean(self, *, axis: None, skipna: bool=..., numeric_only: bool=..., **kwargs) -> Any: ... @overload def mean(self, *, axis: Axis | None, skipna: bool=..., numeric_only: bool=..., **kwargs) -> Series | Any: ... @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='mean') @doc(make_doc('mean', ndim=2)) def mean(self, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs) -> Series | Any: result = super().mean(axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs) if isinstance(result, Series): result = result.__finalize__(self, method='mean') return result @overload def median(self, *, axis: Axis=..., skipna: bool=..., numeric_only: bool=..., **kwargs) -> Series: ... @overload def median(self, *, axis: None, skipna: bool=..., numeric_only: bool=..., **kwargs) -> Any: ... @overload def median(self, *, axis: Axis | None, skipna: bool=..., numeric_only: bool=..., **kwargs) -> Series | Any: ... @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='median') @doc(make_doc('median', ndim=2)) def median(self, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs) -> Series | Any: result = super().median(axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs) if isinstance(result, Series): result = result.__finalize__(self, method='median') return result @overload def sem(self, *, axis: Axis=..., skipna: bool=..., ddof: int=..., numeric_only: bool=..., **kwargs) -> Series: ... @overload def sem(self, *, axis: None, skipna: bool=..., ddof: int=..., numeric_only: bool=..., **kwargs) -> Any: ... @overload def sem(self, *, axis: Axis | None, skipna: bool=..., ddof: int=..., numeric_only: bool=..., **kwargs) -> Series | Any: ... 
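# Hedged example for the ddof-taking reductions (sem/var/std) implemented
# below, assuming a tiny numeric frame:
# >>> import pandas as pd
# >>> df = pd.DataFrame({"a": [1.0, 2.0, 3.0]})
# >>> df.var()          # sample variance, ddof=1 -> 1.0
# >>> df.std(ddof=0)    # population std          -> ~0.8165
# >>> df.sem()          # std / sqrt(n), ddof=1   -> ~0.5774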
@deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='sem') def sem(self, axis: Axis | None=0, skipna: bool=True, ddof: int=1, numeric_only: bool=False, **kwargs) -> Series | Any: result = super().sem(axis=axis, skipna=skipna, ddof=ddof, numeric_only=numeric_only, **kwargs) if isinstance(result, Series): result = result.__finalize__(self, method='sem') return result @overload def var(self, *, axis: Axis=..., skipna: bool=..., ddof: int=..., numeric_only: bool=..., **kwargs) -> Series: ... @overload def var(self, *, axis: None, skipna: bool=..., ddof: int=..., numeric_only: bool=..., **kwargs) -> Any: ... @overload def var(self, *, axis: Axis | None, skipna: bool=..., ddof: int=..., numeric_only: bool=..., **kwargs) -> Series | Any: ... @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='var') def var(self, axis: Axis | None=0, skipna: bool=True, ddof: int=1, numeric_only: bool=False, **kwargs) -> Series | Any: result = super().var(axis=axis, skipna=skipna, ddof=ddof, numeric_only=numeric_only, **kwargs) if isinstance(result, Series): result = result.__finalize__(self, method='var') return result @overload def std(self, *, axis: Axis=..., skipna: bool=..., ddof: int=..., numeric_only: bool=..., **kwargs) -> Series: ... @overload def std(self, *, axis: None, skipna: bool=..., ddof: int=..., numeric_only: bool=..., **kwargs) -> Any: ... @overload def std(self, *, axis: Axis | None, skipna: bool=..., ddof: int=..., numeric_only: bool=..., **kwargs) -> Series | Any: ... @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='std') def std(self, axis: Axis | None=0, skipna: bool=True, ddof: int=1, numeric_only: bool=False, **kwargs) -> Series | Any: result = super().std(axis=axis, skipna=skipna, ddof=ddof, numeric_only=numeric_only, **kwargs) if isinstance(result, Series): result = result.__finalize__(self, method='std') return result @overload def skew(self, *, axis: Axis=..., skipna: bool=..., numeric_only: bool=..., **kwargs) -> Series: ... @overload def skew(self, *, axis: None, skipna: bool=..., numeric_only: bool=..., **kwargs) -> Any: ... @overload def skew(self, *, axis: Axis | None, skipna: bool=..., numeric_only: bool=..., **kwargs) -> Series | Any: ... @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='skew') def skew(self, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs) -> Series | Any: result = super().skew(axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs) if isinstance(result, Series): result = result.__finalize__(self, method='skew') return result @overload def kurt(self, *, axis: Axis=..., skipna: bool=..., numeric_only: bool=..., **kwargs) -> Series: ... @overload def kurt(self, *, axis: None, skipna: bool=..., numeric_only: bool=..., **kwargs) -> Any: ... @overload def kurt(self, *, axis: Axis | None, skipna: bool=..., numeric_only: bool=..., **kwargs) -> Series | Any: ... 
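# Hedged note on the skew (above) and kurt (below) reductions: pandas reports
# Fisher's excess kurtosis (a normal distribution is centered on 0.0) and
# bias-corrected sample skewness. Minimal sketch:
# >>> import pandas as pd
# >>> df = pd.DataFrame({"a": [1.0, 2.0, 3.0, 4.0]})
# >>> df.skew()   # 0.0 for a symmetric sample
# >>> df.kurt()   # excess kurtosis; ``kurtosis`` is bound as an alias below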
@deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='kurt') def kurt(self, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs) -> Series | Any: result = super().kurt(axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs) if isinstance(result, Series): result = result.__finalize__(self, method='kurt') return result kurtosis = kurt product = prod @doc(make_doc('cummin', ndim=2)) def cummin(self, axis: Axis=0, skipna: bool=True, numeric_only: bool=False, *args, **kwargs) -> Self: data = self._get_numeric_data() if numeric_only else self return NDFrame.cummin(data, axis, skipna, *args, **kwargs) @doc(make_doc('cummax', ndim=2)) def cummax(self, axis: Axis=0, skipna: bool=True, numeric_only: bool=False, *args, **kwargs) -> Self: data = self._get_numeric_data() if numeric_only else self return NDFrame.cummax(data, axis, skipna, *args, **kwargs) @doc(make_doc('cumsum', ndim=2)) def cumsum(self, axis: Axis=0, skipna: bool=True, numeric_only: bool=False, *args, **kwargs) -> Self: data = self._get_numeric_data() if numeric_only else self return NDFrame.cumsum(data, axis, skipna, *args, **kwargs) @doc(make_doc('cumprod', 2)) def cumprod(self, axis: Axis=0, skipna: bool=True, numeric_only: bool=False, *args, **kwargs) -> Self: data = self._get_numeric_data() if numeric_only else self return NDFrame.cumprod(data, axis, skipna, *args, **kwargs) def nunique(self, axis: Axis=0, dropna: bool=True) -> Series: return self.apply(Series.nunique, axis=axis, dropna=dropna) def idxmin(self, axis: Axis=0, skipna: bool=True, numeric_only: bool=False) -> Series: axis = self._get_axis_number(axis) if self.empty and len(self.axes[axis]): axis_dtype = self.axes[axis].dtype return self._constructor_sliced(dtype=axis_dtype) if numeric_only: data = self._get_numeric_data() else: data = self res = data._reduce(nanops.nanargmin, 'argmin', axis=axis, skipna=skipna, numeric_only=False) indices = res._values if (indices == -1).any(): warnings.warn(f'The behavior of {type(self).__name__}.idxmin with all-NA values, or any-NA and skipna=False, is deprecated. In a future version this will raise ValueError', FutureWarning, stacklevel=find_stack_level()) index = data._get_axis(axis) result = algorithms.take(index._values, indices, allow_fill=True, fill_value=index._na_value) final_result = data._constructor_sliced(result, index=data._get_agg_axis(axis)) return final_result.__finalize__(self, method='idxmin') def idxmax(self, axis: Axis=0, skipna: bool=True, numeric_only: bool=False) -> Series: axis = self._get_axis_number(axis) if self.empty and len(self.axes[axis]): axis_dtype = self.axes[axis].dtype return self._constructor_sliced(dtype=axis_dtype) if numeric_only: data = self._get_numeric_data() else: data = self res = data._reduce(nanops.nanargmax, 'argmax', axis=axis, skipna=skipna, numeric_only=False) indices = res._values if (indices == -1).any(): warnings.warn(f'The behavior of {type(self).__name__}.idxmax with all-NA values, or any-NA and skipna=False, is deprecated. 
In a future version this will raise ValueError', FutureWarning, stacklevel=find_stack_level()) index = data._get_axis(axis) result = algorithms.take(index._values, indices, allow_fill=True, fill_value=index._na_value) final_result = data._constructor_sliced(result, index=data._get_agg_axis(axis)) return final_result.__finalize__(self, method='idxmax') def _get_agg_axis(self, axis_num: int) -> Index: if axis_num == 0: return self.columns elif axis_num == 1: return self.index else: raise ValueError(f'Axis must be 0 or 1 (got {axis_num!r})') def mode(self, axis: Axis=0, numeric_only: bool=False, dropna: bool=True) -> DataFrame: data = self if not numeric_only else self._get_numeric_data() def f(s): return s.mode(dropna=dropna) data = data.apply(f, axis=axis) if data.empty: data.index = default_index(0) return data @overload def quantile(self, q: float=..., axis: Axis=..., numeric_only: bool=..., interpolation: QuantileInterpolation=..., method: Literal['single', 'table']=...) -> Series: ... @overload def quantile(self, q: AnyArrayLike | Sequence[float], axis: Axis=..., numeric_only: bool=..., interpolation: QuantileInterpolation=..., method: Literal['single', 'table']=...) -> Series | DataFrame: ... @overload def quantile(self, q: float | AnyArrayLike | Sequence[float]=..., axis: Axis=..., numeric_only: bool=..., interpolation: QuantileInterpolation=..., method: Literal['single', 'table']=...) -> Series | DataFrame: ... def quantile(self, q: float | AnyArrayLike | Sequence[float]=0.5, axis: Axis=0, numeric_only: bool=False, interpolation: QuantileInterpolation='linear', method: Literal['single', 'table']='single') -> Series | DataFrame: validate_percentile(q) axis = self._get_axis_number(axis) if not is_list_like(q): res_df = self.quantile([q], axis=axis, numeric_only=numeric_only, interpolation=interpolation, method=method) if method == 'single': res = res_df.iloc[0] else: res = res_df.T.iloc[:, 0] if axis == 1 and len(self) == 0: dtype = find_common_type(list(self.dtypes)) if needs_i8_conversion(dtype): return res.astype(dtype) return res q = Index(q, dtype=np.float64) data = self._get_numeric_data() if numeric_only else self if axis == 1: data = data.T if len(data.columns) == 0: cols = self.columns[:0] dtype = np.float64 if axis == 1: cdtype = find_common_type(list(self.dtypes)) if needs_i8_conversion(cdtype): dtype = cdtype res = self._constructor([], index=q, columns=cols, dtype=dtype) return res.__finalize__(self, method='quantile') valid_method = {'single', 'table'} if method not in valid_method: raise ValueError(f'Invalid method: {method}. Method must be in {valid_method}.') if method == 'single': res = data._mgr.quantile(qs=q, interpolation=interpolation) elif method == 'table': valid_interpolation = {'nearest', 'lower', 'higher'} if interpolation not in valid_interpolation: raise ValueError(f'Invalid interpolation: {interpolation}. 
Interpolation must be in {valid_interpolation}') if len(data) == 0: if data.ndim == 2: dtype = find_common_type(list(self.dtypes)) else: dtype = self.dtype return self._constructor([], index=q, columns=data.columns, dtype=dtype) q_idx = np.quantile(np.arange(len(data)), q, method=interpolation) by = data.columns if len(by) > 1: keys = [data._get_label_or_level_values(x) for x in by] indexer = lexsort_indexer(keys) else: k = data._get_label_or_level_values(by[0]) indexer = nargsort(k) res = data._mgr.take(indexer[q_idx], verify=False) res.axes[1] = q result = self._constructor_from_mgr(res, axes=res.axes) return result.__finalize__(self, method='quantile') def to_timestamp(self, freq: Frequency | None=None, how: ToTimestampHow='start', axis: Axis=0, copy: bool | lib.NoDefault=lib.no_default) -> DataFrame: self._check_copy_deprecation(copy) new_obj = self.copy(deep=False) axis_name = self._get_axis_name(axis) old_ax = getattr(self, axis_name) if not isinstance(old_ax, PeriodIndex): raise TypeError(f'unsupported Type {type(old_ax).__name__}') new_ax = old_ax.to_timestamp(freq=freq, how=how) setattr(new_obj, axis_name, new_ax) return new_obj def to_period(self, freq: Frequency | None=None, axis: Axis=0, copy: bool | lib.NoDefault=lib.no_default) -> DataFrame: self._check_copy_deprecation(copy) new_obj = self.copy(deep=False) axis_name = self._get_axis_name(axis) old_ax = getattr(self, axis_name) if not isinstance(old_ax, DatetimeIndex): raise TypeError(f'unsupported Type {type(old_ax).__name__}') new_ax = old_ax.to_period(freq=freq) setattr(new_obj, axis_name, new_ax) return new_obj def isin(self, values: Series | DataFrame | Sequence | Mapping) -> DataFrame: if isinstance(values, dict): from pandas.core.reshape.concat import concat values = collections.defaultdict(list, values) result = concat((self.iloc[:, [i]].isin(values[col]) for (i, col) in enumerate(self.columns)), axis=1) elif isinstance(values, Series): if not values.index.is_unique: raise ValueError('cannot compute isin with a duplicate axis.') result = self.eq(values.reindex_like(self), axis='index') elif isinstance(values, DataFrame): if not (values.columns.is_unique and values.index.is_unique): raise ValueError('cannot compute isin with a duplicate axis.') result = self.eq(values.reindex_like(self)) else: if not is_list_like(values): raise TypeError(f"only list-like or dict-like objects are allowed to be passed to DataFrame.isin(), you passed a '{type(values).__name__}'") def isin_(x): result = algorithms.isin(x.ravel(), values) return result.reshape(x.shape) res_mgr = self._mgr.apply(isin_) result = self._constructor_from_mgr(res_mgr, axes=res_mgr.axes) return result.__finalize__(self, method='isin') _AXIS_ORDERS: list[Literal['index', 'columns']] = ['index', 'columns'] _AXIS_TO_AXIS_NUMBER: dict[Axis, int] = {**NDFrame._AXIS_TO_AXIS_NUMBER, 1: 1, 'columns': 1} _AXIS_LEN = len(_AXIS_ORDERS) _info_axis_number: Literal[1] = 1 _info_axis_name: Literal['columns'] = 'columns' index = properties.AxisProperty(axis=1, doc="\n The index (row labels) of the DataFrame.\n\n The index of a DataFrame is a series of labels that identify each row.\n The labels can be integers, strings, or any other hashable type. 
The index\n is used for label-based access and alignment, and can be accessed or\n modified using this attribute.\n\n Returns\n -------\n pandas.Index\n The index labels of the DataFrame.\n\n See Also\n --------\n DataFrame.columns : The column labels of the DataFrame.\n DataFrame.to_numpy : Convert the DataFrame to a NumPy array.\n\n Examples\n --------\n >>> df = pd.DataFrame({'Name': ['Alice', 'Bob', 'Aritra'],\n ... 'Age': [25, 30, 35],\n ... 'Location': ['Seattle', 'New York', 'Kona']},\n ... index=([10, 20, 30]))\n >>> df.index\n Index([10, 20, 30], dtype='int64')\n\n In this example, we create a DataFrame with 3 rows and 3 columns,\n including Name, Age, and Location information. We set the index labels to\n be the integers 10, 20, and 30. We then access the `index` attribute of the\n DataFrame, which returns an `Index` object containing the index labels.\n\n >>> df.index = [100, 200, 300]\n >>> df\n Name Age Location\n 100 Alice 25 Seattle\n 200 Bob 30 New York\n 300 Aritra 35 Kona\n\n In this example, we modify the index labels of the DataFrame by assigning\n a new list of labels to the `index` attribute. The DataFrame is then\n updated with the new labels, and the output shows the modified DataFrame.\n ") columns = properties.AxisProperty(axis=0, doc="\n The column labels of the DataFrame.\n\n Returns\n -------\n pandas.Index\n The column labels of the DataFrame.\n\n See Also\n --------\n DataFrame.index: The index (row labels) of the DataFrame.\n DataFrame.axes: Return a list representing the axes of the DataFrame.\n\n Examples\n --------\n >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})\n >>> df\n A B\n 0 1 3\n 1 2 4\n >>> df.columns\n Index(['A', 'B'], dtype='object')\n ") plot = Accessor('plot', pandas.plotting.PlotAccessor) hist = pandas.plotting.hist_frame boxplot = pandas.plotting.boxplot_frame sparse = Accessor('sparse', SparseFrameAccessor) def _to_dict_of_blocks(self): mgr = self._mgr return {k: self._constructor_from_mgr(v, axes=v.axes).__finalize__(self) for (k, v) in mgr.to_iter_dict()} @property def values(self) -> np.ndarray: return self._mgr.as_array() def _from_nested_dict(data: Mapping[HashableT, Mapping[HashableT2, T]]) -> collections.defaultdict[HashableT2, dict[HashableT, T]]: new_data: collections.defaultdict[HashableT2, dict[HashableT, T]] = collections.defaultdict(dict) for (index, s) in data.items(): for (col, v) in s.items(): new_data[col][index] = v return new_data def _reindex_for_setitem(value: DataFrame | Series, index: Index) -> tuple[ArrayLike, BlockValuesRefs | None]: if value.index.equals(index) or not len(index): if isinstance(value, Series): return (value._values, value._references) return (value._values.copy(), None) try: reindexed_value = value.reindex(index)._values except ValueError as err: if not value.index.is_unique: raise err raise TypeError('incompatible index of inserted column with frame index') from err return (reindexed_value, None) # File: pandas-main/pandas/core/generic.py from __future__ import annotations import collections from copy import deepcopy import datetime as dt from functools import partial from json import loads import operator import pickle import re import sys from typing import TYPE_CHECKING, Any, ClassVar, Literal, NoReturn, cast, final, overload import warnings import numpy as np from pandas._config import config from pandas._libs import lib from pandas._libs.lib import is_range_indexer from pandas._libs.tslibs import Period, Timestamp, to_offset from pandas._typing import AlignJoin, AnyArrayLike, ArrayLike, 
Axes, Axis, AxisInt, CompressionOptions, Concatenate, DtypeArg, DtypeBackend, DtypeObj, FilePath, FillnaOptions, FloatFormatType, FormattersType, Frequency, IgnoreRaise, IndexKeyFunc, IndexLabel, InterpolateOptions, IntervalClosedType, JSONSerializable, Level, ListLike, Manager, NaPosition, NDFrameT, OpenFileErrors, RandomState, ReindexMethod, Renamer, Scalar, Self, SequenceNotStr, SortKind, StorageOptions, Suffixes, T, TimeAmbiguous, TimedeltaConvertibleTypes, TimeNonexistent, TimestampConvertibleTypes, TimeUnit, ValueKeyFunc, WriteBuffer, WriteExcelBuffer, npt from pandas.compat import PYPY from pandas.compat._constants import REF_COUNT from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError, ChainedAssignmentError, InvalidIndexError from pandas.errors.cow import _chained_assignment_method_msg from pandas.util._decorators import deprecate_kwarg, doc from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend, validate_ascending, validate_bool_kwarg, validate_inclusive from pandas.core.dtypes.astype import astype_is_view from pandas.core.dtypes.common import ensure_object, ensure_platform_int, ensure_str, is_bool, is_bool_dtype, is_dict_like, is_extension_array_dtype, is_list_like, is_number, is_numeric_dtype, is_re_compilable, is_scalar, pandas_dtype from pandas.core.dtypes.dtypes import DatetimeTZDtype, ExtensionDtype, PeriodDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries from pandas.core.dtypes.inference import is_hashable, is_nested_list_like from pandas.core.dtypes.missing import isna, notna from pandas.core import algorithms as algos, arraylike, common, indexing, missing, nanops, sample from pandas.core.array_algos.replace import should_use_regex from pandas.core.arrays import ExtensionArray from pandas.core.base import PandasObject from pandas.core.construction import extract_array from pandas.core.flags import Flags from pandas.core.indexes.api import DatetimeIndex, Index, MultiIndex, PeriodIndex, default_index, ensure_index from pandas.core.internals import BlockManager from pandas.core.methods.describe import describe_ndframe from pandas.core.missing import clean_fill_method, clean_reindex_fill_method, find_valid_index from pandas.core.reshape.concat import concat from pandas.core.shared_docs import _shared_docs from pandas.core.sorting import get_indexer_indexer from pandas.core.window import Expanding, ExponentialMovingWindow, Rolling, Window from pandas.io.formats.format import DataFrameFormatter, DataFrameRenderer from pandas.io.formats.printing import pprint_thing if TYPE_CHECKING: from collections.abc import Callable from collections.abc import Hashable, Iterator, Mapping, Sequence from pandas._libs.tslibs import BaseOffset from pandas._typing import P from pandas import DataFrame, ExcelWriter, HDFStore, Series from pandas.core.indexers.objects import BaseIndexer from pandas.core.resample import Resampler import textwrap _shared_docs = {**_shared_docs} _shared_doc_kwargs = {'axes': 'keywords for axes', 'klass': 'Series/DataFrame', 'axes_single_arg': "{0 or 'index'} for Series, {0 or 'index', 1 or 'columns'} for DataFrame", 'inplace': '\n inplace : bool, default False\n If True, performs operation inplace and returns None.', 'optional_by': '\n by : str or list of str\n Name or list of names to sort by'} class NDFrame(PandasObject, indexing.IndexingMixin): _internal_names: list[str] = ['_mgr', '_item_cache', 
'_cache', '_name', '_metadata', '_flags'] _internal_names_set: set[str] = set(_internal_names) _accessors: set[str] = set() _hidden_attrs: frozenset[str] = frozenset([]) _metadata: list[str] = [] _mgr: Manager _attrs: dict[Hashable, Any] _typ: str def __init__(self, data: Manager) -> None: object.__setattr__(self, '_mgr', data) object.__setattr__(self, '_attrs', {}) object.__setattr__(self, '_flags', Flags(self, allows_duplicate_labels=True)) @final @classmethod def _init_mgr(cls, mgr: Manager, axes: dict[Literal['index', 'columns'], Axes | None], dtype: DtypeObj | None=None, copy: bool=False) -> Manager: for (a, axe) in axes.items(): if axe is not None: axe = ensure_index(axe) bm_axis = cls._get_block_manager_axis(a) mgr = mgr.reindex_axis(axe, axis=bm_axis) if copy: mgr = mgr.copy() if dtype is not None: if isinstance(mgr, BlockManager) and len(mgr.blocks) == 1 and (mgr.blocks[0].values.dtype == dtype): pass else: mgr = mgr.astype(dtype=dtype) return mgr @final @classmethod def _from_mgr(cls, mgr: Manager, axes: list[Index]) -> Self: obj = cls.__new__(cls) NDFrame.__init__(obj, mgr) return obj @property def attrs(self) -> dict[Hashable, Any]: return self._attrs @attrs.setter def attrs(self, value: Mapping[Hashable, Any]) -> None: self._attrs = dict(value) @final @property def flags(self) -> Flags: return self._flags @final def set_flags(self, *, copy: bool | lib.NoDefault=lib.no_default, allows_duplicate_labels: bool | None=None) -> Self: self._check_copy_deprecation(copy) df = self.copy(deep=False) if allows_duplicate_labels is not None: df.flags['allows_duplicate_labels'] = allows_duplicate_labels return df @final @classmethod def _validate_dtype(cls, dtype) -> DtypeObj | None: if dtype is not None: dtype = pandas_dtype(dtype) if dtype.kind == 'V': raise NotImplementedError(f'compound dtypes are not implemented in the {cls.__name__} constructor') return dtype @property def _constructor(self) -> Callable[..., Self]: raise AbstractMethodError(self) _AXIS_ORDERS: list[Literal['index', 'columns']] _AXIS_TO_AXIS_NUMBER: dict[Axis, AxisInt] = {0: 0, 'index': 0, 'rows': 0} _info_axis_number: int _info_axis_name: Literal['index', 'columns'] _AXIS_LEN: int @final def _construct_axes_dict(self, axes: Sequence[Axis] | None=None, **kwargs: AxisInt) -> dict: d = {a: self._get_axis(a) for a in axes or self._AXIS_ORDERS} d.update(kwargs) return d @final @classmethod def _get_axis_number(cls, axis: Axis) -> AxisInt: try: return cls._AXIS_TO_AXIS_NUMBER[axis] except KeyError as err: raise ValueError(f'No axis named {axis} for object type {cls.__name__}') from err @final @classmethod def _get_axis_name(cls, axis: Axis) -> Literal['index', 'columns']: axis_number = cls._get_axis_number(axis) return cls._AXIS_ORDERS[axis_number] @final def _get_axis(self, axis: Axis) -> Index: axis_number = self._get_axis_number(axis) assert axis_number in {0, 1} return self.index if axis_number == 0 else self.columns @final @classmethod def _get_block_manager_axis(cls, axis: Axis) -> AxisInt: axis = cls._get_axis_number(axis) ndim = cls._AXIS_LEN if ndim == 2: return 1 - axis return axis @final def _get_axis_resolvers(self, axis: str) -> dict[str, Series | MultiIndex]: axis_index = getattr(self, axis) d = {} prefix = axis[0] for (i, name) in enumerate(axis_index.names): if name is not None: key = level = name else: key = f'{prefix}level_{i}' level = i level_values = axis_index.get_level_values(level) s = level_values.to_series() s.index = axis_index d[key] = s if isinstance(axis_index, MultiIndex): dindex = axis_index 
else: dindex = axis_index.to_series() d[axis] = dindex return d @final def _get_index_resolvers(self) -> dict[Hashable, Series | MultiIndex]: from pandas.core.computation.parsing import clean_column_name d: dict[str, Series | MultiIndex] = {} for axis_name in self._AXIS_ORDERS: d.update(self._get_axis_resolvers(axis_name)) return {clean_column_name(k): v for (k, v) in d.items() if not isinstance(k, int)} @final def _get_cleaned_column_resolvers(self) -> dict[Hashable, Series]: from pandas.core.computation.parsing import clean_column_name from pandas.core.series import Series if isinstance(self, ABCSeries): return {clean_column_name(self.name): self} dtypes = self.dtypes return {clean_column_name(k): Series(v, copy=False, index=self.index, name=k, dtype=dtypes[k]).__finalize__(self) for (k, v) in zip(self.columns, self._iter_column_arrays()) if not isinstance(k, int)} @final @property def _info_axis(self) -> Index: return getattr(self, self._info_axis_name) @property def shape(self) -> tuple[int, ...]: return tuple((len(self._get_axis(a)) for a in self._AXIS_ORDERS)) @property def axes(self) -> list[Index]: return [self._get_axis(a) for a in self._AXIS_ORDERS] @final @property def ndim(self) -> int: return self._mgr.ndim @final @property def size(self) -> int: return int(np.prod(self.shape)) def set_axis(self, labels, *, axis: Axis=0, copy: bool | lib.NoDefault=lib.no_default) -> Self: self._check_copy_deprecation(copy) return self._set_axis_nocheck(labels, axis, inplace=False) @overload def _set_axis_nocheck(self, labels, axis: Axis, inplace: Literal[False]) -> Self: ... @overload def _set_axis_nocheck(self, labels, axis: Axis, inplace: Literal[True]) -> None: ... @overload def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool) -> Self | None: ... @final def _set_axis_nocheck(self, labels, axis: Axis, inplace: bool) -> Self | None: if inplace: setattr(self, self._get_axis_name(axis), labels) return None obj = self.copy(deep=False) setattr(obj, obj._get_axis_name(axis), labels) return obj @final def _set_axis(self, axis: AxisInt, labels: AnyArrayLike | list) -> None: labels = ensure_index(labels) self._mgr.set_axis(axis, labels) @final @doc(klass=_shared_doc_kwargs['klass']) def droplevel(self, level: IndexLabel, axis: Axis=0) -> Self: labels = self._get_axis(axis) new_labels = labels.droplevel(level) return self.set_axis(new_labels, axis=axis) def pop(self, item: Hashable) -> Series | Any: result = self[item] del self[item] return result @final def squeeze(self, axis: Axis | None=None): axes = range(self._AXIS_LEN) if axis is None else (self._get_axis_number(axis),) result = self.iloc[tuple((0 if i in axes and len(a) == 1 else slice(None) for (i, a) in enumerate(self.axes)))] if isinstance(result, NDFrame): result = result.__finalize__(self, method='squeeze') return result @overload def _rename(self, mapper: Renamer | None=..., *, index: Renamer | None=..., columns: Renamer | None=..., axis: Axis | None=..., inplace: Literal[False]=..., level: Level | None=..., errors: str=...) -> Self: ... @overload def _rename(self, mapper: Renamer | None=..., *, index: Renamer | None=..., columns: Renamer | None=..., axis: Axis | None=..., inplace: Literal[True], level: Level | None=..., errors: str=...) -> None: ... @overload def _rename(self, mapper: Renamer | None=..., *, index: Renamer | None=..., columns: Renamer | None=..., axis: Axis | None=..., inplace: bool, level: Level | None=..., errors: str=...) -> Self | None: ... 
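# Hedged usage sketch for the _rename machinery implemented below (it backs
# the public DataFrame.rename / Series.rename): a single ``mapper`` pairs with
# ``axis``, the explicit ``index=``/``columns=`` keywords exclude both, and
# errors='raise' turns unmatched dict keys into a KeyError:
# >>> import pandas as pd
# >>> df = pd.DataFrame({"A": [1], "B": [2]})
# >>> df.rename(columns={"A": "a"})                  # keyword form
# >>> df.rename({"A": "a"}, axis=1)                  # mapper + axis form
# >>> df.rename(columns={"X": "x"}, errors="raise")  # KeyError: ['X'] not found in axis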
@final def _rename(self, mapper: Renamer | None=None, *, index: Renamer | None=None, columns: Renamer | None=None, axis: Axis | None=None, inplace: bool=False, level: Level | None=None, errors: str='ignore') -> Self | None: if mapper is None and index is None and (columns is None): raise TypeError('must pass an index to rename') if index is not None or columns is not None: if axis is not None: raise TypeError("Cannot specify both 'axis' and any of 'index' or 'columns'") if mapper is not None: raise TypeError("Cannot specify both 'mapper' and any of 'index' or 'columns'") elif axis and self._get_axis_number(axis) == 1: columns = mapper else: index = mapper self._check_inplace_and_allows_duplicate_labels(inplace) result = self if inplace else self.copy(deep=False) for (axis_no, replacements) in enumerate((index, columns)): if replacements is None: continue ax = self._get_axis(axis_no) f = common.get_rename_function(replacements) if level is not None: level = ax._get_level_number(level) if not callable(replacements): if ax._is_multi and level is not None: indexer = ax.get_level_values(level).get_indexer_for(replacements) else: indexer = ax.get_indexer_for(replacements) if errors == 'raise' and len(indexer[indexer == -1]): missing_labels = [label for (index, label) in enumerate(replacements) if indexer[index] == -1] raise KeyError(f'{missing_labels} not found in axis') new_index = ax._transform_index(f, level=level) result._set_axis_nocheck(new_index, axis=axis_no, inplace=True) if inplace: self._update_inplace(result) return None else: return result.__finalize__(self, method='rename') @overload def rename_axis(self, mapper: IndexLabel | lib.NoDefault=..., *, index=..., columns=..., axis: Axis=..., copy: bool | lib.NoDefault=lib.no_default, inplace: Literal[False]=...) -> Self: ... @overload def rename_axis(self, mapper: IndexLabel | lib.NoDefault=..., *, index=..., columns=..., axis: Axis=..., copy: bool | lib.NoDefault=lib.no_default, inplace: Literal[True]) -> None: ... @overload def rename_axis(self, mapper: IndexLabel | lib.NoDefault=..., *, index=..., columns=..., axis: Axis=..., copy: bool | lib.NoDefault=lib.no_default, inplace: bool=...) -> Self | None: ... def rename_axis(self, mapper: IndexLabel | lib.NoDefault=lib.no_default, *, index=lib.no_default, columns=lib.no_default, axis: Axis=0, copy: bool | lib.NoDefault=lib.no_default, inplace: bool=False) -> Self | None: self._check_copy_deprecation(copy) axes = {'index': index, 'columns': columns} if axis is not None: axis = self._get_axis_number(axis) inplace = validate_bool_kwarg(inplace, 'inplace') if mapper is not lib.no_default: non_mapper = is_scalar(mapper) or (is_list_like(mapper) and (not is_dict_like(mapper))) if non_mapper: return self._set_axis_name(mapper, axis=axis, inplace=inplace) else: raise ValueError('Use `.rename` to alter labels with a mapper.') else: result = self if inplace else self.copy(deep=False) for axis in range(self._AXIS_LEN): v = axes.get(self._get_axis_name(axis)) if v is lib.no_default: continue non_mapper = is_scalar(v) or (is_list_like(v) and (not is_dict_like(v))) if non_mapper: newnames = v else: f = common.get_rename_function(v) curnames = self._get_axis(axis).names newnames = [f(name) for name in curnames] result._set_axis_name(newnames, axis=axis, inplace=True) if not inplace: return result return None @overload def _set_axis_name(self, name, axis: Axis=..., *, inplace: Literal[False]=...) -> Self: ... 
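# Hedged sketch for _set_axis_name (the helper rename_axis, above, delegates
# to for scalar or list mappers): it renames the name of the axis labels,
# not the labels themselves:
# >>> import pandas as pd
# >>> df = pd.DataFrame({"x": [1, 2]}, index=pd.Index([10, 20], name="id"))
# >>> df.rename_axis("row_id").index.name            # 'row_id'
# >>> df.rename_axis(columns="fields").columns.name  # 'fields'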
@overload def _set_axis_name(self, name, axis: Axis=..., *, inplace: Literal[True]) -> None: ... @overload def _set_axis_name(self, name, axis: Axis=..., *, inplace: bool) -> Self | None: ... @final def _set_axis_name(self, name, axis: Axis=0, *, inplace: bool=False) -> Self | None: axis = self._get_axis_number(axis) idx = self._get_axis(axis).set_names(name) inplace = validate_bool_kwarg(inplace, 'inplace') renamed = self if inplace else self.copy(deep=False) if axis == 0: renamed.index = idx else: renamed.columns = idx if not inplace: return renamed return None @final def _indexed_same(self, other) -> bool: return all((self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS)) @final def equals(self, other: object) -> bool: if not (isinstance(other, type(self)) or isinstance(self, type(other))): return False other = cast(NDFrame, other) return self._mgr.equals(other._mgr) @final def __neg__(self) -> Self: def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): return operator.inv(values) else: return operator.neg(values) new_data = self._mgr.apply(blk_func) res = self._constructor_from_mgr(new_data, axes=new_data.axes) return res.__finalize__(self, method='__neg__') @final def __pos__(self) -> Self: def blk_func(values: ArrayLike): if is_bool_dtype(values.dtype): return values.copy() else: return operator.pos(values) new_data = self._mgr.apply(blk_func) res = self._constructor_from_mgr(new_data, axes=new_data.axes) return res.__finalize__(self, method='__pos__') @final def __invert__(self) -> Self: if not self.size: return self.copy(deep=False) new_data = self._mgr.apply(operator.invert) res = self._constructor_from_mgr(new_data, axes=new_data.axes) return res.__finalize__(self, method='__invert__') @final def __bool__(self) -> NoReturn: raise ValueError(f'The truth value of a {type(self).__name__} is ambiguous. 
Use a.empty, a.bool(), a.item(), a.any() or a.all().') @final def abs(self) -> Self: res_mgr = self._mgr.apply(np.abs) return self._constructor_from_mgr(res_mgr, axes=res_mgr.axes).__finalize__(self, name='abs') @final def __abs__(self) -> Self: return self.abs() @final def __round__(self, decimals: int=0) -> Self: return self.round(decimals).__finalize__(self, method='__round__') @final def _is_level_reference(self, key: Level, axis: Axis=0) -> bool: axis_int = self._get_axis_number(axis) return key is not None and is_hashable(key) and (key in self.axes[axis_int].names) and (not self._is_label_reference(key, axis=axis_int)) @final def _is_label_reference(self, key: Level, axis: Axis=0) -> bool: axis_int = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int) return key is not None and is_hashable(key) and any((key in self.axes[ax] for ax in other_axes)) @final def _is_label_or_level_reference(self, key: Level, axis: AxisInt=0) -> bool: return self._is_level_reference(key, axis=axis) or self._is_label_reference(key, axis=axis) @final def _check_label_or_level_ambiguity(self, key: Level, axis: Axis=0) -> None: axis_int = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis_int) if key is not None and is_hashable(key) and (key in self.axes[axis_int].names) and any((key in self.axes[ax] for ax in other_axes)): (level_article, level_type) = ('an', 'index') if axis_int == 0 else ('a', 'column') (label_article, label_type) = ('a', 'column') if axis_int == 0 else ('an', 'index') msg = f"'{key}' is both {level_article} {level_type} level and {label_article} {label_type} label, which is ambiguous." raise ValueError(msg) @final def _get_label_or_level_values(self, key: Level, axis: AxisInt=0) -> ArrayLike: axis = self._get_axis_number(axis) first_other_axes = next((ax for ax in range(self._AXIS_LEN) if ax != axis), None) if self._is_label_reference(key, axis=axis): self._check_label_or_level_ambiguity(key, axis=axis) if first_other_axes is None: raise ValueError('axis matched all axes') values = self.xs(key, axis=first_other_axes)._values elif self._is_level_reference(key, axis=axis): values = self.axes[axis].get_level_values(key)._values else: raise KeyError(key) if values.ndim > 1: if first_other_axes is not None and isinstance(self._get_axis(first_other_axes), MultiIndex): multi_message = '\nFor a multi-index, the label must be a tuple with elements corresponding to each level.' 
else: multi_message = '' label_axis_name = 'column' if axis == 0 else 'index' raise ValueError(f"The {label_axis_name} label '{key}' is not unique.{multi_message}") return values @final def _drop_labels_or_levels(self, keys, axis: AxisInt=0): axis = self._get_axis_number(axis) keys = common.maybe_make_list(keys) invalid_keys = [k for k in keys if not self._is_label_or_level_reference(k, axis=axis)] if invalid_keys: raise ValueError(f'The following keys are not valid labels or levels for axis {axis}: {invalid_keys}') levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)] labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)] dropped = self.copy(deep=False) if axis == 0: if levels_to_drop: dropped.reset_index(levels_to_drop, drop=True, inplace=True) if labels_to_drop: dropped.drop(labels_to_drop, axis=1, inplace=True) else: if levels_to_drop: if isinstance(dropped.columns, MultiIndex): dropped.columns = dropped.columns.droplevel(levels_to_drop) else: dropped.columns = default_index(dropped.columns.size) if labels_to_drop: dropped.drop(labels_to_drop, axis=0, inplace=True) return dropped __hash__: ClassVar[None] def __iter__(self) -> Iterator: return iter(self._info_axis) def keys(self) -> Index: return self._info_axis def items(self): for h in self._info_axis: yield (h, self[h]) def __len__(self) -> int: return len(self._info_axis) @final def __contains__(self, key) -> bool: return key in self._info_axis @property def empty(self) -> bool: return any((len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)) __array_priority__: int = 1000 def __array__(self, dtype: npt.DTypeLike | None=None, copy: bool | None=None) -> np.ndarray: values = self._values arr = np.asarray(values, dtype=dtype) if astype_is_view(values.dtype, arr.dtype) and self._mgr.is_single_block: if astype_is_view(self.dtypes.iloc[0], values.dtype) and astype_is_view(values.dtype, arr.dtype): arr = arr.view() arr.flags.writeable = False return arr @final def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any): return arraylike.array_ufunc(self, ufunc, method, *inputs, **kwargs) @final def __getstate__(self) -> dict[str, Any]: meta = {k: getattr(self, k, None) for k in self._metadata} return {'_mgr': self._mgr, '_typ': self._typ, '_metadata': self._metadata, 'attrs': self.attrs, '_flags': {k: self.flags[k] for k in self.flags._keys}, **meta} @final def __setstate__(self, state) -> None: if isinstance(state, BlockManager): self._mgr = state elif isinstance(state, dict): if '_data' in state and '_mgr' not in state: state['_mgr'] = state.pop('_data') typ = state.get('_typ') if typ is not None: attrs = state.get('_attrs', {}) if attrs is None: attrs = {} object.__setattr__(self, '_attrs', attrs) flags = state.get('_flags', {'allows_duplicate_labels': True}) object.__setattr__(self, '_flags', Flags(self, **flags)) meta = set(self._internal_names + self._metadata) for k in meta: if k in state and k != '_flags': v = state[k] object.__setattr__(self, k, v) for (k, v) in state.items(): if k not in meta: object.__setattr__(self, k, v) else: raise NotImplementedError('Pre-0.12 pickles are no longer supported') elif len(state) == 2: raise NotImplementedError('Pre-0.12 pickles are no longer supported') def __repr__(self) -> str: prepr = f"[{','.join(map(pprint_thing, self))}]" return f'{type(self).__name__}({prepr})' @final def _repr_latex_(self): if config.get_option('styler.render.repr') == 'latex': return self.to_latex() else: return None @final def 
_repr_data_resource_(self): if config.get_option('display.html.table_schema'): data = self.head(config.get_option('display.max_rows')) as_json = data.to_json(orient='table') as_json = cast(str, as_json) return loads(as_json, object_pairs_hook=collections.OrderedDict) @final @doc(klass='object', storage_options=_shared_docs['storage_options'], storage_options_versionadded='1.2.0', encoding_parameter='', verbose_parameter='', extra_parameters=textwrap.dedent(' engine_kwargs : dict, optional\n Arbitrary keyword arguments passed to excel engine.\n ')) def to_excel(self, excel_writer: FilePath | WriteExcelBuffer | ExcelWriter, *, sheet_name: str='Sheet1', na_rep: str='', float_format: str | None=None, columns: Sequence[Hashable] | None=None, header: Sequence[Hashable] | bool=True, index: bool=True, index_label: IndexLabel | None=None, startrow: int=0, startcol: int=0, engine: Literal['openpyxl', 'xlsxwriter'] | None=None, merge_cells: bool=True, inf_rep: str='inf', freeze_panes: tuple[int, int] | None=None, storage_options: StorageOptions | None=None, engine_kwargs: dict[str, Any] | None=None) -> None: if engine_kwargs is None: engine_kwargs = {} df = self if isinstance(self, ABCDataFrame) else self.to_frame() from pandas.io.formats.excel import ExcelFormatter formatter = ExcelFormatter(df, na_rep=na_rep, cols=columns, header=header, float_format=float_format, index=index, index_label=index_label, merge_cells=merge_cells, inf_rep=inf_rep) formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow, startcol=startcol, freeze_panes=freeze_panes, engine=engine, storage_options=storage_options, engine_kwargs=engine_kwargs) @final @doc(storage_options=_shared_docs['storage_options'], compression_options=_shared_docs['compression_options'] % 'path_or_buf') def to_json(self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None=None, *, orient: Literal['split', 'records', 'index', 'table', 'columns', 'values'] | None=None, date_format: str | None=None, double_precision: int=10, force_ascii: bool=True, date_unit: TimeUnit='ms', default_handler: Callable[[Any], JSONSerializable] | None=None, lines: bool=False, compression: CompressionOptions='infer', index: bool | None=None, indent: int | None=None, storage_options: StorageOptions | None=None, mode: Literal['a', 'w']='w') -> str | None: from pandas.io import json if date_format is None and orient == 'table': date_format = 'iso' elif date_format is None: date_format = 'epoch' dtypes = self.dtypes if self.ndim == 2 else [self.dtype] if any((lib.is_np_dtype(dtype, 'mM') for dtype in dtypes)): warnings.warn("The default 'epoch' date format is deprecated and will be removed in a future version, please use 'iso' date format instead.", FutureWarning, stacklevel=find_stack_level()) elif date_format == 'epoch': warnings.warn("'epoch' date format is deprecated and will be removed in a future version, please use 'iso' date format instead.", FutureWarning, stacklevel=find_stack_level()) config.is_nonnegative_int(indent) indent = indent or 0 return json.to_json(path_or_buf=path_or_buf, obj=self, orient=orient, date_format=date_format, double_precision=double_precision, force_ascii=force_ascii, date_unit=date_unit, default_handler=default_handler, lines=lines, compression=compression, index=index, indent=indent, storage_options=storage_options, mode=mode) @final def to_hdf(self, path_or_buf: FilePath | HDFStore, *, key: str, mode: Literal['a', 'w', 'r+']='a', complevel: int | None=None, complib: Literal['zlib', 'lzo', 'bzip2', 'blosc'] | 
None=None, append: bool=False, format: Literal['fixed', 'table'] | None=None, index: bool=True, min_itemsize: int | dict[str, int] | None=None, nan_rep=None, dropna: bool | None=None, data_columns: Literal[True] | list[str] | None=None, errors: OpenFileErrors='strict', encoding: str='UTF-8') -> None: from pandas.io import pytables pytables.to_hdf(path_or_buf, key, self, mode=mode, complevel=complevel, complib=complib, append=append, format=format, index=index, min_itemsize=min_itemsize, nan_rep=nan_rep, dropna=dropna, data_columns=data_columns, errors=errors, encoding=encoding) @final def to_sql(self, name: str, con, *, schema: str | None=None, if_exists: Literal['fail', 'replace', 'append']='fail', index: bool=True, index_label: IndexLabel | None=None, chunksize: int | None=None, dtype: DtypeArg | None=None, method: Literal['multi'] | Callable | None=None) -> int | None: from pandas.io import sql return sql.to_sql(self, name, con, schema=schema, if_exists=if_exists, index=index, index_label=index_label, chunksize=chunksize, dtype=dtype, method=method) @final @doc(storage_options=_shared_docs['storage_options'], compression_options=_shared_docs['compression_options'] % 'path') def to_pickle(self, path: FilePath | WriteBuffer[bytes], *, compression: CompressionOptions='infer', protocol: int=pickle.HIGHEST_PROTOCOL, storage_options: StorageOptions | None=None) -> None: from pandas.io.pickle import to_pickle to_pickle(self, path, compression=compression, protocol=protocol, storage_options=storage_options) @final def to_clipboard(self, *, excel: bool=True, sep: str | None=None, **kwargs) -> None: from pandas.io import clipboards clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs) @final def to_xarray(self): xarray = import_optional_dependency('xarray') if self.ndim == 1: return xarray.DataArray.from_series(self) else: return xarray.Dataset.from_dataframe(self) @overload def to_latex(self, buf: None=..., *, columns: Sequence[Hashable] | None=..., header: bool | SequenceNotStr[str]=..., index: bool=..., na_rep: str=..., formatters: FormattersType | None=..., float_format: FloatFormatType | None=..., sparsify: bool | None=..., index_names: bool=..., bold_rows: bool=..., column_format: str | None=..., longtable: bool | None=..., escape: bool | None=..., encoding: str | None=..., decimal: str=..., multicolumn: bool | None=..., multicolumn_format: str | None=..., multirow: bool | None=..., caption: str | tuple[str, str] | None=..., label: str | None=..., position: str | None=...) -> str: ... @overload def to_latex(self, buf: FilePath | WriteBuffer[str], *, columns: Sequence[Hashable] | None=..., header: bool | SequenceNotStr[str]=..., index: bool=..., na_rep: str=..., formatters: FormattersType | None=..., float_format: FloatFormatType | None=..., sparsify: bool | None=..., index_names: bool=..., bold_rows: bool=..., column_format: str | None=..., longtable: bool | None=..., escape: bool | None=..., encoding: str | None=..., decimal: str=..., multicolumn: bool | None=..., multicolumn_format: str | None=..., multirow: bool | None=..., caption: str | tuple[str, str] | None=..., label: str | None=..., position: str | None=...) -> None: ... 
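# --- Illustrative usage sketch (not part of the pandas source above) --------
# A minimal, hedged example of the to_json writer defined in the preceding
# block: with path_or_buf=None the serialized document is returned as a str,
# which can be round-tripped through pandas.read_json. The toy frame and
# column names are assumptions made purely for illustration.
from io import StringIO

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})

payload = df.to_json(orient="records")            # no path given -> JSON returned as a str
restored = pd.read_json(StringIO(payload), orient="records")
print(restored)                                   # round-trips the original frame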
@final def to_latex(self, buf: FilePath | WriteBuffer[str] | None=None, *, columns: Sequence[Hashable] | None=None, header: bool | SequenceNotStr[str]=True, index: bool=True, na_rep: str='NaN', formatters: FormattersType | None=None, float_format: FloatFormatType | None=None, sparsify: bool | None=None, index_names: bool=True, bold_rows: bool=False, column_format: str | None=None, longtable: bool | None=None, escape: bool | None=None, encoding: str | None=None, decimal: str='.', multicolumn: bool | None=None, multicolumn_format: str | None=None, multirow: bool | None=None, caption: str | tuple[str, str] | None=None, label: str | None=None, position: str | None=None) -> str | None: if self.ndim == 1: self = self.to_frame() if longtable is None: longtable = config.get_option('styler.latex.environment') == 'longtable' if escape is None: escape = config.get_option('styler.format.escape') == 'latex' if multicolumn is None: multicolumn = config.get_option('styler.sparse.columns') if multicolumn_format is None: multicolumn_format = config.get_option('styler.latex.multicol_align') if multirow is None: multirow = config.get_option('styler.sparse.index') if column_format is not None and (not isinstance(column_format, str)): raise ValueError('`column_format` must be str or unicode') length = len(self.columns) if columns is None else len(columns) if isinstance(header, (list, tuple)) and len(header) != length: raise ValueError(f'Writing {length} cols but got {len(header)} aliases') base_format_ = {'na_rep': na_rep, 'escape': 'latex' if escape else None, 'decimal': decimal} index_format_: dict[str, Any] = {'axis': 0, **base_format_} column_format_: dict[str, Any] = {'axis': 1, **base_format_} if isinstance(float_format, str): float_format_: Callable | None = lambda x: float_format % x else: float_format_ = float_format def _wrap(x, alt_format_): if isinstance(x, (float, complex)) and float_format_ is not None: return float_format_(x) else: return alt_format_(x) formatters_: list | tuple | dict | Callable | None = None if isinstance(formatters, list): formatters_ = {c: partial(_wrap, alt_format_=formatters[i]) for (i, c) in enumerate(self.columns)} elif isinstance(formatters, dict): index_formatter = formatters.pop('__index__', None) column_formatter = formatters.pop('__columns__', None) if index_formatter is not None: index_format_.update({'formatter': index_formatter}) if column_formatter is not None: column_format_.update({'formatter': column_formatter}) formatters_ = formatters float_columns = self.select_dtypes(include='float').columns for col in float_columns: if col not in formatters.keys(): formatters_.update({col: float_format_}) elif formatters is None and float_format is not None: formatters_ = partial(_wrap, alt_format_=lambda v: v) format_index_ = [index_format_, column_format_] hide_: list[dict] = [] relabel_index_: list[dict] = [] if columns: hide_.append({'subset': [c for c in self.columns if c not in columns], 'axis': 'columns'}) if header is False: hide_.append({'axis': 'columns'}) elif isinstance(header, (list, tuple)): relabel_index_.append({'labels': header, 'axis': 'columns'}) format_index_ = [index_format_] if index is False: hide_.append({'axis': 'index'}) if index_names is False: hide_.append({'names': True, 'axis': 'index'}) render_kwargs_ = {'hrules': True, 'sparse_index': sparsify, 'sparse_columns': sparsify, 'environment': 'longtable' if longtable else None, 'multicol_align': multicolumn_format if multicolumn else f'naive-{multicolumn_format}', 'multirow_align': 't' if 
multirow else 'naive', 'encoding': encoding, 'caption': caption, 'label': label, 'position': position, 'column_format': column_format, 'clines': 'skip-last;data' if multirow and isinstance(self.index, MultiIndex) else None, 'bold_rows': bold_rows} return self._to_latex_via_styler(buf, hide=hide_, relabel_index=relabel_index_, format={'formatter': formatters_, **base_format_}, format_index=format_index_, render_kwargs=render_kwargs_) @final def _to_latex_via_styler(self, buf=None, *, hide: dict | list[dict] | None=None, relabel_index: dict | list[dict] | None=None, format: dict | list[dict] | None=None, format_index: dict | list[dict] | None=None, render_kwargs: dict | None=None): from pandas.io.formats.style import Styler self = cast('DataFrame', self) styler = Styler(self, uuid='') for kw_name in ['hide', 'relabel_index', 'format', 'format_index']: kw = vars()[kw_name] if isinstance(kw, dict): getattr(styler, kw_name)(**kw) elif isinstance(kw, list): for sub_kw in kw: getattr(styler, kw_name)(**sub_kw) render_kwargs = {} if render_kwargs is None else render_kwargs if render_kwargs.pop('bold_rows'): styler.map_index(lambda v: 'textbf:--rwrap;') return styler.to_latex(buf=buf, **render_kwargs) @overload def to_csv(self, path_or_buf: None=..., *, sep: str=..., na_rep: str=..., float_format: str | Callable | None=..., columns: Sequence[Hashable] | None=..., header: bool | list[str]=..., index: bool=..., index_label: IndexLabel | None=..., mode: str=..., encoding: str | None=..., compression: CompressionOptions=..., quoting: int | None=..., quotechar: str=..., lineterminator: str | None=..., chunksize: int | None=..., date_format: str | None=..., doublequote: bool=..., escapechar: str | None=..., decimal: str=..., errors: OpenFileErrors=..., storage_options: StorageOptions=...) -> str: ... @overload def to_csv(self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str], *, sep: str=..., na_rep: str=..., float_format: str | Callable | None=..., columns: Sequence[Hashable] | None=..., header: bool | list[str]=..., index: bool=..., index_label: IndexLabel | None=..., mode: str=..., encoding: str | None=..., compression: CompressionOptions=..., quoting: int | None=..., quotechar: str=..., lineterminator: str | None=..., chunksize: int | None=..., date_format: str | None=..., doublequote: bool=..., escapechar: str | None=..., decimal: str=..., errors: OpenFileErrors=..., storage_options: StorageOptions=...) -> None: ... 
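# --- Illustrative usage sketch (not part of the pandas source above) --------
# A hedged example of the to_csv/to_latex writers declared above: when no
# path or buffer is supplied, both return the rendered output as a string.
# The small frame is an assumption for illustration; note that to_latex
# renders through the Styler, so the optional jinja2 dependency must be
# installed for it to work.
import pandas as pd

df = pd.DataFrame({"name": ["ant", "bee"], "mass_g": [0.003, 0.1]})

csv_text = df.to_csv(index=False)      # returns a str, no file written
latex_text = df.to_latex(index=False)  # returns a LaTeX tabular as a str

print(csv_text)
print(latex_text)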
@final @doc(storage_options=_shared_docs['storage_options'], compression_options=_shared_docs['compression_options'] % 'path_or_buf') def to_csv(self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None=None, *, sep: str=',', na_rep: str='', float_format: str | Callable | None=None, columns: Sequence[Hashable] | None=None, header: bool | list[str]=True, index: bool=True, index_label: IndexLabel | None=None, mode: str='w', encoding: str | None=None, compression: CompressionOptions='infer', quoting: int | None=None, quotechar: str='"', lineterminator: str | None=None, chunksize: int | None=None, date_format: str | None=None, doublequote: bool=True, escapechar: str | None=None, decimal: str='.', errors: OpenFileErrors='strict', storage_options: StorageOptions | None=None) -> str | None: df = self if isinstance(self, ABCDataFrame) else self.to_frame() formatter = DataFrameFormatter(frame=df, header=header, index=index, na_rep=na_rep, float_format=float_format, decimal=decimal) return DataFrameRenderer(formatter).to_csv(path_or_buf, lineterminator=lineterminator, sep=sep, encoding=encoding, errors=errors, compression=compression, quoting=quoting, columns=columns, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, date_format=date_format, doublequote=doublequote, escapechar=escapechar, storage_options=storage_options) @final def take(self, indices, axis: Axis=0, **kwargs) -> Self: nv.validate_take((), kwargs) if isinstance(indices, slice): raise TypeError(f'{type(self).__name__}.take requires a sequence of integers, not slice.') indices = np.asarray(indices, dtype=np.intp) if axis == 0 and indices.ndim == 1 and is_range_indexer(indices, len(self)): return self.copy(deep=False) new_data = self._mgr.take(indices, axis=self._get_block_manager_axis(axis), verify=True) return self._constructor_from_mgr(new_data, axes=new_data.axes).__finalize__(self, method='take') @final def xs(self, key: IndexLabel, axis: Axis=0, level: IndexLabel | None=None, drop_level: bool=True) -> Self: axis = self._get_axis_number(axis) labels = self._get_axis(axis) if isinstance(key, list): raise TypeError('list keys are not supported in xs, pass a tuple instead') if level is not None: if not isinstance(labels, MultiIndex): raise TypeError('Index must be a MultiIndex') (loc, new_ax) = labels.get_loc_level(key, level=level, drop_level=drop_level) _indexer = [slice(None)] * self.ndim _indexer[axis] = loc indexer = tuple(_indexer) result = self.iloc[indexer] setattr(result, result._get_axis_name(axis), new_ax) return result if axis == 1: if drop_level: return self[key] index = self.columns else: index = self.index if isinstance(index, MultiIndex): (loc, new_index) = index._get_loc_level(key, level=0) if not drop_level: if lib.is_integer(loc): new_index = index[loc:loc + 1] else: new_index = index[loc] else: loc = index.get_loc(key) if isinstance(loc, np.ndarray): if loc.dtype == np.bool_: (inds,) = loc.nonzero() return self.take(inds, axis=axis) else: return self.take(loc, axis=axis) if not is_scalar(loc): new_index = index[loc] if is_scalar(loc) and axis == 0: if self.ndim == 1: return self._values[loc] new_mgr = self._mgr.fast_xs(loc) result = self._constructor_sliced_from_mgr(new_mgr, axes=new_mgr.axes) result._name = self.index[loc] result = result.__finalize__(self) elif is_scalar(loc): result = self.iloc[:, slice(loc, loc + 1)] elif axis == 1: result = self.iloc[:, loc] else: result = self.iloc[loc] result.index = new_index return result def __getitem__(self, item): raise 
AbstractMethodError(self) @final def _getitem_slice(self, key: slice) -> Self: slobj = self.index._convert_slice_indexer(key, kind='getitem') if isinstance(slobj, np.ndarray): indexer = lib.maybe_indices_to_slice(slobj.astype(np.intp), len(self)) if isinstance(indexer, np.ndarray): return self.take(indexer, axis=0) slobj = indexer return self._slice(slobj) def _slice(self, slobj: slice, axis: AxisInt=0) -> Self: assert isinstance(slobj, slice), type(slobj) axis = self._get_block_manager_axis(axis) new_mgr = self._mgr.get_slice(slobj, axis=axis) result = self._constructor_from_mgr(new_mgr, axes=new_mgr.axes) result = result.__finalize__(self) return result @final def __delitem__(self, key) -> None: deleted = False maybe_shortcut = False if self.ndim == 2 and isinstance(self.columns, MultiIndex): try: maybe_shortcut = key not in self.columns._engine except TypeError: pass if maybe_shortcut: if not isinstance(key, tuple): key = (key,) for col in self.columns: if isinstance(col, tuple) and col[:len(key)] == key: del self[col] deleted = True if not deleted: loc = self.axes[-1].get_loc(key) self._mgr = self._mgr.idelete(loc) @final def _check_inplace_and_allows_duplicate_labels(self, inplace: bool) -> None: if inplace and (not self.flags.allows_duplicate_labels): raise ValueError("Cannot specify 'inplace=True' when 'self.flags.allows_duplicate_labels' is False.") @final def get(self, key, default=None): try: return self[key] except (KeyError, ValueError, IndexError): return default @staticmethod def _check_copy_deprecation(copy): if copy is not lib.no_default: warnings.warn('The copy keyword is deprecated and will be removed in a future version. Copy-on-Write is active in pandas since 3.0 which utilizes a lazy copy mechanism that defers copies until necessary. Use .copy() to make an eager copy if necessary.', DeprecationWarning, stacklevel=find_stack_level()) @deprecate_kwarg('method', None) @final def reindex_like(self, other, method: Literal['backfill', 'bfill', 'pad', 'ffill', 'nearest'] | None=None, copy: bool | lib.NoDefault=lib.no_default, limit: int | None=None, tolerance=None) -> Self: self._check_copy_deprecation(copy) d = other._construct_axes_dict(axes=self._AXIS_ORDERS, method=method, limit=limit, tolerance=tolerance) return self.reindex(**d) @overload def drop(self, labels: IndexLabel | ListLike=..., *, axis: Axis=..., index: IndexLabel | ListLike=..., columns: IndexLabel | ListLike=..., level: Level | None=..., inplace: Literal[True], errors: IgnoreRaise=...) -> None: ... @overload def drop(self, labels: IndexLabel | ListLike=..., *, axis: Axis=..., index: IndexLabel | ListLike=..., columns: IndexLabel | ListLike=..., level: Level | None=..., inplace: Literal[False]=..., errors: IgnoreRaise=...) -> Self: ... @overload def drop(self, labels: IndexLabel | ListLike=..., *, axis: Axis=..., index: IndexLabel | ListLike=..., columns: IndexLabel | ListLike=..., level: Level | None=..., inplace: bool=..., errors: IgnoreRaise=...) -> Self | None: ... 
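# --- Illustrative usage sketch (not part of the pandas source above) --------
# Hedged examples of the selection helpers defined above: take picks rows by
# integer position, xs slices a cross-section out of one MultiIndex level,
# and get mirrors dict.get by returning a default for missing keys. The
# sample data is an assumption for illustration.
import pandas as pd

idx = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=["letter", "num"])
df = pd.DataFrame({"val": [10, 20, 30, 40]}, index=idx)

print(df.take([0, 3]))                # rows at integer positions 0 and 3
print(df.xs(1, level="num"))          # all rows where the 'num' level equals 1
print(df.get("missing", default=-1))  # column lookup with a fallback, like dict.get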
def drop(self, labels: IndexLabel | ListLike=None, *, axis: Axis=0, index: IndexLabel | ListLike=None, columns: IndexLabel | ListLike=None, level: Level | None=None, inplace: bool=False, errors: IgnoreRaise='raise') -> Self | None: inplace = validate_bool_kwarg(inplace, 'inplace') if labels is not None: if index is not None or columns is not None: raise ValueError("Cannot specify both 'labels' and 'index'/'columns'") axis_name = self._get_axis_name(axis) axes = {axis_name: labels} elif index is not None or columns is not None: axes = {'index': index} if self.ndim == 2: axes['columns'] = columns else: raise ValueError("Need to specify at least one of 'labels', 'index' or 'columns'") obj = self for (axis, labels) in axes.items(): if labels is not None: obj = obj._drop_axis(labels, axis, level=level, errors=errors) if inplace: self._update_inplace(obj) return None else: return obj @final def _drop_axis(self, labels, axis, level=None, errors: IgnoreRaise='raise', only_slice: bool=False) -> Self: axis_num = self._get_axis_number(axis) axis = self._get_axis(axis) if axis.is_unique: if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError('axis must be a MultiIndex') new_axis = axis.drop(labels, level=level, errors=errors) else: new_axis = axis.drop(labels, errors=errors) indexer = axis.get_indexer(new_axis) else: is_tuple_labels = is_nested_list_like(labels) or isinstance(labels, tuple) labels = ensure_object(common.index_labels_to_array(labels)) if level is not None: if not isinstance(axis, MultiIndex): raise AssertionError('axis must be a MultiIndex') mask = ~axis.get_level_values(level).isin(labels) if errors == 'raise' and mask.all(): raise KeyError(f'{labels} not found in axis') elif isinstance(axis, MultiIndex) and labels.dtype == 'object' and (not is_tuple_labels): mask = ~axis.get_level_values(0).isin(labels) else: mask = ~axis.isin(labels) labels_missing = (axis.get_indexer_for(labels) == -1).any() if errors == 'raise' and labels_missing: raise KeyError(f'{labels} not found in axis') if isinstance(mask.dtype, ExtensionDtype): mask = mask.to_numpy(dtype=bool) indexer = mask.nonzero()[0] new_axis = axis.take(indexer) bm_axis = self.ndim - axis_num - 1 new_mgr = self._mgr.reindex_indexer(new_axis, indexer, axis=bm_axis, allow_dups=True, only_slice=only_slice) result = self._constructor_from_mgr(new_mgr, axes=new_mgr.axes) if self.ndim == 1: result._name = self.name return result.__finalize__(self) @final def _update_inplace(self, result) -> None: self._mgr = result._mgr @final def add_prefix(self, prefix: str, axis: Axis | None=None) -> Self: f = lambda x: f'{prefix}{x}' axis_name = self._info_axis_name if axis is not None: axis_name = self._get_axis_name(axis) mapper = {axis_name: f} return self._rename(**mapper) @final def add_suffix(self, suffix: str, axis: Axis | None=None) -> Self: f = lambda x: f'{x}{suffix}' axis_name = self._info_axis_name if axis is not None: axis_name = self._get_axis_name(axis) mapper = {axis_name: f} return self._rename(**mapper) @overload def sort_values(self, *, axis: Axis=..., ascending: bool | Sequence[bool]=..., inplace: Literal[False]=..., kind: SortKind=..., na_position: NaPosition=..., ignore_index: bool=..., key: ValueKeyFunc=...) -> Self: ... @overload def sort_values(self, *, axis: Axis=..., ascending: bool | Sequence[bool]=..., inplace: Literal[True], kind: SortKind=..., na_position: NaPosition=..., ignore_index: bool=..., key: ValueKeyFunc=...) -> None: ... 
@overload def sort_values(self, *, axis: Axis=..., ascending: bool | Sequence[bool]=..., inplace: bool=..., kind: SortKind=..., na_position: NaPosition=..., ignore_index: bool=..., key: ValueKeyFunc=...) -> Self | None: ... def sort_values(self, *, axis: Axis=0, ascending: bool | Sequence[bool]=True, inplace: bool=False, kind: SortKind='quicksort', na_position: NaPosition='last', ignore_index: bool=False, key: ValueKeyFunc | None=None) -> Self | None: raise AbstractMethodError(self) @overload def sort_index(self, *, axis: Axis=..., level: IndexLabel=..., ascending: bool | Sequence[bool]=..., inplace: Literal[True], kind: SortKind=..., na_position: NaPosition=..., sort_remaining: bool=..., ignore_index: bool=..., key: IndexKeyFunc=...) -> None: ... @overload def sort_index(self, *, axis: Axis=..., level: IndexLabel=..., ascending: bool | Sequence[bool]=..., inplace: Literal[False]=..., kind: SortKind=..., na_position: NaPosition=..., sort_remaining: bool=..., ignore_index: bool=..., key: IndexKeyFunc=...) -> Self: ... @overload def sort_index(self, *, axis: Axis=..., level: IndexLabel=..., ascending: bool | Sequence[bool]=..., inplace: bool=..., kind: SortKind=..., na_position: NaPosition=..., sort_remaining: bool=..., ignore_index: bool=..., key: IndexKeyFunc=...) -> Self | None: ... def sort_index(self, *, axis: Axis=0, level: IndexLabel | None=None, ascending: bool | Sequence[bool]=True, inplace: bool=False, kind: SortKind='quicksort', na_position: NaPosition='last', sort_remaining: bool=True, ignore_index: bool=False, key: IndexKeyFunc | None=None) -> Self | None: inplace = validate_bool_kwarg(inplace, 'inplace') axis = self._get_axis_number(axis) ascending = validate_ascending(ascending) target = self._get_axis(axis) indexer = get_indexer_indexer(target, level, ascending, kind, na_position, sort_remaining, key) if indexer is None: if inplace: result = self else: result = self.copy(deep=False) if ignore_index: if axis == 1: result.columns = default_index(len(self.columns)) else: result.index = default_index(len(self)) if inplace: return None else: return result baxis = self._get_block_manager_axis(axis) new_data = self._mgr.take(indexer, axis=baxis, verify=False) if not ignore_index: new_axis = new_data.axes[baxis]._sort_levels_monotonic() else: new_axis = default_index(len(indexer)) new_data.set_axis(baxis, new_axis) result = self._constructor_from_mgr(new_data, axes=new_data.axes) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method='sort_index') @doc(klass=_shared_doc_kwargs['klass'], optional_reindex='') def reindex(self, labels=None, *, index=None, columns=None, axis: Axis | None=None, method: ReindexMethod | None=None, copy: bool | lib.NoDefault=lib.no_default, level: Level | None=None, fill_value: Scalar | None=np.nan, limit: int | None=None, tolerance=None) -> Self: self._check_copy_deprecation(copy) if index is not None and columns is not None and (labels is not None): raise TypeError("Cannot specify all of 'labels', 'index', 'columns'.") elif index is not None or columns is not None: if axis is not None: raise TypeError("Cannot specify both 'axis' and any of 'index' or 'columns'") if labels is not None: if index is not None: columns = labels else: index = labels elif axis and self._get_axis_number(axis) == 1: columns = labels else: index = labels axes: dict[Literal['index', 'columns'], Any] = {'index': index, 'columns': columns} method = clean_reindex_fill_method(method) if all((self._get_axis(axis_name).identical(ax) for (axis_name, 
ax) in axes.items() if ax is not None)): return self.copy(deep=False) if self._needs_reindex_multi(axes, method, level): return self._reindex_multi(axes, fill_value) return self._reindex_axes(axes, level, limit, tolerance, method, fill_value).__finalize__(self, method='reindex') @final def _reindex_axes(self, axes, level: Level | None, limit: int | None, tolerance, method, fill_value: Scalar | None) -> Self: obj = self for a in self._AXIS_ORDERS: labels = axes[a] if labels is None: continue ax = self._get_axis(a) (new_index, indexer) = ax.reindex(labels, level=level, limit=limit, tolerance=tolerance, method=method) axis = self._get_axis_number(a) obj = obj._reindex_with_indexers({axis: [new_index, indexer]}, fill_value=fill_value, allow_dups=False) return obj def _needs_reindex_multi(self, axes, method, level: Level | None) -> bool: return common.count_not_none(*axes.values()) == self._AXIS_LEN and method is None and (level is None) and self._can_fast_transpose def _reindex_multi(self, axes, fill_value): raise AbstractMethodError(self) @final def _reindex_with_indexers(self, reindexers, fill_value=None, allow_dups: bool=False) -> Self: new_data = self._mgr for axis in sorted(reindexers.keys()): (index, indexer) = reindexers[axis] baxis = self._get_block_manager_axis(axis) if index is None: continue index = ensure_index(index) if indexer is not None: indexer = ensure_platform_int(indexer) new_data = new_data.reindex_indexer(index, indexer, axis=baxis, fill_value=fill_value, allow_dups=allow_dups) if new_data is self._mgr: new_data = new_data.copy(deep=False) return self._constructor_from_mgr(new_data, axes=new_data.axes).__finalize__(self) def filter(self, items=None, like: str | None=None, regex: str | None=None, axis: Axis | None=None) -> Self: nkw = common.count_not_none(items, like, regex) if nkw > 1: raise TypeError('Keyword arguments `items`, `like`, or `regex` are mutually exclusive') if axis is None: axis = self._info_axis_name labels = self._get_axis(axis) if items is not None: name = self._get_axis_name(axis) items = Index(items).intersection(labels) if len(items) == 0: items = items.astype(labels.dtype) return self.reindex(**{name: items}) elif like: def f(x) -> bool: assert like is not None return like in ensure_str(x) values = labels.map(f) return self.loc(axis=axis)[values] elif regex: def f(x) -> bool: return matcher.search(ensure_str(x)) is not None matcher = re.compile(regex) values = labels.map(f) return self.loc(axis=axis)[values] else: raise TypeError('Must pass either `items`, `like`, or `regex`') @final def head(self, n: int=5) -> Self: return self.iloc[:n].copy() @final def tail(self, n: int=5) -> Self: if n == 0: return self.iloc[0:0].copy() return self.iloc[-n:].copy() @final def sample(self, n: int | None=None, frac: float | None=None, replace: bool=False, weights=None, random_state: RandomState | None=None, axis: Axis | None=None, ignore_index: bool=False) -> Self: if axis is None: axis = 0 axis = self._get_axis_number(axis) obj_len = self.shape[axis] rs = common.random_state(random_state) size = sample.process_sampling_size(n, frac, replace) if size is None: assert frac is not None size = round(frac * obj_len) if weights is not None: weights = sample.preprocess_weights(self, weights, axis) sampled_indices = sample.sample(obj_len, size, replace, weights, rs) result = self.take(sampled_indices, axis=axis) if ignore_index: result.index = default_index(len(result)) return result @overload def pipe(self, func: Callable[Concatenate[Self, P], T], *args: P.args, 
**kwargs: P.kwargs) -> T: ... @overload def pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T: ... @final @doc(klass=_shared_doc_kwargs['klass']) def pipe(self, func: Callable[Concatenate[Self, P], T] | tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T: return common.pipe(self.copy(deep=False), func, *args, **kwargs) @final def __finalize__(self, other, method: str | None=None, **kwargs) -> Self: if isinstance(other, NDFrame): if other.attrs: self.attrs = deepcopy(other.attrs) self.flags.allows_duplicate_labels = other.flags.allows_duplicate_labels for name in set(self._metadata) & set(other._metadata): assert isinstance(name, str) object.__setattr__(self, name, getattr(other, name, None)) if method == 'concat': objs = other.objs if all((bool(obj.attrs) for obj in objs)): attrs = objs[0].attrs have_same_attrs = all((obj.attrs == attrs for obj in objs[1:])) if have_same_attrs: self.attrs = deepcopy(attrs) allows_duplicate_labels = all((x.flags.allows_duplicate_labels for x in objs)) self.flags.allows_duplicate_labels = allows_duplicate_labels return self @final def __getattr__(self, name: str): if name not in self._internal_names_set and name not in self._metadata and (name not in self._accessors) and self._info_axis._can_hold_identifiers_and_holds_name(name): return self[name] return object.__getattribute__(self, name) @final def __setattr__(self, name: str, value) -> None: try: object.__getattribute__(self, name) return object.__setattr__(self, name, value) except AttributeError: pass if name in self._internal_names_set: object.__setattr__(self, name, value) elif name in self._metadata: object.__setattr__(self, name, value) else: try: existing = getattr(self, name) if isinstance(existing, Index): object.__setattr__(self, name, value) elif name in self._info_axis: self[name] = value else: object.__setattr__(self, name, value) except (AttributeError, TypeError): if isinstance(self, ABCDataFrame) and is_list_like(value): warnings.warn("Pandas doesn't allow columns to be created via a new attribute name - see https://pandas.pydata.org/pandas-docs/stable/indexing.html#attribute-access", stacklevel=find_stack_level()) object.__setattr__(self, name, value) @final def _dir_additions(self) -> set[str]: additions = super()._dir_additions() if self._info_axis._can_hold_strings: additions.update(self._info_axis._dir_additions_for_owner) return additions @final def _consolidate_inplace(self) -> None: self._mgr = self._mgr.consolidate() @final def _consolidate(self): cons_data = self._mgr.consolidate() return self._constructor_from_mgr(cons_data, axes=cons_data.axes).__finalize__(self) @final @property def _is_mixed_type(self) -> bool: if self._mgr.is_single_block: return False if self._mgr.any_extension_types: return True return self.dtypes.nunique() > 1 @final def _get_numeric_data(self) -> Self: new_mgr = self._mgr.get_numeric_data() return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__(self) @final def _get_bool_data(self): new_mgr = self._mgr.get_bool_data() return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__(self) @property def values(self): raise AbstractMethodError(self) @property def _values(self) -> ArrayLike: raise AbstractMethodError(self) @property def dtypes(self): data = self._mgr.get_dtypes() return self._constructor_sliced(data, index=self._info_axis, dtype=np.object_) @final def astype(self, dtype, copy: bool | lib.NoDefault=lib.no_default, errors: IgnoreRaise='raise') -> Self: 
self._check_copy_deprecation(copy) if is_dict_like(dtype): if self.ndim == 1: if len(dtype) > 1 or self.name not in dtype: raise KeyError('Only the Series name can be used for the key in Series dtype mappings.') new_type = dtype[self.name] return self.astype(new_type, errors=errors) from pandas import Series dtype_ser = Series(dtype, dtype=object) for col_name in dtype_ser.index: if col_name not in self: raise KeyError(f"Only a column name can be used for the key in a dtype mappings argument. '{col_name}' not found in columns.") dtype_ser = dtype_ser.reindex(self.columns, fill_value=None) results = [] for (i, (col_name, col)) in enumerate(self.items()): cdt = dtype_ser.iat[i] if isna(cdt): res_col = col.copy(deep=False) else: try: res_col = col.astype(dtype=cdt, errors=errors) except ValueError as ex: ex.args = (f"{ex}: Error while type casting for column '{col_name}'",) raise results.append(res_col) elif is_extension_array_dtype(dtype) and self.ndim > 1: dtype = pandas_dtype(dtype) if isinstance(dtype, ExtensionDtype) and all((block.values.dtype == dtype for block in self._mgr.blocks)): return self.copy(deep=False) results = [ser.astype(dtype, errors=errors) for (_, ser) in self.items()] else: new_data = self._mgr.astype(dtype=dtype, errors=errors) res = self._constructor_from_mgr(new_data, axes=new_data.axes) return res.__finalize__(self, method='astype') if not results: return self.copy(deep=False) result = concat(results, axis=1) result = self._constructor(result) result.columns = self.columns result = result.__finalize__(self, method='astype') return cast(Self, result) @final def copy(self, deep: bool=True) -> Self: data = self._mgr.copy(deep=deep) return self._constructor_from_mgr(data, axes=data.axes).__finalize__(self, method='copy') @final def __copy__(self, deep: bool=True) -> Self: return self.copy(deep=deep) @final def __deepcopy__(self, memo=None) -> Self: return self.copy(deep=True) @final def infer_objects(self, copy: bool | lib.NoDefault=lib.no_default) -> Self: self._check_copy_deprecation(copy) new_mgr = self._mgr.convert() res = self._constructor_from_mgr(new_mgr, axes=new_mgr.axes) return res.__finalize__(self, method='infer_objects') @final def convert_dtypes(self, infer_objects: bool=True, convert_string: bool=True, convert_integer: bool=True, convert_boolean: bool=True, convert_floating: bool=True, dtype_backend: DtypeBackend='numpy_nullable') -> Self: check_dtype_backend(dtype_backend) new_mgr = self._mgr.convert_dtypes(infer_objects=infer_objects, convert_string=convert_string, convert_integer=convert_integer, convert_boolean=convert_boolean, convert_floating=convert_floating, dtype_backend=dtype_backend) res = self._constructor_from_mgr(new_mgr, axes=new_mgr.axes) return res.__finalize__(self, method='convert_dtypes') @final def _pad_or_backfill(self, method: Literal['ffill', 'bfill', 'pad', 'backfill'], *, axis: None | Axis=None, inplace: bool=False, limit: None | int=None, limit_area: Literal['inside', 'outside'] | None=None): if axis is None: axis = 0 axis = self._get_axis_number(axis) method = clean_fill_method(method) if axis == 1: if not self._mgr.is_single_block and inplace: raise NotImplementedError result = self.T._pad_or_backfill(method=method, limit=limit, limit_area=limit_area).T return result new_mgr = self._mgr.pad_or_backfill(method=method, limit=limit, limit_area=limit_area, inplace=inplace) result = self._constructor_from_mgr(new_mgr, axes=new_mgr.axes) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, 
method='fillna') @overload def fillna(self, value: Hashable | Mapping | Series | DataFrame, *, axis: Axis | None=..., inplace: Literal[False]=..., limit: int | None=...) -> Self: ... @overload def fillna(self, value: Hashable | Mapping | Series | DataFrame, *, axis: Axis | None=..., inplace: Literal[True], limit: int | None=...) -> None: ... @overload def fillna(self, value: Hashable | Mapping | Series | DataFrame, *, axis: Axis | None=..., inplace: bool=..., limit: int | None=...) -> Self | None: ... @final @doc(klass=_shared_doc_kwargs['klass'], axes_single_arg=_shared_doc_kwargs['axes_single_arg']) def fillna(self, value: Hashable | Mapping | Series | DataFrame, *, axis: Axis | None=None, inplace: bool=False, limit: int | None=None) -> Self | None: inplace = validate_bool_kwarg(inplace, 'inplace') if inplace: if not PYPY: if sys.getrefcount(self) <= REF_COUNT: warnings.warn(_chained_assignment_method_msg, ChainedAssignmentError, stacklevel=2) if isinstance(value, (list, tuple)): raise TypeError(f'"value" parameter must be a scalar or dict, but you passed a "{type(value).__name__}"') if axis is None: axis = 0 axis = self._get_axis_number(axis) if self.ndim == 1: if isinstance(value, (dict, ABCSeries)): if not len(value): if inplace: return None return self.copy(deep=False) from pandas import Series value = Series(value) value = value.reindex(self.index) value = value._values elif not is_list_like(value): pass else: raise TypeError(f'"value" parameter must be a scalar, dict or Series, but you passed a "{type(value).__name__}"') new_data = self._mgr.fillna(value=value, limit=limit, inplace=inplace) elif isinstance(value, (dict, ABCSeries)): if axis == 1: raise NotImplementedError('Currently only can fill with dict/Series column by column') result = self if inplace else self.copy(deep=False) for (k, v) in value.items(): if k not in result: continue res_k = result[k].fillna(v, limit=limit) if not inplace: result[k] = res_k elif isinstance(res_k, ABCSeries): if res_k.dtype == result[k].dtype: result.loc[:, k] = res_k else: result[k] = res_k else: locs = result.columns.get_loc(k) if isinstance(locs, slice): locs = range(self.shape[1])[locs] elif isinstance(locs, np.ndarray) and locs.dtype.kind == 'b': locs = locs.nonzero()[0] elif not (isinstance(locs, np.ndarray) and locs.dtype.kind == 'i'): raise NotImplementedError('Unexpected get_loc result, please report a bug at https://github.com/pandas-dev/pandas') for (i, loc) in enumerate(locs): res_loc = res_k.iloc[:, i] target = self.iloc[:, loc] if res_loc.dtype == target.dtype: result.iloc[:, loc] = res_loc else: result.isetitem(loc, res_loc) if inplace: return self._update_inplace(result) else: return result elif not is_list_like(value): if axis == 1: result = self.T.fillna(value=value, limit=limit).T new_data = result._mgr else: new_data = self._mgr.fillna(value=value, limit=limit, inplace=inplace) elif isinstance(value, ABCDataFrame) and self.ndim == 2: new_data = self.where(self.notna(), value)._mgr else: raise ValueError(f'invalid fill value with a {type(value)}') result = self._constructor_from_mgr(new_data, axes=new_data.axes) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method='fillna') @overload def ffill(self, *, axis: None | Axis=..., inplace: Literal[False]=..., limit: None | int=..., limit_area: Literal['inside', 'outside'] | None=...) -> Self: ... 
@overload def ffill(self, *, axis: None | Axis=..., inplace: Literal[True], limit: None | int=..., limit_area: Literal['inside', 'outside'] | None=...) -> None: ... @overload def ffill(self, *, axis: None | Axis=..., inplace: bool=..., limit: None | int=..., limit_area: Literal['inside', 'outside'] | None=...) -> Self | None: ... @final @doc(klass=_shared_doc_kwargs['klass'], axes_single_arg=_shared_doc_kwargs['axes_single_arg']) def ffill(self, *, axis: None | Axis=None, inplace: bool=False, limit: None | int=None, limit_area: Literal['inside', 'outside'] | None=None) -> Self | None: inplace = validate_bool_kwarg(inplace, 'inplace') if inplace: if not PYPY: if sys.getrefcount(self) <= REF_COUNT: warnings.warn(_chained_assignment_method_msg, ChainedAssignmentError, stacklevel=2) return self._pad_or_backfill('ffill', axis=axis, inplace=inplace, limit=limit, limit_area=limit_area) @overload def bfill(self, *, axis: None | Axis=..., inplace: Literal[False]=..., limit: None | int=..., limit_area: Literal['inside', 'outside'] | None=...) -> Self: ... @overload def bfill(self, *, axis: None | Axis=..., inplace: Literal[True], limit: None | int=...) -> None: ... @overload def bfill(self, *, axis: None | Axis=..., inplace: bool=..., limit: None | int=..., limit_area: Literal['inside', 'outside'] | None=...) -> Self | None: ... @final @doc(klass=_shared_doc_kwargs['klass'], axes_single_arg=_shared_doc_kwargs['axes_single_arg']) def bfill(self, *, axis: None | Axis=None, inplace: bool=False, limit: None | int=None, limit_area: Literal['inside', 'outside'] | None=None) -> Self | None: inplace = validate_bool_kwarg(inplace, 'inplace') if inplace: if not PYPY: if sys.getrefcount(self) <= REF_COUNT: warnings.warn(_chained_assignment_method_msg, ChainedAssignmentError, stacklevel=2) return self._pad_or_backfill('bfill', axis=axis, inplace=inplace, limit=limit, limit_area=limit_area) @overload def replace(self, to_replace=..., value=..., *, inplace: Literal[False]=..., regex: bool=...) -> Self: ... @overload def replace(self, to_replace=..., value=..., *, inplace: Literal[True], regex: bool=...) -> None: ... @overload def replace(self, to_replace=..., value=..., *, inplace: bool=..., regex: bool=...) -> Self | None: ... 
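# --- Illustrative usage sketch (not part of the pandas source above) --------
# A hedged example of the missing-data and replacement methods declared
# above: fillna accepts a per-column mapping, ffill propagates the last valid
# observation forward, and replace maps old values to new ones. The toy data
# is an assumption for illustration.
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1.0, np.nan, 3.0], "b": [np.nan, "y", "z"]})

print(df.fillna({"a": 0.0, "b": "missing"}))  # column-wise fill values
print(df.ffill(limit=1))                      # forward-fill at most one consecutive gap
print(df.replace({3.0: 30.0, "z": "zz"}))     # old value -> replacement mapping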
@final @doc(_shared_docs['replace'], klass=_shared_doc_kwargs['klass'], inplace=_shared_doc_kwargs['inplace']) def replace(self, to_replace=None, value=lib.no_default, *, inplace: bool=False, regex: bool=False) -> Self | None: if not is_bool(regex) and to_replace is not None: raise ValueError("'to_replace' must be 'None' if 'regex' is not a bool") if not (is_scalar(to_replace) or is_re_compilable(to_replace) or is_list_like(to_replace)): raise TypeError(f"Expecting 'to_replace' to be either a scalar, array-like, dict or None, got invalid type {type(to_replace).__name__!r}") if value is lib.no_default and (not (is_dict_like(to_replace) or is_dict_like(regex))): raise ValueError(f"{type(self).__name__}.replace must specify either 'value', a dict-like 'to_replace', or dict-like 'regex'.") inplace = validate_bool_kwarg(inplace, 'inplace') if inplace: if not PYPY: if sys.getrefcount(self) <= REF_COUNT: warnings.warn(_chained_assignment_method_msg, ChainedAssignmentError, stacklevel=2) if value is lib.no_default: if not is_dict_like(to_replace): to_replace = regex regex = True items = list(to_replace.items()) if items: (keys, values) = zip(*items) else: (keys, values) = ([], []) are_mappings = [is_dict_like(v) for v in values] if any(are_mappings): if not all(are_mappings): raise TypeError('If a nested mapping is passed, all values of the top level mapping must be mappings') to_rep_dict = {} value_dict = {} for (k, v) in items: (keys, values) = list(zip(*v.items())) or ([], []) to_rep_dict[k] = list(keys) value_dict[k] = list(values) (to_replace, value) = (to_rep_dict, value_dict) else: (to_replace, value) = (keys, values) return self.replace(to_replace, value, inplace=inplace, regex=regex) else: if not self.size: if inplace: return None return self.copy(deep=False) if is_dict_like(to_replace): if is_dict_like(value): if isinstance(self, ABCSeries): raise ValueError('to_replace and value cannot be dict-like for Series.replace') mapping = {col: (to_replace[col], value[col]) for col in to_replace.keys() if col in value.keys() and col in self} return self._replace_columnwise(mapping, inplace, regex) elif not is_list_like(value): if self.ndim == 1: raise ValueError('Series.replace cannot use dict-like to_replace and non-None value') mapping = {col: (to_rep, value) for (col, to_rep) in to_replace.items()} return self._replace_columnwise(mapping, inplace, regex) else: raise TypeError('value argument must be scalar, dict, or Series') elif is_list_like(to_replace): if not is_list_like(value): value = [value] * len(to_replace) if len(to_replace) != len(value): raise ValueError(f'Replacement lists must match in length. 
Expecting {len(to_replace)} got {len(value)} ') new_data = self._mgr.replace_list(src_list=to_replace, dest_list=value, inplace=inplace, regex=regex) elif to_replace is None: if not (is_re_compilable(regex) or is_list_like(regex) or is_dict_like(regex)): raise TypeError(f"'regex' must be a string or a compiled regular expression or a list or dict of strings or regular expressions, you passed a {type(regex).__name__!r}") return self.replace(regex, value, inplace=inplace, regex=True) elif is_dict_like(value): if self.ndim == 1: raise ValueError('Series.replace cannot use dict-value and non-None to_replace') mapping = {col: (to_replace, val) for (col, val) in value.items()} return self._replace_columnwise(mapping, inplace, regex) elif not is_list_like(value): regex = should_use_regex(regex, to_replace) if regex: new_data = self._mgr.replace_regex(to_replace=to_replace, value=value, inplace=inplace) else: new_data = self._mgr.replace(to_replace=to_replace, value=value, inplace=inplace) else: raise TypeError(f'Invalid "to_replace" type: {type(to_replace).__name__!r}') result = self._constructor_from_mgr(new_data, axes=new_data.axes) if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method='replace') @overload def interpolate(self, method: InterpolateOptions=..., *, axis: Axis=..., limit: int | None=..., inplace: Literal[False]=..., limit_direction: Literal['forward', 'backward', 'both'] | None=..., limit_area: Literal['inside', 'outside'] | None=..., **kwargs) -> Self: ... @overload def interpolate(self, method: InterpolateOptions=..., *, axis: Axis=..., limit: int | None=..., inplace: Literal[True], limit_direction: Literal['forward', 'backward', 'both'] | None=..., limit_area: Literal['inside', 'outside'] | None=..., **kwargs) -> None: ... @overload def interpolate(self, method: InterpolateOptions=..., *, axis: Axis=..., limit: int | None=..., inplace: bool=..., limit_direction: Literal['forward', 'backward', 'both'] | None=..., limit_area: Literal['inside', 'outside'] | None=..., **kwargs) -> Self | None: ... 
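# --- Illustrative usage sketch (not part of the pandas source above) --------
# A hedged example of interpolate, whose overloads are declared above: NaNs in
# a numeric Series are filled by linear interpolation between the surrounding
# valid points, and limit/limit_area restrict which gaps may be filled. The
# sample values are an assumption for illustration.
import numpy as np
import pandas as pd

s = pd.Series([0.0, np.nan, np.nan, 3.0, np.nan])

print(s.interpolate())                     # linear fill: 0, 1, 2, 3, 3
print(s.interpolate(limit_area="inside"))  # only fill NaNs bounded by valid values
print(s.interpolate(limit=1))              # fill at most one consecutive NaN per gap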
@final def interpolate(self, method: InterpolateOptions='linear', *, axis: Axis=0, limit: int | None=None, inplace: bool=False, limit_direction: Literal['forward', 'backward', 'both'] | None=None, limit_area: Literal['inside', 'outside'] | None=None, **kwargs) -> Self | None: inplace = validate_bool_kwarg(inplace, 'inplace') if inplace: if not PYPY: if sys.getrefcount(self) <= REF_COUNT: warnings.warn(_chained_assignment_method_msg, ChainedAssignmentError, stacklevel=2) axis = self._get_axis_number(axis) if self.empty: if inplace: return None return self.copy() if not isinstance(method, str): raise ValueError("'method' should be a string, not None.") (obj, should_transpose) = (self.T, True) if axis == 1 else (self, False) if isinstance(obj.index, MultiIndex) and method != 'linear': raise ValueError('Only `method=linear` interpolation is supported on MultiIndexes.') limit_direction = missing.infer_limit_direction(limit_direction, method) index = missing.get_interp_index(method, obj.index) new_data = obj._mgr.interpolate(method=method, index=index, limit=limit, limit_direction=limit_direction, limit_area=limit_area, inplace=inplace, **kwargs) result = self._constructor_from_mgr(new_data, axes=new_data.axes) if should_transpose: result = result.T if inplace: return self._update_inplace(result) else: return result.__finalize__(self, method='interpolate') @final def asof(self, where, subset=None): if isinstance(where, str): where = Timestamp(where) if not self.index.is_monotonic_increasing: raise ValueError('asof requires a sorted index') is_series = isinstance(self, ABCSeries) if is_series: if subset is not None: raise ValueError('subset is not valid for Series') else: if subset is None: subset = self.columns if not is_list_like(subset): subset = [subset] is_list = is_list_like(where) if not is_list: start = self.index[0] if isinstance(self.index, PeriodIndex): where = Period(where, freq=self.index.freq) if where < start: if not is_series: return self._constructor_sliced(index=self.columns, name=where, dtype=np.float64) return np.nan if is_series: loc = self.index.searchsorted(where, side='right') if loc > 0: loc -= 1 values = self._values while loc > 0 and isna(values[loc]): loc -= 1 return values[loc] if not isinstance(where, Index): where = Index(where) if is_list else Index([where]) nulls = self.isna() if is_series else self[subset].isna().any(axis=1) if nulls.all(): if is_series: self = cast('Series', self) return self._constructor(np.nan, index=where, name=self.name) elif is_list: self = cast('DataFrame', self) return self._constructor(np.nan, index=where, columns=self.columns) else: self = cast('DataFrame', self) return self._constructor_sliced(np.nan, index=self.columns, name=where[0]) locs = self.index.asof_locs(where, ~nulls._values) mask = locs == -1 data = self.take(locs) data.index = where if mask.any(): data.loc[mask] = np.nan return data if is_list else data.iloc[-1] @doc(klass=_shared_doc_kwargs['klass']) def isna(self) -> Self: return isna(self).__finalize__(self, method='isna') @doc(isna, klass=_shared_doc_kwargs['klass']) def isnull(self) -> Self: return isna(self).__finalize__(self, method='isnull') @doc(klass=_shared_doc_kwargs['klass']) def notna(self) -> Self: return notna(self).__finalize__(self, method='notna') @doc(notna, klass=_shared_doc_kwargs['klass']) def notnull(self) -> Self: return notna(self).__finalize__(self, method='notnull') @final def _clip_with_scalar(self, lower, upper, inplace: bool=False): if lower is not None and np.any(isna(lower)) or (upper is not 
None and np.any(isna(upper))): raise ValueError('Cannot use an NA value as a clip threshold') result = self mask = self.isna() if lower is not None: cond = mask | (self >= lower) result = result.where(cond, lower, inplace=inplace) if upper is not None: cond = mask | (self <= upper) result = self if inplace else result result = result.where(cond, upper, inplace=inplace) return result @final def _clip_with_one_bound(self, threshold, method, axis, inplace): if axis is not None: axis = self._get_axis_number(axis) if is_scalar(threshold) and is_number(threshold): if method.__name__ == 'le': return self._clip_with_scalar(None, threshold, inplace=inplace) return self._clip_with_scalar(threshold, None, inplace=inplace) if not isinstance(threshold, ABCSeries) and is_list_like(threshold): if isinstance(self, ABCSeries): threshold = self._constructor(threshold, index=self.index) else: threshold = self._align_for_op(threshold, axis, flex=None)[1] if is_list_like(threshold): fill_value = np.inf if method.__name__ == 'le' else -np.inf threshold_inf = threshold.fillna(fill_value) else: threshold_inf = threshold subset = method(threshold_inf, axis=axis) | isna(self) return self.where(subset, threshold, axis=axis, inplace=inplace) @overload def clip(self, lower=..., upper=..., *, axis: Axis | None=..., inplace: Literal[False]=..., **kwargs) -> Self: ... @overload def clip(self, lower=..., upper=..., *, axis: Axis | None=..., inplace: Literal[True], **kwargs) -> None: ... @overload def clip(self, lower=..., upper=..., *, axis: Axis | None=..., inplace: bool=..., **kwargs) -> Self | None: ... @final def clip(self, lower=None, upper=None, *, axis: Axis | None=None, inplace: bool=False, **kwargs) -> Self | None: inplace = validate_bool_kwarg(inplace, 'inplace') if inplace: if not PYPY: if sys.getrefcount(self) <= REF_COUNT: warnings.warn(_chained_assignment_method_msg, ChainedAssignmentError, stacklevel=2) axis = nv.validate_clip_with_axis(axis, (), kwargs) if axis is not None: axis = self._get_axis_number(axis) isna_lower = isna(lower) if not is_list_like(lower): if np.any(isna_lower): lower = None elif np.all(isna_lower): lower = None isna_upper = isna(upper) if not is_list_like(upper): if np.any(isna_upper): upper = None elif np.all(isna_upper): upper = None if lower is not None and upper is not None and is_scalar(lower) and is_scalar(upper): (lower, upper) = (min(lower, upper), max(lower, upper)) if (lower is None or is_number(lower)) and (upper is None or is_number(upper)): return self._clip_with_scalar(lower, upper, inplace=inplace) result = self if lower is not None: result = result._clip_with_one_bound(lower, method=self.ge, axis=axis, inplace=inplace) if upper is not None: if inplace: result = self result = result._clip_with_one_bound(upper, method=self.le, axis=axis, inplace=inplace) return result @final @doc(klass=_shared_doc_kwargs['klass']) def asfreq(self, freq: Frequency, method: FillnaOptions | None=None, how: Literal['start', 'end'] | None=None, normalize: bool=False, fill_value: Hashable | None=None) -> Self: from pandas.core.resample import asfreq return asfreq(self, freq, method=method, how=how, normalize=normalize, fill_value=fill_value) @final def at_time(self, time, asof: bool=False, axis: Axis | None=None) -> Self: if axis is None: axis = 0 axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError('Index must be DatetimeIndex') indexer = index.indexer_at_time(time, asof=asof) return self.take(indexer, axis=axis) @final def 
between_time(self, start_time, end_time, inclusive: IntervalClosedType='both', axis: Axis | None=None) -> Self: if axis is None: axis = 0 axis = self._get_axis_number(axis) index = self._get_axis(axis) if not isinstance(index, DatetimeIndex): raise TypeError('Index must be DatetimeIndex') (left_inclusive, right_inclusive) = validate_inclusive(inclusive) indexer = index.indexer_between_time(start_time, end_time, include_start=left_inclusive, include_end=right_inclusive) return self.take(indexer, axis=axis) @final @doc(klass=_shared_doc_kwargs['klass']) def resample(self, rule, closed: Literal['right', 'left'] | None=None, label: Literal['right', 'left'] | None=None, convention: Literal['start', 'end', 's', 'e'] | lib.NoDefault=lib.no_default, on: Level | None=None, level: Level | None=None, origin: str | TimestampConvertibleTypes='start_day', offset: TimedeltaConvertibleTypes | None=None, group_keys: bool=False) -> Resampler: from pandas.core.resample import get_resampler if convention is not lib.no_default: warnings.warn(f"The 'convention' keyword in {type(self).__name__}.resample is deprecated and will be removed in a future version. Explicitly cast PeriodIndex to DatetimeIndex before resampling instead.", FutureWarning, stacklevel=find_stack_level()) else: convention = 'start' return get_resampler(cast('Series | DataFrame', self), freq=rule, label=label, closed=closed, convention=convention, key=on, level=level, origin=origin, offset=offset, group_keys=group_keys) @final def rank(self, axis: Axis=0, method: Literal['average', 'min', 'max', 'first', 'dense']='average', numeric_only: bool=False, na_option: Literal['keep', 'top', 'bottom']='keep', ascending: bool=True, pct: bool=False) -> Self: axis_int = self._get_axis_number(axis) if na_option not in {'keep', 'top', 'bottom'}: msg = "na_option must be one of 'keep', 'top', or 'bottom'" raise ValueError(msg) def ranker(data): if data.ndim == 2: values = data.values else: values = data._values if isinstance(values, ExtensionArray): ranks = values._rank(axis=axis_int, method=method, ascending=ascending, na_option=na_option, pct=pct) else: ranks = algos.rank(values, axis=axis_int, method=method, ascending=ascending, na_option=na_option, pct=pct) ranks_obj = self._constructor(ranks, **data._construct_axes_dict()) return ranks_obj.__finalize__(self, method='rank') if numeric_only: if self.ndim == 1 and (not is_numeric_dtype(self.dtype)): raise TypeError('Series.rank does not allow numeric_only=True with non-numeric dtype.') data = self._get_numeric_data() else: data = self return ranker(data) @doc(_shared_docs['compare'], klass=_shared_doc_kwargs['klass']) def compare(self, other: Self, align_axis: Axis=1, keep_shape: bool=False, keep_equal: bool=False, result_names: Suffixes=('self', 'other')): if type(self) is not type(other): (cls_self, cls_other) = (type(self).__name__, type(other).__name__) raise TypeError(f"can only compare '{cls_self}' (not '{cls_other}') with '{cls_self}'") mask = ~((self == other) | self.isna() & other.isna()) mask.fillna(True, inplace=True) if not keep_equal: self = self.where(mask) other = other.where(mask) if not keep_shape: if isinstance(self, ABCDataFrame): cmask = mask.any() rmask = mask.any(axis=1) self = self.loc[rmask, cmask] other = other.loc[rmask, cmask] else: self = self[mask] other = other[mask] if not isinstance(result_names, tuple): raise TypeError(f"Passing 'result_names' as a {type(result_names)} is not supported. 
Provide 'result_names' as a tuple instead.") if align_axis in (1, 'columns'): axis = 1 else: axis = self._get_axis_number(align_axis) diff = concat([self, other], axis=axis, keys=result_names) if axis >= self.ndim: return diff ax = diff._get_axis(axis) ax_names = np.array(ax.names) ax.names = np.arange(len(ax_names)) order = list(range(1, ax.nlevels)) + [0] if isinstance(diff, ABCDataFrame): diff = diff.reorder_levels(order, axis=axis) else: diff = diff.reorder_levels(order) diff._get_axis(axis=axis).names = ax_names[order] indices = np.arange(diff.shape[axis]).reshape([2, diff.shape[axis] // 2]).T.reshape(-1) diff = diff.take(indices, axis=axis) return diff @final @doc(klass=_shared_doc_kwargs['klass'], axes_single_arg=_shared_doc_kwargs['axes_single_arg']) def align(self, other: NDFrameT, join: AlignJoin='outer', axis: Axis | None=None, level: Level | None=None, copy: bool | lib.NoDefault=lib.no_default, fill_value: Hashable | None=None) -> tuple[Self, NDFrameT]: self._check_copy_deprecation(copy) _right: DataFrame | Series if axis is not None: axis = self._get_axis_number(axis) if isinstance(other, ABCDataFrame): (left, _right, join_index) = self._align_frame(other, join=join, axis=axis, level=level, fill_value=fill_value) elif isinstance(other, ABCSeries): (left, _right, join_index) = self._align_series(other, join=join, axis=axis, level=level, fill_value=fill_value) else: raise TypeError(f'unsupported type: {type(other)}') right = cast(NDFrameT, _right) if self.ndim == 1 or axis == 0: if isinstance(left.index.dtype, DatetimeTZDtype): if left.index.tz != right.index.tz: if join_index is not None: left = left.copy(deep=False) right = right.copy(deep=False) left.index = join_index right.index = join_index left = left.__finalize__(self) right = right.__finalize__(other) return (left, right) @final def _align_frame(self, other: DataFrame, join: AlignJoin='outer', axis: Axis | None=None, level=None, fill_value=None) -> tuple[Self, DataFrame, Index | None]: (join_index, join_columns) = (None, None) (ilidx, iridx) = (None, None) (clidx, cridx) = (None, None) is_series = isinstance(self, ABCSeries) if (axis is None or axis == 0) and (not self.index.equals(other.index)): (join_index, ilidx, iridx) = self.index.join(other.index, how=join, level=level, return_indexers=True) if (axis is None or axis == 1) and (not is_series) and (not self.columns.equals(other.columns)): (join_columns, clidx, cridx) = self.columns.join(other.columns, how=join, level=level, return_indexers=True) if is_series: reindexers = {0: [join_index, ilidx]} else: reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]} left = self._reindex_with_indexers(reindexers, fill_value=fill_value, allow_dups=True) right = other._reindex_with_indexers({0: [join_index, iridx], 1: [join_columns, cridx]}, fill_value=fill_value, allow_dups=True) return (left, right, join_index) @final def _align_series(self, other: Series, join: AlignJoin='outer', axis: Axis | None=None, level=None, fill_value=None) -> tuple[Self, Series, Index | None]: is_series = isinstance(self, ABCSeries) if not is_series and axis is None or axis not in [None, 0, 1]: raise ValueError('Must specify axis=0 or 1') if is_series and axis == 1: raise ValueError('cannot align series to a series other than axis 0') if not axis: if self.index.equals(other.index): (join_index, lidx, ridx) = (None, None, None) else: (join_index, lidx, ridx) = self.index.join(other.index, how=join, level=level, return_indexers=True) if is_series: left = self._reindex_indexer(join_index, lidx) 
elif lidx is None or join_index is None: left = self.copy(deep=False) else: new_mgr = self._mgr.reindex_indexer(join_index, lidx, axis=1) left = self._constructor_from_mgr(new_mgr, axes=new_mgr.axes) right = other._reindex_indexer(join_index, ridx) else: fdata = self._mgr join_index = self.axes[1] (lidx, ridx) = (None, None) if not join_index.equals(other.index): (join_index, lidx, ridx) = join_index.join(other.index, how=join, level=level, return_indexers=True) if lidx is not None: bm_axis = self._get_block_manager_axis(1) fdata = fdata.reindex_indexer(join_index, lidx, axis=bm_axis) left = self._constructor_from_mgr(fdata, axes=fdata.axes) if ridx is None: right = other.copy(deep=False) else: right = other.reindex(join_index, level=level) fill_na = notna(fill_value) if fill_na: left = left.fillna(fill_value) right = right.fillna(fill_value) return (left, right, join_index) @overload def _where(self, cond, other=..., *, inplace: Literal[False]=..., axis: Axis | None=..., level=...) -> Self: ... @overload def _where(self, cond, other=..., *, inplace: Literal[True], axis: Axis | None=..., level=...) -> None: ... @overload def _where(self, cond, other=..., *, inplace: bool, axis: Axis | None=..., level=...) -> Self | None: ... @final def _where(self, cond, other=lib.no_default, *, inplace: bool=False, axis: Axis | None=None, level=None) -> Self | None: inplace = validate_bool_kwarg(inplace, 'inplace') if axis is not None: axis = self._get_axis_number(axis) cond = common.apply_if_callable(cond, self) if isinstance(cond, NDFrame): if cond.ndim == 1 and self.ndim == 2: cond = cond._constructor_expanddim({i: cond for i in range(len(self.columns))}, copy=False) cond.columns = self.columns cond = cond.align(self, join='right')[0] else: if not hasattr(cond, 'shape'): cond = np.asanyarray(cond) if cond.shape != self.shape: raise ValueError('Array conditional must be same shape as self') cond = self._constructor(cond, **self._construct_axes_dict(), copy=False) fill_value = bool(inplace) cond = cond.fillna(fill_value) cond = cond.infer_objects() msg = 'Boolean array expected for the condition, not {dtype}' if not cond.empty: if not isinstance(cond, ABCDataFrame): if not is_bool_dtype(cond): raise TypeError(msg.format(dtype=cond.dtype)) else: for _dt in cond.dtypes: if not is_bool_dtype(_dt): raise TypeError(msg.format(dtype=_dt)) if cond._mgr.any_extension_types: cond = cond._constructor(cond.to_numpy(dtype=bool, na_value=fill_value), **cond._construct_axes_dict()) else: cond = cond.astype(bool) cond = -cond if inplace else cond cond = cond.reindex(self._info_axis, axis=self._info_axis_number) if isinstance(other, NDFrame): if other.ndim <= self.ndim: other = self.align(other, join='left', axis=axis, level=level, fill_value=None)[1] if axis is None and (not other._indexed_same(self)): raise InvalidIndexError if other.ndim < self.ndim: other = other._values if axis == 0: other = np.reshape(other, (-1, 1)) elif axis == 1: other = np.reshape(other, (1, -1)) other = np.broadcast_to(other, self.shape) else: raise NotImplementedError('cannot align with a higher dimensional NDFrame') elif not isinstance(other, (MultiIndex, NDFrame)): other = extract_array(other, extract_numpy=True) if isinstance(other, (np.ndarray, ExtensionArray)): if other.shape != self.shape: if self.ndim != 1: raise ValueError('other must be the same shape as self when an ndarray') else: other = self._constructor(other, **self._construct_axes_dict(), copy=False) if axis is None: axis = 0 if self.ndim == getattr(other, 'ndim', 0): align = 
True else: align = self._get_axis_number(axis) == 1 if inplace: new_data = self._mgr.putmask(mask=cond, new=other, align=align) result = self._constructor_from_mgr(new_data, axes=new_data.axes) return self._update_inplace(result) else: new_data = self._mgr.where(other=other, cond=cond, align=align) result = self._constructor_from_mgr(new_data, axes=new_data.axes) return result.__finalize__(self) @overload def where(self, cond, other=..., *, inplace: Literal[False]=..., axis: Axis | None=..., level: Level=...) -> Self: ... @overload def where(self, cond, other=..., *, inplace: Literal[True], axis: Axis | None=..., level: Level=...) -> None: ... @overload def where(self, cond, other=..., *, inplace: bool=..., axis: Axis | None=..., level: Level=...) -> Self | None: ... @final @doc(klass=_shared_doc_kwargs['klass'], cond='True', cond_rev='False', name='where', name_other='mask') def where(self, cond, other=np.nan, *, inplace: bool=False, axis: Axis | None=None, level: Level | None=None) -> Self | None: inplace = validate_bool_kwarg(inplace, 'inplace') if inplace: if not PYPY: if sys.getrefcount(self) <= REF_COUNT: warnings.warn(_chained_assignment_method_msg, ChainedAssignmentError, stacklevel=2) other = common.apply_if_callable(other, self) return self._where(cond, other, inplace=inplace, axis=axis, level=level) @overload def mask(self, cond, other=..., *, inplace: Literal[False]=..., axis: Axis | None=..., level: Level=...) -> Self: ... @overload def mask(self, cond, other=..., *, inplace: Literal[True], axis: Axis | None=..., level: Level=...) -> None: ... @overload def mask(self, cond, other=..., *, inplace: bool=..., axis: Axis | None=..., level: Level=...) -> Self | None: ... @final @doc(where, klass=_shared_doc_kwargs['klass'], cond='False', cond_rev='True', name='mask', name_other='where') def mask(self, cond, other=lib.no_default, *, inplace: bool=False, axis: Axis | None=None, level: Level | None=None) -> Self | None: inplace = validate_bool_kwarg(inplace, 'inplace') if inplace: if not PYPY: if sys.getrefcount(self) <= REF_COUNT: warnings.warn(_chained_assignment_method_msg, ChainedAssignmentError, stacklevel=2) cond = common.apply_if_callable(cond, self) other = common.apply_if_callable(other, self) if not hasattr(cond, '__invert__'): cond = np.array(cond) return self._where(~cond, other=other, inplace=inplace, axis=axis, level=level) @doc(klass=_shared_doc_kwargs['klass']) def shift(self, periods: int | Sequence[int]=1, freq=None, axis: Axis=0, fill_value: Hashable=lib.no_default, suffix: str | None=None) -> Self | DataFrame: axis = self._get_axis_number(axis) if freq is not None and fill_value is not lib.no_default: raise ValueError("Passing a 'freq' together with a 'fill_value' is not allowed.") if periods == 0: return self.copy(deep=False) if is_list_like(periods) and isinstance(self, ABCSeries): return self.to_frame().shift(periods=periods, freq=freq, axis=axis, fill_value=fill_value) periods = cast(int, periods) if freq is None: axis = self._get_axis_number(axis) assert axis == 0 new_data = self._mgr.shift(periods=periods, fill_value=fill_value) return self._constructor_from_mgr(new_data, axes=new_data.axes).__finalize__(self, method='shift') return self._shift_with_freq(periods, axis, freq) @final def _shift_with_freq(self, periods: int, axis: int, freq) -> Self: index = self._get_axis(axis) if freq == 'infer': freq = getattr(index, 'freq', None) if freq is None: freq = getattr(index, 'inferred_freq', None) if freq is None: msg = 'Freq was not set in the index hence cannot 
be inferred' raise ValueError(msg) elif isinstance(freq, str): is_period = isinstance(index, PeriodIndex) freq = to_offset(freq, is_period=is_period) if isinstance(index, PeriodIndex): orig_freq = to_offset(index.freq) if freq != orig_freq: assert orig_freq is not None raise ValueError(f'Given freq {PeriodDtype(freq)._freqstr} does not match PeriodIndex freq {PeriodDtype(orig_freq)._freqstr}') new_ax: Index = index.shift(periods) else: new_ax = index.shift(periods, freq) result = self.set_axis(new_ax, axis=axis) return result.__finalize__(self, method='shift') @final def truncate(self, before=None, after=None, axis: Axis | None=None, copy: bool | lib.NoDefault=lib.no_default) -> Self: self._check_copy_deprecation(copy) if axis is None: axis = 0 axis = self._get_axis_number(axis) ax = self._get_axis(axis) if not ax.is_monotonic_increasing and (not ax.is_monotonic_decreasing): raise ValueError('truncate requires a sorted index') if ax._is_all_dates: from pandas.core.tools.datetimes import to_datetime before = to_datetime(before) after = to_datetime(after) if before is not None and after is not None and (before > after): raise ValueError(f'Truncate: {after} must be after {before}') if len(ax) > 1 and ax.is_monotonic_decreasing and (ax.nunique() > 1): (before, after) = (after, before) slicer = [slice(None, None)] * self._AXIS_LEN slicer[axis] = slice(before, after) result = self.loc[tuple(slicer)] if isinstance(ax, MultiIndex): setattr(result, self._get_axis_name(axis), ax.truncate(before, after)) result = result.copy(deep=False) return result @final @doc(klass=_shared_doc_kwargs['klass']) def tz_convert(self, tz, axis: Axis=0, level=None, copy: bool | lib.NoDefault=lib.no_default) -> Self: self._check_copy_deprecation(copy) axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_convert(ax, tz): if not hasattr(ax, 'tz_convert'): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError(f'{ax_name} is not a valid DatetimeIndex or PeriodIndex') ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_convert(tz) return ax if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_convert(ax.levels[level], tz) ax = ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f'The level {level} is not valid') ax = _tz_convert(ax, tz) result = self.copy(deep=False) result = result.set_axis(ax, axis=axis) return result.__finalize__(self, method='tz_convert') @final @doc(klass=_shared_doc_kwargs['klass']) def tz_localize(self, tz, axis: Axis=0, level=None, copy: bool | lib.NoDefault=lib.no_default, ambiguous: TimeAmbiguous='raise', nonexistent: TimeNonexistent='raise') -> Self: self._check_copy_deprecation(copy) nonexistent_options = ('raise', 'NaT', 'shift_forward', 'shift_backward') if nonexistent not in nonexistent_options and (not isinstance(nonexistent, dt.timedelta)): raise ValueError("The nonexistent argument must be one of 'raise', 'NaT', 'shift_forward', 'shift_backward' or a timedelta object") axis = self._get_axis_number(axis) ax = self._get_axis(axis) def _tz_localize(ax, tz, ambiguous, nonexistent): if not hasattr(ax, 'tz_localize'): if len(ax) > 0: ax_name = self._get_axis_name(axis) raise TypeError(f'{ax_name} is not a valid DatetimeIndex or PeriodIndex') ax = DatetimeIndex([], tz=tz) else: ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent) return ax if isinstance(ax, MultiIndex): level = ax._get_level_number(level) new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent) ax = 
ax.set_levels(new_level, level=level) else: if level not in (None, 0, ax.name): raise ValueError(f'The level {level} is not valid') ax = _tz_localize(ax, tz, ambiguous, nonexistent) result = self.copy(deep=False) result = result.set_axis(ax, axis=axis) return result.__finalize__(self, method='tz_localize') @final def describe(self, percentiles=None, include=None, exclude=None) -> Self: return describe_ndframe(obj=self, include=include, exclude=exclude, percentiles=percentiles).__finalize__(self, method='describe') @final def pct_change(self, periods: int=1, fill_method: None=None, freq=None, **kwargs) -> Self: if fill_method is not None: raise ValueError(f'fill_method must be None; got fill_method={fill_method!r}.') axis = self._get_axis_number(kwargs.pop('axis', 'index')) shifted = self.shift(periods=periods, freq=freq, axis=axis, **kwargs) rs = self / shifted - 1 if freq is not None: rs = rs.loc[~rs.index.duplicated()] rs = rs.reindex_like(self) return rs.__finalize__(self, method='pct_change') @final def _logical_func(self, name: str, func, axis: Axis | None=0, bool_only: bool=False, skipna: bool=True, **kwargs) -> Series | bool: nv.validate_logical_func((), kwargs, fname=name) validate_bool_kwarg(skipna, 'skipna', none_allowed=False) if self.ndim > 1 and axis is None: res = self._logical_func(name, func, axis=0, bool_only=bool_only, skipna=skipna, **kwargs) return res._logical_func(name, func, skipna=skipna, **kwargs) elif axis is None: axis = 0 if self.ndim > 1 and axis == 1 and (len(self._mgr.blocks) > 1) and all((block.values.ndim == 2 for block in self._mgr.blocks)) and (not kwargs): obj = self if bool_only: obj = self._get_bool_data() return obj._reduce_axis1(name, func, skipna=skipna) return self._reduce(func, name=name, axis=axis, skipna=skipna, numeric_only=bool_only, filter_type='bool') def any(self, *, axis: Axis | None=0, bool_only: bool=False, skipna: bool=True, **kwargs) -> Series | bool: return self._logical_func('any', nanops.nanany, axis, bool_only, skipna, **kwargs) def all(self, *, axis: Axis=0, bool_only: bool=False, skipna: bool=True, **kwargs) -> Series | bool: return self._logical_func('all', nanops.nanall, axis, bool_only, skipna, **kwargs) @final def _accum_func(self, name: str, func, axis: Axis | None=None, skipna: bool=True, *args, **kwargs): skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name) if axis is None: axis = 0 else: axis = self._get_axis_number(axis) if axis == 1: return self.T._accum_func(name, func, *args, axis=0, skipna=skipna, **kwargs).T def block_accum_func(blk_values): values = blk_values.T if hasattr(blk_values, 'T') else blk_values result: np.ndarray | ExtensionArray if isinstance(values, ExtensionArray): result = values._accumulate(name, skipna=skipna, **kwargs) else: result = nanops.na_accum_func(values, func, skipna=skipna) result = result.T if hasattr(result, 'T') else result return result result = self._mgr.apply(block_accum_func) return self._constructor_from_mgr(result, axes=result.axes).__finalize__(self, method=name) def cummax(self, axis: Axis=0, skipna: bool=True, *args, **kwargs) -> Self: return self._accum_func('cummax', np.maximum.accumulate, axis, skipna, *args, **kwargs) def cummin(self, axis: Axis=0, skipna: bool=True, *args, **kwargs) -> Self: return self._accum_func('cummin', np.minimum.accumulate, axis, skipna, *args, **kwargs) def cumsum(self, axis: Axis=0, skipna: bool=True, *args, **kwargs) -> Self: return self._accum_func('cumsum', np.cumsum, axis, skipna, *args, **kwargs) def cumprod(self, axis: 
Axis=0, skipna: bool=True, *args, **kwargs) -> Self: return self._accum_func('cumprod', np.cumprod, axis, skipna, *args, **kwargs) @final def _stat_function_ddof(self, name: str, func, axis: Axis | None=0, skipna: bool=True, ddof: int=1, numeric_only: bool=False, **kwargs) -> Series | float: nv.validate_stat_ddof_func((), kwargs, fname=name) validate_bool_kwarg(skipna, 'skipna', none_allowed=False) return self._reduce(func, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof) def sem(self, *, axis: Axis | None=0, skipna: bool=True, ddof: int=1, numeric_only: bool=False, **kwargs) -> Series | float: return self._stat_function_ddof('sem', nanops.nansem, axis, skipna, ddof, numeric_only, **kwargs) def var(self, *, axis: Axis | None=0, skipna: bool=True, ddof: int=1, numeric_only: bool=False, **kwargs) -> Series | float: return self._stat_function_ddof('var', nanops.nanvar, axis, skipna, ddof, numeric_only, **kwargs) def std(self, *, axis: Axis | None=0, skipna: bool=True, ddof: int=1, numeric_only: bool=False, **kwargs) -> Series | float: return self._stat_function_ddof('std', nanops.nanstd, axis, skipna, ddof, numeric_only, **kwargs) @final def _stat_function(self, name: str, func, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs): assert name in ['median', 'mean', 'min', 'max', 'kurt', 'skew'], name nv.validate_func(name, (), kwargs) validate_bool_kwarg(skipna, 'skipna', none_allowed=False) return self._reduce(func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only) def min(self, *, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs): return self._stat_function('min', nanops.nanmin, axis, skipna, numeric_only, **kwargs) def max(self, *, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs): return self._stat_function('max', nanops.nanmax, axis, skipna, numeric_only, **kwargs) def mean(self, *, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs) -> Series | float: return self._stat_function('mean', nanops.nanmean, axis, skipna, numeric_only, **kwargs) def median(self, *, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs) -> Series | float: return self._stat_function('median', nanops.nanmedian, axis, skipna, numeric_only, **kwargs) def skew(self, *, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs) -> Series | float: return self._stat_function('skew', nanops.nanskew, axis, skipna, numeric_only, **kwargs) def kurt(self, *, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs) -> Series | float: return self._stat_function('kurt', nanops.nankurt, axis, skipna, numeric_only, **kwargs) kurtosis = kurt @final def _min_count_stat_function(self, name: str, func, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, min_count: int=0, **kwargs): assert name in ['sum', 'prod'], name nv.validate_func(name, (), kwargs) validate_bool_kwarg(skipna, 'skipna', none_allowed=False) return self._reduce(func, name=name, axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count) def sum(self, *, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, min_count: int=0, **kwargs): return self._min_count_stat_function('sum', nanops.nansum, axis, skipna, numeric_only, min_count, **kwargs) def prod(self, *, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, min_count: int=0, **kwargs): return self._min_count_stat_function('prod', nanops.nanprod, axis, skipna, numeric_only, 
min_count, **kwargs) product = prod @final @doc(Rolling) def rolling(self, window: int | dt.timedelta | str | BaseOffset | BaseIndexer, min_periods: int | None=None, center: bool=False, win_type: str | None=None, on: str | None=None, closed: IntervalClosedType | None=None, step: int | None=None, method: str='single') -> Window | Rolling: if win_type is not None: return Window(self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, closed=closed, step=step, method=method) return Rolling(self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, closed=closed, step=step, method=method) @final @doc(Expanding) def expanding(self, min_periods: int=1, method: Literal['single', 'table']='single') -> Expanding: return Expanding(self, min_periods=min_periods, method=method) @final @doc(ExponentialMovingWindow) def ewm(self, com: float | None=None, span: float | None=None, halflife: float | TimedeltaConvertibleTypes | None=None, alpha: float | None=None, min_periods: int | None=0, adjust: bool=True, ignore_na: bool=False, times: np.ndarray | DataFrame | Series | None=None, method: Literal['single', 'table']='single') -> ExponentialMovingWindow: return ExponentialMovingWindow(self, com=com, span=span, halflife=halflife, alpha=alpha, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, times=times, method=method) @final def _inplace_method(self, other, op) -> Self: result = op(self, other) self._update_inplace(result.reindex_like(self)) return self @final def __iadd__(self, other) -> Self: return self._inplace_method(other, type(self).__add__) @final def __isub__(self, other) -> Self: return self._inplace_method(other, type(self).__sub__) @final def __imul__(self, other) -> Self: return self._inplace_method(other, type(self).__mul__) @final def __itruediv__(self, other) -> Self: return self._inplace_method(other, type(self).__truediv__) @final def __ifloordiv__(self, other) -> Self: return self._inplace_method(other, type(self).__floordiv__) @final def __imod__(self, other) -> Self: return self._inplace_method(other, type(self).__mod__) @final def __ipow__(self, other) -> Self: return self._inplace_method(other, type(self).__pow__) @final def __iand__(self, other) -> Self: return self._inplace_method(other, type(self).__and__) @final def __ior__(self, other) -> Self: return self._inplace_method(other, type(self).__or__) @final def __ixor__(self, other) -> Self: return self._inplace_method(other, type(self).__xor__) @final def _find_valid_index(self, *, how: str) -> Hashable: is_valid = self.notna().values idxpos = find_valid_index(how=how, is_valid=is_valid) if idxpos is None: return None return self.index[idxpos] @final @doc(position='first', klass=_shared_doc_kwargs['klass']) def first_valid_index(self) -> Hashable: return self._find_valid_index(how='first') @final @doc(first_valid_index, position='last', klass=_shared_doc_kwargs['klass']) def last_valid_index(self) -> Hashable: return self._find_valid_index(how='last') _num_doc = '\n{desc}\n\nParameters\n----------\naxis : {axis_descr}\n Axis for the function to be applied on.\n For `Series` this parameter is unused and defaults to 0.\n\n For DataFrames, specifying ``axis=None`` will apply the aggregation\n across both axes.\n\n .. 
versionadded:: 2.0.0\n\nskipna : bool, default True\n Exclude NA/null values when computing the result.\nnumeric_only : bool, default False\n Include only float, int, boolean columns.\n\n{min_count}**kwargs\n Additional keyword arguments to be passed to the function.\n\nReturns\n-------\n{name1} or scalar\n Value containing the calculation referenced in the description.{see_also}{examples}\n' _sum_prod_doc = '\n{desc}\n\nParameters\n----------\naxis : {axis_descr}\n Axis for the function to be applied on.\n For `Series` this parameter is unused and defaults to 0.\n\n .. warning::\n\n The behavior of DataFrame.{name} with ``axis=None`` is deprecated,\n in a future version this will reduce over both axes and return a scalar\n To retain the old behavior, pass axis=0 (or do not pass axis).\n\n .. versionadded:: 2.0.0\n\nskipna : bool, default True\n Exclude NA/null values when computing the result.\nnumeric_only : bool, default False\n Include only float, int, boolean columns. Not implemented for Series.\n\n{min_count}**kwargs\n Additional keyword arguments to be passed to the function.\n\nReturns\n-------\n{name1} or scalar\n Value containing the calculation referenced in the description.{see_also}{examples}\n' _num_ddof_doc = '\n{desc}\n\nParameters\n----------\naxis : {axis_descr}\n For `Series` this parameter is unused and defaults to 0.\n\n .. warning::\n\n The behavior of DataFrame.{name} with ``axis=None`` is deprecated,\n in a future version this will reduce over both axes and return a scalar\n To retain the old behavior, pass axis=0 (or do not pass axis).\n\nskipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\nddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations is N - ddof,\n where N represents the number of elements.\nnumeric_only : bool, default False\n Include only float, int, boolean columns. Not implemented for Series.\n**kwargs :\n Additional keywords have no effect but might be accepted\n for compatibility with NumPy.\n\nReturns\n-------\n{name1} or {name2} (if level specified)\n {return_desc}\n\nSee Also\n--------\n{see_also}{notes}{examples}\n' _sem_see_also = 'scipy.stats.sem : Compute standard error of the mean.\n{name2}.std : Return sample standard deviation over requested axis.\n{name2}.var : Return unbiased variance over requested axis.\n{name2}.mean : Return the mean of the values over the requested axis.\n{name2}.median : Return the median of the values over the requested axis.\n{name2}.mode : Return the mode(s) of the Series.' _sem_return_desc = 'Unbiased standard error of the mean over requested axis.' _std_see_also = 'numpy.std : Compute the standard deviation along the specified axis.\n{name2}.var : Return unbiased variance over requested axis.\n{name2}.sem : Return unbiased standard error of the mean over requested axis.\n{name2}.mean : Return the mean of the values over the requested axis.\n{name2}.median : Return the median of the values over the requested axis.\n{name2}.mode : Return the mode(s) of the Series.' _std_return_desc = 'Standard deviation over requested axis.' _std_notes = '\n\nNotes\n-----\nTo have the same behaviour as `numpy.std`, use `ddof=0` (instead of the\ndefault `ddof=1`)' _std_examples = "\n\nExamples\n--------\n>>> df = pd.DataFrame({'person_id': [0, 1, 2, 3],\n... 'age': [21, 25, 62, 43],\n... 'height': [1.61, 1.87, 1.49, 2.01]}\n... 
).set_index('person_id')\n>>> df\n age height\nperson_id\n0 21 1.61\n1 25 1.87\n2 62 1.49\n3 43 2.01\n\nThe standard deviation of the columns can be found as follows:\n\n>>> df.std()\nage 18.786076\nheight 0.237417\ndtype: float64\n\nAlternatively, `ddof=0` can be set to normalize by N instead of N-1:\n\n>>> df.std(ddof=0)\nage 16.269219\nheight 0.205609\ndtype: float64" _var_examples = "\n\nExamples\n--------\n>>> df = pd.DataFrame({'person_id': [0, 1, 2, 3],\n... 'age': [21, 25, 62, 43],\n... 'height': [1.61, 1.87, 1.49, 2.01]}\n... ).set_index('person_id')\n>>> df\n age height\nperson_id\n0 21 1.61\n1 25 1.87\n2 62 1.49\n3 43 2.01\n\n>>> df.var()\nage 352.916667\nheight 0.056367\ndtype: float64\n\nAlternatively, ``ddof=0`` can be set to normalize by N instead of N-1:\n\n>>> df.var(ddof=0)\nage 264.687500\nheight 0.042275\ndtype: float64" _bool_doc = "\n{desc}\n\nParameters\n----------\naxis : {{0 or 'index', 1 or 'columns', None}}, default 0\n Indicate which axis or axes should be reduced. For `Series` this parameter\n is unused and defaults to 0.\n\n * 0 / 'index' : reduce the index, return a Series whose index is the\n original column labels.\n * 1 / 'columns' : reduce the columns, return a Series whose index is the\n original index.\n * None : reduce all axes, return a scalar.\n\nbool_only : bool, default False\n Include only boolean columns. Not implemented for Series.\nskipna : bool, default True\n Exclude NA/null values. If the entire row/column is NA and skipna is\n True, then the result will be {empty_value}, as for an empty row/column.\n If skipna is False, then NA are treated as True, because these are not\n equal to zero.\n**kwargs : any, default None\n Additional keywords have no effect but might be accepted for\n compatibility with NumPy.\n\nReturns\n-------\n{name2} or {name1}\n If axis=None, then a scalar boolean is returned.\n Otherwise a Series is returned with index matching the index argument.\n\n{see_also}\n{examples}" _all_desc = 'Return whether all elements are True, potentially over an axis.\n\nReturns True unless there at least one element within a series or\nalong a Dataframe axis that is False or equivalent (e.g. zero or\nempty).' _all_examples = 'Examples\n--------\n**Series**\n\n>>> pd.Series([True, True]).all()\nTrue\n>>> pd.Series([True, False]).all()\nFalse\n>>> pd.Series([], dtype="float64").all()\nTrue\n>>> pd.Series([np.nan]).all()\nTrue\n>>> pd.Series([np.nan]).all(skipna=False)\nTrue\n\n**DataFrames**\n\nCreate a DataFrame from a dictionary.\n\n>>> df = pd.DataFrame({\'col1\': [True, True], \'col2\': [True, False]})\n>>> df\n col1 col2\n0 True True\n1 True False\n\nDefault behaviour checks if values in each column all return True.\n\n>>> df.all()\ncol1 True\ncol2 False\ndtype: bool\n\nSpecify ``axis=\'columns\'`` to check if values in each row all return True.\n\n>>> df.all(axis=\'columns\')\n0 True\n1 False\ndtype: bool\n\nOr ``axis=None`` for whether every value is True.\n\n>>> df.all(axis=None)\nFalse\n' _all_see_also = 'See Also\n--------\nSeries.all : Return True if all elements are True.\nDataFrame.any : Return True if one (or more) elements are True.\n' _cnum_pd_doc = "\nReturn cumulative {desc} over a DataFrame or Series axis.\n\nReturns a DataFrame or Series of the same size containing the cumulative\n{desc}.\n\nParameters\n----------\naxis : {{0 or 'index', 1 or 'columns'}}, default 0\n The index or the name of the axis. 
0 is equivalent to None or 'index'.\n For `Series` this parameter is unused and defaults to 0.\nskipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\nnumeric_only : bool, default False\n Include only float, int, boolean columns.\n*args, **kwargs\n Additional keywords have no effect but might be accepted for\n compatibility with NumPy.\n\nReturns\n-------\n{name1} or {name2}\n Return cumulative {desc} of {name1} or {name2}.\n\nSee Also\n--------\ncore.window.expanding.Expanding.{accum_func_name} : Similar functionality\n but ignores ``NaN`` values.\n{name2}.{accum_func_name} : Return the {desc} over\n {name2} axis.\n{name2}.cummax : Return cumulative maximum over {name2} axis.\n{name2}.cummin : Return cumulative minimum over {name2} axis.\n{name2}.cumsum : Return cumulative sum over {name2} axis.\n{name2}.cumprod : Return cumulative product over {name2} axis.\n\n{examples}" _cnum_series_doc = "\nReturn cumulative {desc} over a DataFrame or Series axis.\n\nReturns a DataFrame or Series of the same size containing the cumulative\n{desc}.\n\nParameters\n----------\naxis : {{0 or 'index', 1 or 'columns'}}, default 0\n The index or the name of the axis. 0 is equivalent to None or 'index'.\n For `Series` this parameter is unused and defaults to 0.\nskipna : bool, default True\n Exclude NA/null values. If an entire row/column is NA, the result\n will be NA.\n*args, **kwargs\n Additional keywords have no effect but might be accepted for\n compatibility with NumPy.\n\nReturns\n-------\n{name1} or {name2}\n Return cumulative {desc} of {name1} or {name2}.\n\nSee Also\n--------\ncore.window.expanding.Expanding.{accum_func_name} : Similar functionality\n but ignores ``NaN`` values.\n{name2}.{accum_func_name} : Return the {desc} over\n {name2} axis.\n{name2}.cummax : Return cumulative maximum over {name2} axis.\n{name2}.cummin : Return cumulative minimum over {name2} axis.\n{name2}.cumsum : Return cumulative sum over {name2} axis.\n{name2}.cumprod : Return cumulative product over {name2} axis.\n\n{examples}" _cummin_examples = "Examples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cummin()\n0 2.0\n1 NaN\n2 2.0\n3 -1.0\n4 -1.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cummin(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the minimum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cummin()\n A B\n0 2.0 1.0\n1 2.0 NaN\n2 1.0 0.0\n\nTo iterate over columns and find the minimum in each row,\nuse ``axis=1``\n\n>>> df.cummin(axis=1)\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n" _cumsum_examples = "Examples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cumsum()\n0 2.0\n1 NaN\n2 7.0\n3 6.0\n4 6.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cumsum(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... 
columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the sum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cumsum()\n A B\n0 2.0 1.0\n1 5.0 NaN\n2 6.0 1.0\n\nTo iterate over columns and find the sum in each row,\nuse ``axis=1``\n\n>>> df.cumsum(axis=1)\n A B\n0 2.0 3.0\n1 3.0 NaN\n2 1.0 1.0\n" _cumprod_examples = "Examples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cumprod()\n0 2.0\n1 NaN\n2 10.0\n3 -10.0\n4 -0.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cumprod(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the product\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cumprod()\n A B\n0 2.0 1.0\n1 6.0 NaN\n2 6.0 0.0\n\nTo iterate over columns and find the product in each row,\nuse ``axis=1``\n\n>>> df.cumprod(axis=1)\n A B\n0 2.0 2.0\n1 3.0 NaN\n2 1.0 0.0\n" _cummax_examples = "Examples\n--------\n**Series**\n\n>>> s = pd.Series([2, np.nan, 5, -1, 0])\n>>> s\n0 2.0\n1 NaN\n2 5.0\n3 -1.0\n4 0.0\ndtype: float64\n\nBy default, NA values are ignored.\n\n>>> s.cummax()\n0 2.0\n1 NaN\n2 5.0\n3 5.0\n4 5.0\ndtype: float64\n\nTo include NA values in the operation, use ``skipna=False``\n\n>>> s.cummax(skipna=False)\n0 2.0\n1 NaN\n2 NaN\n3 NaN\n4 NaN\ndtype: float64\n\n**DataFrame**\n\n>>> df = pd.DataFrame([[2.0, 1.0],\n... [3.0, np.nan],\n... [1.0, 0.0]],\n... columns=list('AB'))\n>>> df\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 1.0 0.0\n\nBy default, iterates over rows and finds the maximum\nin each column. This is equivalent to ``axis=None`` or ``axis='index'``.\n\n>>> df.cummax()\n A B\n0 2.0 1.0\n1 3.0 NaN\n2 3.0 1.0\n\nTo iterate over columns and find the maximum in each row,\nuse ``axis=1``\n\n>>> df.cummax(axis=1)\n A B\n0 2.0 2.0\n1 3.0 NaN\n2 1.0 1.0\n" _any_see_also = 'See Also\n--------\nnumpy.any : Numpy version of this method.\nSeries.any : Return whether any element is True.\nSeries.all : Return whether all elements are True.\nDataFrame.any : Return whether any element is True over requested axis.\nDataFrame.all : Return whether all elements are True over requested axis.\n' _any_desc = 'Return whether any element is True, potentially over an axis.\n\nReturns False unless there is at least one element within a series or\nalong a Dataframe axis that is True or equivalent (e.g. non-zero or\nnon-empty).' 
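# --- Editor's illustrative note (not part of the pandas source) ---------------------
# The cum* docstring templates above describe the public behaviour of the _accum_func
# dispatch defined earlier in this file: NumPy-backed blocks are routed through
# nanops.na_accum_func, while ExtensionArray blocks use their own
# _accumulate(name, skipna=...). A minimal doctest-style sketch of the observable
# behaviour, mirroring the values already shown in _cumsum_examples:
#
#     >>> s = pd.Series([2.0, float("nan"), 5.0])
#     >>> s.cumsum().tolist()              # NaN is skipped but kept in place
#     [2.0, nan, 7.0]
#     >>> s.cumsum(skipna=False).tolist()  # NaN propagates once encountered
#     [2.0, nan, nan]
# -------------------------------------------------------------------------------------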
_any_examples = 'Examples\n--------\n**Series**\n\nFor Series input, the output is a scalar indicating whether any element\nis True.\n\n>>> pd.Series([False, False]).any()\nFalse\n>>> pd.Series([True, False]).any()\nTrue\n>>> pd.Series([], dtype="float64").any()\nFalse\n>>> pd.Series([np.nan]).any()\nFalse\n>>> pd.Series([np.nan]).any(skipna=False)\nTrue\n\n**DataFrame**\n\nWhether each column contains at least one True element (the default).\n\n>>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]})\n>>> df\n A B C\n0 1 0 0\n1 2 2 0\n\n>>> df.any()\nA True\nB True\nC False\ndtype: bool\n\nAggregating over the columns.\n\n>>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]})\n>>> df\n A B\n0 True 1\n1 False 2\n\n>>> df.any(axis=\'columns\')\n0 True\n1 True\ndtype: bool\n\n>>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]})\n>>> df\n A B\n0 True 1\n1 False 0\n\n>>> df.any(axis=\'columns\')\n0 True\n1 False\ndtype: bool\n\nAggregating over the entire DataFrame with ``axis=None``.\n\n>>> df.any(axis=None)\nTrue\n\n`any` for an empty DataFrame is an empty Series.\n\n>>> pd.DataFrame([]).any()\nSeries([], dtype: bool)\n' _shared_docs['stat_func_example'] = "\n\nExamples\n--------\n>>> idx = pd.MultiIndex.from_arrays([\n... ['warm', 'warm', 'cold', 'cold'],\n... ['dog', 'falcon', 'fish', 'spider']],\n... names=['blooded', 'animal'])\n>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)\n>>> s\nblooded animal\nwarm dog 4\n falcon 2\ncold fish 0\n spider 8\nName: legs, dtype: int64\n\n>>> s.{stat_func}()\n{default_output}" _sum_examples = _shared_docs['stat_func_example'].format(stat_func='sum', verb='Sum', default_output=14, level_output_0=6, level_output_1=8) _sum_examples += '\n\nBy default, the sum of an empty or all-NA Series is ``0``.\n\n>>> pd.Series([], dtype="float64").sum() # min_count=0 is the default\n0.0\n\nThis can be controlled with the ``min_count`` parameter. For example, if\nyou\'d like the sum of an empty series to be NaN, pass ``min_count=1``.\n\n>>> pd.Series([], dtype="float64").sum(min_count=1)\nnan\n\nThanks to the ``skipna`` parameter, ``min_count`` handles all-NA and\nempty series identically.\n\n>>> pd.Series([np.nan]).sum()\n0.0\n\n>>> pd.Series([np.nan]).sum(min_count=1)\nnan' _max_examples: str = _shared_docs['stat_func_example'].format(stat_func='max', verb='Max', default_output=8, level_output_0=4, level_output_1=8) _min_examples: str = _shared_docs['stat_func_example'].format(stat_func='min', verb='Min', default_output=0, level_output_0=2, level_output_1=0) _skew_see_also = '\n\nSee Also\n--------\nSeries.skew : Return unbiased skew over requested axis.\nSeries.var : Return unbiased variance over requested axis.\nSeries.std : Return unbiased standard deviation over requested axis.' _stat_func_see_also = '\n\nSee Also\n--------\nSeries.sum : Return the sum.\nSeries.min : Return the minimum.\nSeries.max : Return the maximum.\nSeries.idxmin : Return the index of the minimum.\nSeries.idxmax : Return the index of the maximum.\nDataFrame.sum : Return the sum over the requested axis.\nDataFrame.min : Return the minimum over the requested axis.\nDataFrame.max : Return the maximum over the requested axis.\nDataFrame.idxmin : Return the index of the minimum over the requested axis.\nDataFrame.idxmax : Return the index of the maximum over the requested axis.' 
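# --- Editor's illustrative note (not part of the pandas source) ---------------------
# The *_doc / *_examples strings above are str.format templates; make_doc() (defined
# just below) picks a base template by reduction name and ndim and fills in the
# description, see-also and examples pieces. A hedged sketch of how a concrete
# docstring could be rendered from them (the import path is assumed from this file's
# location; make_doc is defined at module level here):
#
#     >>> from pandas.core.generic import make_doc
#     >>> doc = make_doc("sum", ndim=2)   # DataFrame variant of the template
#     >>> doc.splitlines()[1]             # first line of the filled-in {desc}
#     'Return the sum of the values over the requested axis.'
# -------------------------------------------------------------------------------------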
_prod_examples = '\n\nExamples\n--------\nBy default, the product of an empty or all-NA Series is ``1``\n\n>>> pd.Series([], dtype="float64").prod()\n1.0\n\nThis can be controlled with the ``min_count`` parameter\n\n>>> pd.Series([], dtype="float64").prod(min_count=1)\nnan\n\nThanks to the ``skipna`` parameter, ``min_count`` handles all-NA and\nempty series identically.\n\n>>> pd.Series([np.nan]).prod()\n1.0\n\n>>> pd.Series([np.nan]).prod(min_count=1)\nnan' _min_count_stub = 'min_count : int, default 0\n The required number of valid values to perform the operation. If fewer than\n ``min_count`` non-NA values are present the result will be NA.\n' def make_doc(name: str, ndim: int) -> str: if ndim == 1: name1 = 'scalar' name2 = 'Series' axis_descr = '{index (0)}' else: name1 = 'Series' name2 = 'DataFrame' axis_descr = '{index (0), columns (1)}' if name == 'any': base_doc = _bool_doc desc = _any_desc see_also = _any_see_also examples = _any_examples kwargs = {'empty_value': 'False'} elif name == 'all': base_doc = _bool_doc desc = _all_desc see_also = _all_see_also examples = _all_examples kwargs = {'empty_value': 'True'} elif name == 'min': base_doc = _num_doc desc = 'Return the minimum of the values over the requested axis.\n\nIf you want the *index* of the minimum, use ``idxmin``. This is the equivalent of the ``numpy.ndarray`` method ``argmin``.' see_also = _stat_func_see_also examples = _min_examples kwargs = {'min_count': ''} elif name == 'max': base_doc = _num_doc desc = 'Return the maximum of the values over the requested axis.\n\nIf you want the *index* of the maximum, use ``idxmax``. This is the equivalent of the ``numpy.ndarray`` method ``argmax``.' see_also = _stat_func_see_also examples = _max_examples kwargs = {'min_count': ''} elif name == 'sum': base_doc = _sum_prod_doc desc = 'Return the sum of the values over the requested axis.\n\nThis is equivalent to the method ``numpy.sum``.' see_also = _stat_func_see_also examples = _sum_examples kwargs = {'min_count': _min_count_stub} elif name == 'prod': base_doc = _sum_prod_doc desc = 'Return the product of the values over the requested axis.' see_also = _stat_func_see_also examples = _prod_examples kwargs = {'min_count': _min_count_stub} elif name == 'median': base_doc = _num_doc desc = 'Return the median of the values over the requested axis.' see_also = _stat_func_see_also examples = "\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3])\n >>> s.median()\n 2.0\n\n With a DataFrame\n\n >>> df = pd.DataFrame({'a': [1, 2], 'b': [2, 3]}, index=['tiger', 'zebra'])\n >>> df\n a b\n tiger 1 2\n zebra 2 3\n >>> df.median()\n a 1.5\n b 2.5\n dtype: float64\n\n Using axis=1\n\n >>> df.median(axis=1)\n tiger 1.5\n zebra 2.5\n dtype: float64\n\n In this case, `numeric_only` should be set to `True`\n to avoid getting an error.\n\n >>> df = pd.DataFrame({'a': [1, 2], 'b': ['T', 'Z']},\n ... index=['tiger', 'zebra'])\n >>> df.median(numeric_only=True)\n a 1.5\n dtype: float64" kwargs = {'min_count': ''} elif name == 'mean': base_doc = _num_doc desc = 'Return the mean of the values over the requested axis.' 
see_also = _stat_func_see_also examples = "\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3])\n >>> s.mean()\n 2.0\n\n With a DataFrame\n\n >>> df = pd.DataFrame({'a': [1, 2], 'b': [2, 3]}, index=['tiger', 'zebra'])\n >>> df\n a b\n tiger 1 2\n zebra 2 3\n >>> df.mean()\n a 1.5\n b 2.5\n dtype: float64\n\n Using axis=1\n\n >>> df.mean(axis=1)\n tiger 1.5\n zebra 2.5\n dtype: float64\n\n In this case, `numeric_only` should be set to `True` to avoid\n getting an error.\n\n >>> df = pd.DataFrame({'a': [1, 2], 'b': ['T', 'Z']},\n ... index=['tiger', 'zebra'])\n >>> df.mean(numeric_only=True)\n a 1.5\n dtype: float64" kwargs = {'min_count': ''} elif name == 'var': base_doc = _num_ddof_doc desc = 'Return unbiased variance over requested axis.\n\nNormalized by N-1 by default. This can be changed using the ddof argument.' examples = _var_examples see_also = '' kwargs = {'notes': ''} elif name == 'std': base_doc = _num_ddof_doc desc = 'Return sample standard deviation over requested axis.\n\nNormalized by N-1 by default. This can be changed using the ddof argument.' examples = _std_examples see_also = _std_see_also.format(name2=name2) kwargs = {'notes': '', 'return_desc': _std_return_desc} elif name == 'sem': base_doc = _num_ddof_doc desc = 'Return unbiased standard error of the mean over requested axis.\n\nNormalized by N-1 by default. This can be changed using the ddof argument' examples = "\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3])\n >>> s.sem().round(6)\n 0.57735\n\n With a DataFrame\n\n >>> df = pd.DataFrame({'a': [1, 2], 'b': [2, 3]}, index=['tiger', 'zebra'])\n >>> df\n a b\n tiger 1 2\n zebra 2 3\n >>> df.sem()\n a 0.5\n b 0.5\n dtype: float64\n\n Using axis=1\n\n >>> df.sem(axis=1)\n tiger 0.5\n zebra 0.5\n dtype: float64\n\n In this case, `numeric_only` should be set to `True`\n to avoid getting an error.\n\n >>> df = pd.DataFrame({'a': [1, 2], 'b': ['T', 'Z']},\n ... index=['tiger', 'zebra'])\n >>> df.sem(numeric_only=True)\n a 0.5\n dtype: float64" see_also = _sem_see_also.format(name2=name2) kwargs = {'notes': '', 'return_desc': _sem_return_desc} elif name == 'skew': base_doc = _num_doc desc = 'Return unbiased skew over requested axis.\n\nNormalized by N-1.' see_also = _skew_see_also examples = "\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3])\n >>> s.skew()\n 0.0\n\n With a DataFrame\n\n >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [2, 3, 4], 'c': [1, 3, 5]},\n ... index=['tiger', 'zebra', 'cow'])\n >>> df\n a b c\n tiger 1 2 1\n zebra 2 3 3\n cow 3 4 5\n >>> df.skew()\n a 0.0\n b 0.0\n c 0.0\n dtype: float64\n\n Using axis=1\n\n >>> df.skew(axis=1)\n tiger 1.732051\n zebra -1.732051\n cow 0.000000\n dtype: float64\n\n In this case, `numeric_only` should be set to `True` to avoid\n getting an error.\n\n >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': ['T', 'Z', 'X']},\n ... index=['tiger', 'zebra', 'cow'])\n >>> df.skew(numeric_only=True)\n a 0.0\n dtype: float64" kwargs = {'min_count': ''} elif name == 'kurt': base_doc = _num_doc desc = "Return unbiased kurtosis over requested axis.\n\nKurtosis obtained using Fisher's definition of\nkurtosis (kurtosis of normal == 0.0). Normalized by N-1." see_also = '' examples = "\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 2, 3], index=['cat', 'dog', 'dog', 'mouse'])\n >>> s\n cat 1\n dog 2\n dog 2\n mouse 3\n dtype: int64\n >>> s.kurt()\n 1.5\n\n With a DataFrame\n\n >>> df = pd.DataFrame({'a': [1, 2, 2, 3], 'b': [3, 4, 4, 4]},\n ... 
index=['cat', 'dog', 'dog', 'mouse'])\n >>> df\n a b\n cat 1 3\n dog 2 4\n dog 2 4\n mouse 3 4\n >>> df.kurt()\n a 1.5\n b 4.0\n dtype: float64\n\n With axis=None\n\n >>> df.kurt(axis=None).round(6)\n -0.988693\n\n Using axis=1\n\n >>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [3, 4], 'd': [1, 2]},\n ... index=['cat', 'dog'])\n >>> df.kurt(axis=1)\n cat -6.0\n dog -6.0\n dtype: float64" kwargs = {'min_count': ''} elif name == 'cumsum': if ndim == 1: base_doc = _cnum_series_doc else: base_doc = _cnum_pd_doc desc = 'sum' see_also = '' examples = _cumsum_examples kwargs = {'accum_func_name': 'sum'} elif name == 'cumprod': if ndim == 1: base_doc = _cnum_series_doc else: base_doc = _cnum_pd_doc desc = 'product' see_also = '' examples = _cumprod_examples kwargs = {'accum_func_name': 'prod'} elif name == 'cummin': if ndim == 1: base_doc = _cnum_series_doc else: base_doc = _cnum_pd_doc desc = 'minimum' see_also = '' examples = _cummin_examples kwargs = {'accum_func_name': 'min'} elif name == 'cummax': if ndim == 1: base_doc = _cnum_series_doc else: base_doc = _cnum_pd_doc desc = 'maximum' see_also = '' examples = _cummax_examples kwargs = {'accum_func_name': 'max'} else: raise NotImplementedError docstr = base_doc.format(desc=desc, name=name, name1=name1, name2=name2, axis_descr=axis_descr, see_also=see_also, examples=examples, **kwargs) return docstr # File: pandas-main/pandas/core/groupby/__init__.py from pandas.core.groupby.generic import DataFrameGroupBy, NamedAgg, SeriesGroupBy from pandas.core.groupby.groupby import GroupBy from pandas.core.groupby.grouper import Grouper __all__ = ['DataFrameGroupBy', 'NamedAgg', 'SeriesGroupBy', 'GroupBy', 'Grouper'] # File: pandas-main/pandas/core/groupby/base.py """""" from __future__ import annotations import dataclasses from typing import TYPE_CHECKING if TYPE_CHECKING: from collections.abc import Hashable @dataclasses.dataclass(order=True, frozen=True) class OutputKey: label: Hashable position: int plotting_methods = frozenset(['plot', 'hist']) cythonized_kernels = frozenset(['cumprod', 'cumsum', 'shift', 'cummin', 'cummax']) reduction_kernels = frozenset(['all', 'any', 'corrwith', 'count', 'first', 'idxmax', 'idxmin', 'last', 'max', 'mean', 'median', 'min', 'nunique', 'prod', 'quantile', 'sem', 'size', 'skew', 'std', 'sum', 'var']) transformation_kernels = frozenset(['bfill', 'cumcount', 'cummax', 'cummin', 'cumprod', 'cumsum', 'diff', 'ffill', 'ngroup', 'pct_change', 'rank', 'shift']) groupby_other_methods = frozenset(['agg', 'aggregate', 'apply', 'boxplot', 'corr', 'cov', 'describe', 'expanding', 'ewm', 'filter', 'get_group', 'groups', 'head', 'hist', 'indices', 'ndim', 'ngroups', 'nth', 'ohlc', 'pipe', 'plot', 'resample', 'rolling', 'tail', 'take', 'transform', 'sample', 'value_counts']) transform_kernel_allowlist = reduction_kernels | transformation_kernels # File: pandas-main/pandas/core/groupby/categorical.py from __future__ import annotations import numpy as np from pandas.core.algorithms import unique1d from pandas.core.arrays.categorical import Categorical, CategoricalDtype, recode_for_categories def recode_for_groupby(c: Categorical, sort: bool, observed: bool) -> Categorical: if observed: take_codes = unique1d(c.codes[c.codes != -1]) if sort: take_codes = np.sort(take_codes) categories = c.categories.take(take_codes) codes = recode_for_categories(c.codes, c.categories, categories) dtype = CategoricalDtype(categories, ordered=c.ordered) return Categorical._simple_new(codes, dtype=dtype) if sort: return c unique_notnan_codes = 
unique1d(c.codes[c.codes != -1]) if sort: unique_notnan_codes = np.sort(unique_notnan_codes) if (num_cat := len(c.categories)) > len(unique_notnan_codes): missing_codes = np.setdiff1d(np.arange(num_cat), unique_notnan_codes, assume_unique=True) take_codes = np.concatenate((unique_notnan_codes, missing_codes)) else: take_codes = unique_notnan_codes return Categorical(c, c.categories.take(take_codes)) # File: pandas-main/pandas/core/groupby/generic.py """""" from __future__ import annotations from collections import abc from collections.abc import Callable from functools import partial from textwrap import dedent from typing import TYPE_CHECKING, Any, Literal, NamedTuple, TypeVar, Union, cast import warnings import numpy as np from pandas._libs import Interval from pandas._libs.hashtable import duplicated from pandas.errors import SpecificationError from pandas.util._decorators import Appender, Substitution, doc from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ensure_int64, is_bool, is_dict_like, is_integer_dtype, is_list_like, is_numeric_dtype, is_scalar from pandas.core.dtypes.dtypes import CategoricalDtype, IntervalDtype from pandas.core.dtypes.inference import is_hashable from pandas.core.dtypes.missing import isna, notna from pandas.core import algorithms from pandas.core.apply import GroupByApply, maybe_mangle_lambdas, reconstruct_func, validate_func_kwargs import pandas.core.common as com from pandas.core.frame import DataFrame from pandas.core.groupby import base from pandas.core.groupby.groupby import GroupBy, GroupByPlot, _agg_template_frame, _agg_template_series, _transform_template from pandas.core.indexes.api import Index, MultiIndex, all_indexes_same, default_index from pandas.core.series import Series from pandas.core.sorting import get_group_index from pandas.core.util.numba_ import maybe_use_numba from pandas.plotting import boxplot_frame_groupby if TYPE_CHECKING: from collections.abc import Hashable, Sequence from pandas._typing import ArrayLike, BlockManager, CorrelationMethod, IndexLabel, Manager, SingleBlockManager, TakeIndexer from pandas import Categorical from pandas.core.generic import NDFrame AggScalar = Union[str, Callable[..., Any]] ScalarResult = TypeVar('ScalarResult') class NamedAgg(NamedTuple): column: Hashable aggfunc: AggScalar class SeriesGroupBy(GroupBy[Series]): def _wrap_agged_manager(self, mgr: Manager) -> Series: out = self.obj._constructor_from_mgr(mgr, axes=mgr.axes) out._name = self.obj.name return out def _get_data_to_aggregate(self, *, numeric_only: bool=False, name: str | None=None) -> SingleBlockManager: ser = self._obj_with_exclusions single = ser._mgr if numeric_only and (not is_numeric_dtype(ser.dtype)): kwd_name = 'numeric_only' raise TypeError(f'Cannot use {kwd_name}=True with {type(self).__name__}.{name} and non-numeric dtypes.') return single _agg_examples_doc = dedent("\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.groupby([1, 1, 2, 2]).min()\n 1 1\n 2 3\n dtype: int64\n\n >>> s.groupby([1, 1, 2, 2]).agg('min')\n 1 1\n 2 3\n dtype: int64\n\n >>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])\n min max\n 1 1 2\n 2 3 4\n\n The output column names can be controlled by passing\n the desired column names and aggregations as keyword arguments.\n\n >>> s.groupby([1, 1, 2, 2]).agg(\n ... minimum='min',\n ... maximum='max',\n ... )\n minimum maximum\n 1 1 2\n 2 3 4\n\n .. 
versionchanged:: 1.3.0\n\n The resulting dtype will reflect the return value of the aggregating function.\n\n >>> s.groupby([1, 1, 2, 2]).agg(lambda x: x.astype(float).min())\n 1 1.0\n 2 3.0\n dtype: float64\n ") def apply(self, func, *args, **kwargs) -> Series: return super().apply(func, *args, **kwargs) @doc(_agg_template_series, examples=_agg_examples_doc, klass='Series') def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): relabeling = func is None columns = None if relabeling: (columns, func) = validate_func_kwargs(kwargs) kwargs = {} if isinstance(func, str): if maybe_use_numba(engine) and engine is not None: kwargs['engine'] = engine if engine_kwargs is not None: kwargs['engine_kwargs'] = engine_kwargs return getattr(self, func)(*args, **kwargs) elif isinstance(func, abc.Iterable): func = maybe_mangle_lambdas(func) kwargs['engine'] = engine kwargs['engine_kwargs'] = engine_kwargs ret = self._aggregate_multiple_funcs(func, *args, **kwargs) if relabeling: assert columns is not None ret.columns = columns if not self.as_index: ret = ret.reset_index() return ret else: if maybe_use_numba(engine): return self._aggregate_with_numba(func, *args, engine_kwargs=engine_kwargs, **kwargs) if self.ngroups == 0: obj = self._obj_with_exclusions return self.obj._constructor([], name=self.obj.name, index=self._grouper.result_index, dtype=obj.dtype) return self._python_agg_general(func, *args, **kwargs) agg = aggregate def _python_agg_general(self, func, *args, **kwargs): f = lambda x: func(x, *args, **kwargs) obj = self._obj_with_exclusions result = self._grouper.agg_series(obj, f) res = obj._constructor(result, name=obj.name) return self._wrap_aggregated_output(res) def _aggregate_multiple_funcs(self, arg, *args, **kwargs) -> DataFrame: if isinstance(arg, dict): raise SpecificationError('nested renamer is not supported') if any((isinstance(x, (tuple, list)) for x in arg)): arg = ((x, x) if not isinstance(x, (tuple, list)) else x for x in arg) else: columns = (com.get_callable_name(f) or f for f in arg) arg = zip(columns, arg) results: dict[base.OutputKey, DataFrame | Series] = {} with com.temp_setattr(self, 'as_index', True): for (idx, (name, func)) in enumerate(arg): key = base.OutputKey(label=name, position=idx) results[key] = self.aggregate(func, *args, **kwargs) if any((isinstance(x, DataFrame) for x in results.values())): from pandas import concat res_df = concat(results.values(), axis=1, keys=[key.label for key in results]) return res_df indexed_output = {key.position: val for (key, val) in results.items()} output = self.obj._constructor_expanddim(indexed_output, index=None) output.columns = Index((key.label for key in results)) return output def _wrap_applied_output(self, data: Series, values: list[Any], not_indexed_same: bool=False, is_transform: bool=False) -> DataFrame | Series: if len(values) == 0: if is_transform: res_index = data.index else: res_index = self._grouper.result_index return self.obj._constructor([], name=self.obj.name, index=res_index, dtype=data.dtype) assert values is not None if isinstance(values[0], dict): index = self._grouper.result_index res_df = self.obj._constructor_expanddim(values, index=index) res_ser = res_df.stack() res_ser.name = self.obj.name return res_ser elif isinstance(values[0], (Series, DataFrame)): result = self._concat_objects(values, not_indexed_same=not_indexed_same, is_transform=is_transform) if isinstance(result, Series): result.name = self.obj.name if not self.as_index and not_indexed_same: result = 
self._insert_inaxis_grouper(result) result.index = default_index(len(result)) return result else: result = self.obj._constructor(data=values, index=self._grouper.result_index, name=self.obj.name) if not self.as_index: result = self._insert_inaxis_grouper(result) result.index = default_index(len(result)) return result __examples_series_doc = dedent('\n >>> ser = pd.Series([390.0, 350.0, 30.0, 20.0],\n ... index=["Falcon", "Falcon", "Parrot", "Parrot"],\n ... name="Max Speed")\n >>> grouped = ser.groupby([1, 1, 2, 2])\n >>> grouped.transform(lambda x: (x - x.mean()) / x.std())\n Falcon 0.707107\n Falcon -0.707107\n Parrot 0.707107\n Parrot -0.707107\n Name: Max Speed, dtype: float64\n\n Broadcast result of the transformation\n\n >>> grouped.transform(lambda x: x.max() - x.min())\n Falcon 40.0\n Falcon 40.0\n Parrot 10.0\n Parrot 10.0\n Name: Max Speed, dtype: float64\n\n >>> grouped.transform("mean")\n Falcon 370.0\n Falcon 370.0\n Parrot 25.0\n Parrot 25.0\n Name: Max Speed, dtype: float64\n\n .. versionchanged:: 1.3.0\n\n The resulting dtype will reflect the return value of the passed ``func``,\n for example:\n\n >>> grouped.transform(lambda x: x.astype(int).max())\n Falcon 390\n Falcon 390\n Parrot 30\n Parrot 30\n Name: Max Speed, dtype: int64\n ') @Substitution(klass='Series', example=__examples_series_doc) @Appender(_transform_template) def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): return self._transform(func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs) def _cython_transform(self, how: str, numeric_only: bool=False, **kwargs): obj = self._obj_with_exclusions try: result = self._grouper._cython_operation('transform', obj._values, how, 0, **kwargs) except NotImplementedError as err: raise TypeError(f'{how} is not supported for {obj.dtype} dtype') from err return obj._constructor(result, index=self.obj.index, name=obj.name) def _transform_general(self, func: Callable, engine, engine_kwargs, *args, **kwargs) -> Series: if maybe_use_numba(engine): return self._transform_with_numba(func, *args, engine_kwargs=engine_kwargs, **kwargs) assert callable(func) klass = type(self.obj) results = [] for (name, group) in self._grouper.get_iterator(self._obj_with_exclusions): object.__setattr__(group, 'name', name) res = func(group, *args, **kwargs) results.append(klass(res, index=group.index)) if results: from pandas.core.reshape.concat import concat concatenated = concat(results, ignore_index=True) result = self._set_result_index_ordered(concatenated) else: result = self.obj._constructor(dtype=np.float64) result.name = self.obj.name return result def filter(self, func, dropna: bool=True, *args, **kwargs): if isinstance(func, str): wrapper = lambda x: getattr(x, func)(*args, **kwargs) else: wrapper = lambda x: func(x, *args, **kwargs) def true_and_notna(x) -> bool: b = wrapper(x) return notna(b) and b try: indices = [self._get_index(name) for (name, group) in self._grouper.get_iterator(self._obj_with_exclusions) if true_and_notna(group)] except (ValueError, TypeError) as err: raise TypeError('the filter must return a boolean result') from err filtered = self._apply_filter(indices, dropna) return filtered def nunique(self, dropna: bool=True) -> Series | DataFrame: ids = self._grouper.ids ngroups = self._grouper.ngroups val = self.obj._values (codes, uniques) = algorithms.factorize(val, use_na_sentinel=dropna, sort=False) if self._grouper.has_dropped_na: mask = ids >= 0 ids = ids[mask] codes = codes[mask] group_index = get_group_index(labels=[ids, codes], 
shape=(ngroups, len(uniques)), sort=False, xnull=dropna) if dropna: mask = group_index >= 0 if (~mask).any(): ids = ids[mask] group_index = group_index[mask] mask = duplicated(group_index, 'first') res = np.bincount(ids[~mask], minlength=ngroups) res = ensure_int64(res) ri = self._grouper.result_index result: Series | DataFrame = self.obj._constructor(res, index=ri, name=self.obj.name) if not self.as_index: result = self._insert_inaxis_grouper(result) result.index = default_index(len(result)) return result @doc(Series.describe) def describe(self, percentiles=None, include=None, exclude=None) -> Series: return super().describe(percentiles=percentiles, include=include, exclude=exclude) def value_counts(self, normalize: bool=False, sort: bool=True, ascending: bool=False, bins=None, dropna: bool=True) -> Series | DataFrame: name = 'proportion' if normalize else 'count' if bins is None: result = self._value_counts(normalize=normalize, sort=sort, ascending=ascending, dropna=dropna) result.name = name return result from pandas.core.reshape.merge import get_join_indexers from pandas.core.reshape.tile import cut ids = self._grouper.ids val = self.obj._values index_names = self._grouper.names + [self.obj.name] if isinstance(val.dtype, CategoricalDtype) or (bins is not None and (not np.iterable(bins))): ser = self.apply(Series.value_counts, normalize=normalize, sort=sort, ascending=ascending, bins=bins) ser.name = name ser.index.names = index_names return ser mask = ids != -1 (ids, val) = (ids[mask], val[mask]) lab: Index | np.ndarray if bins is None: (lab, lev) = algorithms.factorize(val, sort=True) llab = lambda lab, inc: lab[inc] else: cat_ser = cut(Series(val, copy=False), bins, include_lowest=True) cat_obj = cast('Categorical', cat_ser._values) lev = cat_obj.categories lab = lev.take(cat_obj.codes, allow_fill=True, fill_value=lev._na_value) llab = lambda lab, inc: lab[inc]._multiindex.codes[-1] if isinstance(lab.dtype, IntervalDtype): lab_interval = cast(Interval, lab) sorter = np.lexsort((lab_interval.left, lab_interval.right, ids)) else: sorter = np.lexsort((lab, ids)) (ids, lab) = (ids[sorter], lab[sorter]) idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0] idx = np.r_[0, idchanges] if not len(ids): idx = idchanges lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1)) inc = np.r_[True, lchanges] if not len(val): inc = lchanges inc[idx] = True out = np.diff(np.nonzero(np.r_[inc, True])[0]) rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx)) if isinstance(self._grouper.result_index, MultiIndex): codes = list(self._grouper.result_index.codes) else: codes = [algorithms.factorize(self._grouper.result_index, sort=self._grouper._sort, use_na_sentinel=self._grouper.dropna)[0]] codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)] levels = self._grouper.levels + [lev] if dropna: mask = codes[-1] != -1 if mask.all(): dropna = False else: (out, codes) = (out[mask], [level_codes[mask] for level_codes in codes]) if normalize: out = out.astype('float') d = np.diff(np.r_[idx, len(ids)]) if dropna: m = ids[lab == -1] np.add.at(d, m, -1) acc = rep(d)[mask] else: acc = rep(d) out /= acc if sort and bins is None: cat = ids[inc][mask] if dropna else ids[inc] sorter = np.lexsort((out if ascending else -out, cat)) (out, codes[-1]) = (out[sorter], codes[-1][sorter]) if bins is not None: diff = np.zeros(len(out), dtype='bool') for level_codes in codes[:-1]: diff |= np.r_[True, level_codes[1:] != level_codes[:-1]] (ncat, nbin) = (diff.sum(), len(levels[-1])) left = 
[np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)] right = [diff.cumsum() - 1, codes[-1]] (_, idx) = get_join_indexers(left, right, sort=False, how='left') if idx is not None: out = np.where(idx != -1, out[idx], 0) if sort: sorter = np.lexsort((out if ascending else -out, left[0])) (out, left[-1]) = (out[sorter], left[-1][sorter]) def build_codes(lev_codes: np.ndarray) -> np.ndarray: return np.repeat(lev_codes[diff], nbin) codes = [build_codes(lev_codes) for lev_codes in codes[:-1]] codes.append(left[-1]) mi = MultiIndex(levels=levels, codes=codes, names=index_names, verify_integrity=False) if is_integer_dtype(out.dtype): out = ensure_int64(out) result = self.obj._constructor(out, index=mi, name=name) if not self.as_index: result = result.reset_index() return result def take(self, indices: TakeIndexer, **kwargs) -> Series: result = self._op_via_apply('take', indices=indices, **kwargs) return result def skew(self, skipna: bool=True, numeric_only: bool=False, **kwargs) -> Series: def alt(obj): raise TypeError(f"'skew' is not supported for dtype={obj.dtype}") return self._cython_agg_general('skew', alt=alt, skipna=skipna, numeric_only=numeric_only, **kwargs) @property @doc(Series.plot.__doc__) def plot(self) -> GroupByPlot: result = GroupByPlot(self) return result @doc(Series.nlargest.__doc__) def nlargest(self, n: int=5, keep: Literal['first', 'last', 'all']='first') -> Series: f = partial(Series.nlargest, n=n, keep=keep) data = self._obj_with_exclusions result = self._python_apply_general(f, data, not_indexed_same=True) return result @doc(Series.nsmallest.__doc__) def nsmallest(self, n: int=5, keep: Literal['first', 'last', 'all']='first') -> Series: f = partial(Series.nsmallest, n=n, keep=keep) data = self._obj_with_exclusions result = self._python_apply_general(f, data, not_indexed_same=True) return result def idxmin(self, skipna: bool=True) -> Series: return self._idxmax_idxmin('idxmin', skipna=skipna) def idxmax(self, skipna: bool=True) -> Series: return self._idxmax_idxmin('idxmax', skipna=skipna) @doc(Series.corr.__doc__) def corr(self, other: Series, method: CorrelationMethod='pearson', min_periods: int | None=None) -> Series: result = self._op_via_apply('corr', other=other, method=method, min_periods=min_periods) return result @doc(Series.cov.__doc__) def cov(self, other: Series, min_periods: int | None=None, ddof: int | None=1) -> Series: result = self._op_via_apply('cov', other=other, min_periods=min_periods, ddof=ddof) return result @property def is_monotonic_increasing(self) -> Series: return self.apply(lambda ser: ser.is_monotonic_increasing) @property def is_monotonic_decreasing(self) -> Series: return self.apply(lambda ser: ser.is_monotonic_decreasing) @doc(Series.hist.__doc__) def hist(self, by=None, ax=None, grid: bool=True, xlabelsize: int | None=None, xrot: float | None=None, ylabelsize: int | None=None, yrot: float | None=None, figsize: tuple[float, float] | None=None, bins: int | Sequence[int]=10, backend: str | None=None, legend: bool=False, **kwargs): result = self._op_via_apply('hist', by=by, ax=ax, grid=grid, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot, figsize=figsize, bins=bins, backend=backend, legend=legend, **kwargs) return result @property @doc(Series.dtype.__doc__) def dtype(self) -> Series: return self.apply(lambda ser: ser.dtype) def unique(self) -> Series: result = self._op_via_apply('unique') return result class DataFrameGroupBy(GroupBy[DataFrame]): _agg_examples_doc = dedent('\n Examples\n --------\n >>> data = 
{"A": [1, 1, 2, 2],\n ... "B": [1, 2, 3, 4],\n ... "C": [0.362838, 0.227877, 1.267767, -0.562860]}\n >>> df = pd.DataFrame(data)\n >>> df\n A B C\n 0 1 1 0.362838\n 1 1 2 0.227877\n 2 2 3 1.267767\n 3 2 4 -0.562860\n\n The aggregation is for each column.\n\n >>> df.groupby(\'A\').agg(\'min\')\n B C\n A\n 1 1 0.227877\n 2 3 -0.562860\n\n Multiple aggregations\n\n >>> df.groupby(\'A\').agg([\'min\', \'max\'])\n B C\n min max min max\n A\n 1 1 2 0.227877 0.362838\n 2 3 4 -0.562860 1.267767\n\n Select a column for aggregation\n\n >>> df.groupby(\'A\').B.agg([\'min\', \'max\'])\n min max\n A\n 1 1 2\n 2 3 4\n\n User-defined function for aggregation\n\n >>> df.groupby(\'A\').agg(lambda x: sum(x) + 2)\n B\t C\n A\n 1\t5\t2.590715\n 2\t9\t2.704907\n\n Different aggregations per column\n\n >>> df.groupby(\'A\').agg({\'B\': [\'min\', \'max\'], \'C\': \'sum\'})\n B C\n min max sum\n A\n 1 1 2 0.590715\n 2 3 4 0.704907\n\n To control the output names with different aggregations per column,\n pandas supports "named aggregation"\n\n >>> df.groupby("A").agg(\n ... b_min=pd.NamedAgg(column="B", aggfunc="min"),\n ... c_sum=pd.NamedAgg(column="C", aggfunc="sum")\n ... )\n b_min c_sum\n A\n 1 1 0.590715\n 2 3 0.704907\n\n - The keywords are the *output* column names\n - The values are tuples whose first element is the column to select\n and the second element is the aggregation to apply to that column.\n Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields\n ``[\'column\', \'aggfunc\']`` to make it clearer what the arguments are.\n As usual, the aggregation can be a callable or a string alias.\n\n See :ref:`groupby.aggregate.named` for more.\n\n .. versionchanged:: 1.3.0\n\n The resulting dtype will reflect the return value of the aggregating function.\n\n >>> df.groupby("A")[["B"]].agg(lambda x: x.astype(float).min())\n B\n A\n 1 1.0\n 2 3.0\n ') @doc(_agg_template_frame, examples=_agg_examples_doc, klass='DataFrame') def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs): (relabeling, func, columns, order) = reconstruct_func(func, **kwargs) func = maybe_mangle_lambdas(func) if maybe_use_numba(engine): kwargs['engine'] = engine kwargs['engine_kwargs'] = engine_kwargs op = GroupByApply(self, func, args=args, kwargs=kwargs) result = op.agg() if not is_dict_like(func) and result is not None: if not self.as_index and is_list_like(func): return result.reset_index() else: return result elif relabeling: result = cast(DataFrame, result) result = result.iloc[:, order] result = cast(DataFrame, result) result.columns = columns if result is None: if 'engine' in kwargs: del kwargs['engine'] del kwargs['engine_kwargs'] if maybe_use_numba(engine): return self._aggregate_with_numba(func, *args, engine_kwargs=engine_kwargs, **kwargs) if self._grouper.nkeys > 1: return self._python_agg_general(func, *args, **kwargs) elif args or kwargs: result = self._aggregate_frame(func, *args, **kwargs) else: gba = GroupByApply(self, [func], args=(), kwargs={}) try: result = gba.agg() except ValueError as err: if 'No objects to concatenate' not in str(err): raise result = self._aggregate_frame(func) else: result = cast(DataFrame, result) result.columns = self._obj_with_exclusions.columns.copy() if not self.as_index: result = self._insert_inaxis_grouper(result) result.index = default_index(len(result)) return result agg = aggregate def _python_agg_general(self, func, *args, **kwargs): f = lambda x: func(x, *args, **kwargs) if self.ngroups == 0: return self._python_apply_general(f, self._selected_obj, 
is_agg=True) obj = self._obj_with_exclusions if not len(obj.columns): return self._python_apply_general(f, self._selected_obj) output: dict[int, ArrayLike] = {} for (idx, (name, ser)) in enumerate(obj.items()): result = self._grouper.agg_series(ser, f) output[idx] = result res = self.obj._constructor(output) res.columns = obj.columns.copy(deep=False) return self._wrap_aggregated_output(res) def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame: if self._grouper.nkeys != 1: raise AssertionError('Number of keys must be 1') obj = self._obj_with_exclusions result: dict[Hashable, NDFrame | np.ndarray] = {} for (name, grp_df) in self._grouper.get_iterator(obj): fres = func(grp_df, *args, **kwargs) result[name] = fres result_index = self._grouper.result_index out = self.obj._constructor(result, index=obj.columns, columns=result_index) out = out.T return out def _wrap_applied_output(self, data: DataFrame, values: list, not_indexed_same: bool=False, is_transform: bool=False): if len(values) == 0: if is_transform: res_index = data.index else: res_index = self._grouper.result_index result = self.obj._constructor(index=res_index, columns=data.columns) result = result.astype(data.dtypes) return result first_not_none = next(com.not_none(*values), None) if first_not_none is None: result = self.obj._constructor(columns=data.columns) result = result.astype(data.dtypes) return result elif isinstance(first_not_none, DataFrame): return self._concat_objects(values, not_indexed_same=not_indexed_same, is_transform=is_transform) key_index = self._grouper.result_index if self.as_index else None if isinstance(first_not_none, (np.ndarray, Index)): if not is_hashable(self._selection): name = tuple(self._selection) else: name = self._selection return self.obj._constructor_sliced(values, index=key_index, name=name) elif not isinstance(first_not_none, Series): if self.as_index: return self.obj._constructor_sliced(values, index=key_index) else: result = self.obj._constructor(values, columns=[self._selection]) result = self._insert_inaxis_grouper(result) return result else: return self._wrap_applied_output_series(values, not_indexed_same, first_not_none, key_index, is_transform) def _wrap_applied_output_series(self, values: list[Series], not_indexed_same: bool, first_not_none, key_index: Index | None, is_transform: bool) -> DataFrame | Series: kwargs = first_not_none._construct_axes_dict() backup = Series(**kwargs) values = [x if x is not None else backup for x in values] all_indexed_same = all_indexes_same((x.index for x in values)) if not all_indexed_same: return self._concat_objects(values, not_indexed_same=True, is_transform=is_transform) stacked_values = np.vstack([np.asarray(v) for v in values]) index = key_index columns = first_not_none.index.copy() if columns.name is None: names = {v.name for v in values} if len(names) == 1: columns.name = next(iter(names)) if stacked_values.dtype == object: stacked_values = stacked_values.tolist() result = self.obj._constructor(stacked_values, index=index, columns=columns) if not self.as_index: result = self._insert_inaxis_grouper(result) return result def _cython_transform(self, how: str, numeric_only: bool=False, **kwargs) -> DataFrame: mgr: BlockManager = self._get_data_to_aggregate(numeric_only=numeric_only, name=how) def arr_func(bvalues: ArrayLike) -> ArrayLike: return self._grouper._cython_operation('transform', bvalues, how, 1, **kwargs) res_mgr = mgr.apply(arr_func) res_df = self.obj._constructor_from_mgr(res_mgr, axes=res_mgr.axes) return res_df def 
_transform_general(self, func, engine, engine_kwargs, *args, **kwargs): if maybe_use_numba(engine): return self._transform_with_numba(func, *args, engine_kwargs=engine_kwargs, **kwargs) from pandas.core.reshape.concat import concat applied = [] obj = self._obj_with_exclusions gen = self._grouper.get_iterator(obj) (fast_path, slow_path) = self._define_paths(func, *args, **kwargs) try: (name, group) = next(gen) except StopIteration: pass else: object.__setattr__(group, 'name', name) try: (path, res) = self._choose_path(fast_path, slow_path, group) except ValueError as err: msg = 'transform must return a scalar value for each group' raise ValueError(msg) from err if group.size > 0: res = _wrap_transform_general_frame(self.obj, group, res) applied.append(res) for (name, group) in gen: if group.size == 0: continue object.__setattr__(group, 'name', name) res = path(group) res = _wrap_transform_general_frame(self.obj, group, res) applied.append(res) concat_index = obj.columns concatenated = concat(applied, axis=0, verify_integrity=False, ignore_index=True) concatenated = concatenated.reindex(concat_index, axis=1) return self._set_result_index_ordered(concatenated) __examples_dataframe_doc = dedent('\n >>> df = pd.DataFrame({\'A\' : [\'foo\', \'bar\', \'foo\', \'bar\',\n ... \'foo\', \'bar\'],\n ... \'B\' : [\'one\', \'one\', \'two\', \'three\',\n ... \'two\', \'two\'],\n ... \'C\' : [1, 5, 5, 2, 5, 5],\n ... \'D\' : [2.0, 5., 8., 1., 2., 9.]})\n >>> grouped = df.groupby(\'A\')[[\'C\', \'D\']]\n >>> grouped.transform(lambda x: (x - x.mean()) / x.std())\n C D\n 0 -1.154701 -0.577350\n 1 0.577350 0.000000\n 2 0.577350 1.154701\n 3 -1.154701 -1.000000\n 4 0.577350 -0.577350\n 5 0.577350 1.000000\n\n Broadcast result of the transformation\n\n >>> grouped.transform(lambda x: x.max() - x.min())\n C D\n 0 4.0 6.0\n 1 3.0 8.0\n 2 4.0 6.0\n 3 3.0 8.0\n 4 4.0 6.0\n 5 3.0 8.0\n\n >>> grouped.transform("mean")\n C D\n 0 3.666667 4.0\n 1 4.000000 5.0\n 2 3.666667 4.0\n 3 4.000000 5.0\n 4 3.666667 4.0\n 5 4.000000 5.0\n\n .. 
versionchanged:: 1.3.0\n\n The resulting dtype will reflect the return value of the passed ``func``,\n for example:\n\n >>> grouped.transform(lambda x: x.astype(int).max())\n C D\n 0 5 8\n 1 5 9\n 2 5 8\n 3 5 9\n 4 5 8\n 5 5 9\n ') @Substitution(klass='DataFrame', example=__examples_dataframe_doc) @Appender(_transform_template) def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): return self._transform(func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs) def _define_paths(self, func, *args, **kwargs): if isinstance(func, str): fast_path = lambda group: getattr(group, func)(*args, **kwargs) slow_path = lambda group: group.apply(lambda x: getattr(x, func)(*args, **kwargs), axis=0) else: fast_path = lambda group: func(group, *args, **kwargs) slow_path = lambda group: group.apply(lambda x: func(x, *args, **kwargs), axis=0) return (fast_path, slow_path) def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFrame): path = slow_path res = slow_path(group) if self.ngroups == 1: return (path, res) try: res_fast = fast_path(group) except AssertionError: raise except Exception: return (path, res) if isinstance(res_fast, DataFrame): if not res_fast.columns.equals(group.columns): return (path, res) elif isinstance(res_fast, Series): if not res_fast.index.equals(group.columns): return (path, res) else: return (path, res) if res_fast.equals(res): path = fast_path return (path, res) def filter(self, func, dropna: bool=True, *args, **kwargs) -> DataFrame: indices = [] obj = self._selected_obj gen = self._grouper.get_iterator(obj) for (name, group) in gen: object.__setattr__(group, 'name', name) res = func(group, *args, **kwargs) try: res = res.squeeze() except AttributeError: pass if is_bool(res) or (is_scalar(res) and isna(res)): if notna(res) and res: indices.append(self._get_index(name)) else: raise TypeError(f'filter function returned a {type(res).__name__}, but expected a scalar bool') return self._apply_filter(indices, dropna) def __getitem__(self, key) -> DataFrameGroupBy | SeriesGroupBy: if isinstance(key, tuple) and len(key) > 1: raise ValueError('Cannot subset columns with a tuple with more than one element. 
Use a list instead.') return super().__getitem__(key) def _gotitem(self, key, ndim: int, subset=None): if ndim == 2: if subset is None: subset = self.obj return DataFrameGroupBy(subset, self.keys, level=self.level, grouper=self._grouper, exclusions=self.exclusions, selection=key, as_index=self.as_index, sort=self.sort, group_keys=self.group_keys, observed=self.observed, dropna=self.dropna) elif ndim == 1: if subset is None: subset = self.obj[key] return SeriesGroupBy(subset, self.keys, level=self.level, grouper=self._grouper, exclusions=self.exclusions, selection=key, as_index=self.as_index, sort=self.sort, group_keys=self.group_keys, observed=self.observed, dropna=self.dropna) raise AssertionError('invalid ndim for _gotitem') def _get_data_to_aggregate(self, *, numeric_only: bool=False, name: str | None=None) -> BlockManager: obj = self._obj_with_exclusions mgr = obj._mgr if numeric_only: mgr = mgr.get_numeric_data() return mgr def _wrap_agged_manager(self, mgr: BlockManager) -> DataFrame: return self.obj._constructor_from_mgr(mgr, axes=mgr.axes) def _apply_to_column_groupbys(self, func) -> DataFrame: from pandas.core.reshape.concat import concat obj = self._obj_with_exclusions columns = obj.columns sgbs = (SeriesGroupBy(obj.iloc[:, i], selection=colname, grouper=self._grouper, exclusions=self.exclusions, observed=self.observed) for (i, colname) in enumerate(obj.columns)) results = [func(sgb) for sgb in sgbs] if not len(results): res_df = DataFrame([], columns=columns, index=self._grouper.result_index) else: res_df = concat(results, keys=columns, axis=1) if not self.as_index: res_df.index = default_index(len(res_df)) res_df = self._insert_inaxis_grouper(res_df) return res_df def nunique(self, dropna: bool=True) -> DataFrame: return self._apply_to_column_groupbys(lambda sgb: sgb.nunique(dropna)) def idxmax(self, skipna: bool=True, numeric_only: bool=False) -> DataFrame: return self._idxmax_idxmin('idxmax', numeric_only=numeric_only, skipna=skipna) def idxmin(self, skipna: bool=True, numeric_only: bool=False) -> DataFrame: return self._idxmax_idxmin('idxmin', numeric_only=numeric_only, skipna=skipna) boxplot = boxplot_frame_groupby def value_counts(self, subset: Sequence[Hashable] | None=None, normalize: bool=False, sort: bool=True, ascending: bool=False, dropna: bool=True) -> DataFrame | Series: return self._value_counts(subset, normalize, sort, ascending, dropna) def take(self, indices: TakeIndexer, **kwargs) -> DataFrame: result = self._op_via_apply('take', indices=indices, **kwargs) return result def skew(self, skipna: bool=True, numeric_only: bool=False, **kwargs) -> DataFrame: def alt(obj): raise TypeError(f"'skew' is not supported for dtype={obj.dtype}") return self._cython_agg_general('skew', alt=alt, skipna=skipna, numeric_only=numeric_only, **kwargs) @property @doc(DataFrame.plot.__doc__) def plot(self) -> GroupByPlot: result = GroupByPlot(self) return result @doc(DataFrame.corr.__doc__) def corr(self, method: str | Callable[[np.ndarray, np.ndarray], float]='pearson', min_periods: int=1, numeric_only: bool=False) -> DataFrame: result = self._op_via_apply('corr', method=method, min_periods=min_periods, numeric_only=numeric_only) return result @doc(DataFrame.cov.__doc__) def cov(self, min_periods: int | None=None, ddof: int | None=1, numeric_only: bool=False) -> DataFrame: result = self._op_via_apply('cov', min_periods=min_periods, ddof=ddof, numeric_only=numeric_only) return result def hist(self, column: IndexLabel | None=None, by=None, grid: bool=True, xlabelsize: int | None=None, 
xrot: float | None=None, ylabelsize: int | None=None, yrot: float | None=None, ax=None, sharex: bool=False, sharey: bool=False, figsize: tuple[float, float] | None=None, layout: tuple[int, int] | None=None, bins: int | Sequence[int]=10, backend: str | None=None, legend: bool=False, **kwargs): result = self._op_via_apply('hist', column=column, by=by, grid=grid, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot, ax=ax, sharex=sharex, sharey=sharey, figsize=figsize, layout=layout, bins=bins, backend=backend, legend=legend, **kwargs) return result def corrwith(self, other: DataFrame | Series, drop: bool=False, method: CorrelationMethod='pearson', numeric_only: bool=False) -> DataFrame: warnings.warn('DataFrameGroupBy.corrwith is deprecated', FutureWarning, stacklevel=find_stack_level()) result = self._op_via_apply('corrwith', other=other, drop=drop, method=method, numeric_only=numeric_only) return result def _wrap_transform_general_frame(obj: DataFrame, group: DataFrame, res: DataFrame | Series) -> DataFrame: from pandas import concat if isinstance(res, Series): if res.index.is_(obj.index): res_frame = concat([res] * len(group.columns), axis=1, ignore_index=True) res_frame.columns = group.columns res_frame.index = group.index else: res_frame = obj._constructor(np.tile(res.values, (len(group.index), 1)), columns=group.columns, index=group.index) assert isinstance(res_frame, DataFrame) return res_frame elif isinstance(res, DataFrame) and (not res.index.is_(group.index)): return res._align_frame(group)[0] else: return res # File: pandas-main/pandas/core/groupby/groupby.py """""" from __future__ import annotations from collections.abc import Callable, Hashable, Iterable, Iterator, Mapping, Sequence import datetime from functools import partial, wraps from textwrap import dedent from typing import TYPE_CHECKING, Literal, TypeVar, Union, cast, final, overload import warnings import numpy as np from pandas._libs import Timestamp, lib from pandas._libs.algos import rank_1d import pandas._libs.groupby as libgroupby from pandas._libs.missing import NA from pandas._typing import AnyArrayLike, ArrayLike, DtypeObj, IndexLabel, IntervalClosedType, NDFrameT, PositionalIndexer, RandomState, npt from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError, DataError from pandas.util._decorators import Appender, Substitution, cache_readonly, doc from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import coerce_indexer_dtype, ensure_dtype_can_hold_na from pandas.core.dtypes.common import is_bool_dtype, is_float_dtype, is_hashable, is_integer, is_integer_dtype, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, needs_i8_conversion, pandas_dtype from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna from pandas.core import algorithms, sample from pandas.core._numba import executor from pandas.core.arrays import ArrowExtensionArray, BaseMaskedArray, ExtensionArray, FloatingArray, IntegerArray, SparseArray from pandas.core.arrays.string_ import StringDtype from pandas.core.arrays.string_arrow import ArrowStringArray, ArrowStringArrayNumpySemantics from pandas.core.base import PandasObject, SelectionMixin import pandas.core.common as com from pandas.core.frame import DataFrame from pandas.core.generic import NDFrame from pandas.core.groupby import base, numba_, ops from pandas.core.groupby.grouper import get_grouper from pandas.core.groupby.indexing import GroupByIndexingMixin, GroupByNthSelector from 
pandas.core.indexes.api import Index, MultiIndex, default_index from pandas.core.internals.blocks import ensure_block_shape from pandas.core.series import Series from pandas.core.sorting import get_group_index_sorter from pandas.core.util.numba_ import get_jit_arguments, maybe_use_numba if TYPE_CHECKING: from pandas._libs.tslibs import BaseOffset from pandas._typing import Any, Concatenate, P, Self, T from pandas.core.indexers.objects import BaseIndexer from pandas.core.resample import Resampler from pandas.core.window import ExpandingGroupby, ExponentialMovingWindowGroupby, RollingGroupby _common_see_also = '\n See Also\n --------\n Series.%(name)s : Apply a function %(name)s to a Series.\n DataFrame.%(name)s : Apply a function %(name)s\n to each row or column of a DataFrame.\n' _groupby_agg_method_engine_template = "\nCompute {fname} of group values.\n\nParameters\n----------\nnumeric_only : bool, default {no}\n Include only float, int, boolean columns.\n\n .. versionchanged:: 2.0.0\n\n numeric_only no longer accepts ``None``.\n\nmin_count : int, default {mc}\n The required number of valid values to perform the operation. If fewer\n than ``min_count`` non-NA values are present the result will be NA.\n\nengine : str, default None {e}\n * ``'cython'`` : Runs rolling apply through C-extensions from cython.\n * ``'numba'`` : Runs rolling apply through JIT compiled code from numba.\n Only available when ``raw`` is set to ``True``.\n * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``\n\nengine_kwargs : dict, default None {ek}\n * For ``'cython'`` engine, there are no accepted ``engine_kwargs``\n * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``\n and ``parallel`` dictionary keys. The values must either be ``True`` or\n ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is\n ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be\n applied to both the ``func`` and the ``apply`` groupby aggregation.\n\nReturns\n-------\nSeries or DataFrame\n Computed {fname} of values within each group.\n\nSee Also\n--------\nSeriesGroupBy.min : Return the min of the group values.\nDataFrameGroupBy.min : Return the min of the group values.\nSeriesGroupBy.max : Return the max of the group values.\nDataFrameGroupBy.max : Return the max of the group values.\nSeriesGroupBy.sum : Return the sum of the group values.\nDataFrameGroupBy.sum : Return the sum of the group values.\n\nExamples\n--------\n{example}\n" _pipe_template = '\nApply a ``func`` with arguments to this %(klass)s object and return its result.\n\nUse `.pipe` when you want to improve readability by chaining together\nfunctions that expect Series, DataFrames, GroupBy or Resampler objects.\nInstead of writing\n\n>>> h = lambda x, arg2, arg3: x + 1 - arg2 * arg3\n>>> g = lambda x, arg1: x * 5 / arg1\n>>> f = lambda x: x ** 4\n>>> df = pd.DataFrame([["a", 4], ["b", 5]], columns=["group", "value"])\n>>> h(g(f(df.groupby(\'group\')), arg1=1), arg2=2, arg3=3) # doctest: +SKIP\n\nYou can write\n\n>>> (df.groupby(\'group\')\n... .pipe(f)\n... .pipe(g, arg1=1)\n... 
.pipe(h, arg2=2, arg3=3)) # doctest: +SKIP\n\nwhich is much more readable.\n\nParameters\n----------\nfunc : callable or tuple of (callable, str)\n Function to apply to this %(klass)s object or, alternatively,\n a `(callable, data_keyword)` tuple where `data_keyword` is a\n string indicating the keyword of `callable` that expects the\n %(klass)s object.\n*args : iterable, optional\n Positional arguments passed into `func`.\n**kwargs : dict, optional\n A dictionary of keyword arguments passed into `func`.\n\nReturns\n-------\n%(klass)s\n The original object with the function `func` applied.\n\nSee Also\n--------\nSeries.pipe : Apply a function with arguments to a series.\nDataFrame.pipe: Apply a function with arguments to a dataframe.\napply : Apply function to each group instead of to the\n full %(klass)s object.\n\nNotes\n-----\nSee more `here\n`_\n\nExamples\n--------\n%(examples)s\n' _transform_template = '\nCall function producing a same-indexed %(klass)s on each group.\n\nReturns a %(klass)s having the same indexes as the original object\nfilled with the transformed values.\n\nParameters\n----------\nfunc : function, str\n Function to apply to each group. See the Notes section below for requirements.\n\n Accepted inputs are:\n\n - String\n - Python function\n - Numba JIT function with ``engine=\'numba\'`` specified.\n\n Only passing a single function is supported with this engine.\n If the ``\'numba\'`` engine is chosen, the function must be\n a user defined function with ``values`` and ``index`` as the\n first and second arguments respectively in the function signature.\n Each group\'s index will be passed to the user defined function\n and optionally available for use.\n\n If a string is chosen, then it needs to be the name\n of the groupby method you want to use.\n*args\n Positional arguments to pass to func.\nengine : str, default None\n * ``\'cython\'`` : Runs the function through C-extensions from cython.\n * ``\'numba\'`` : Runs the function through JIT compiled code from numba.\n * ``None`` : Defaults to ``\'cython\'`` or the global setting ``compute.use_numba``\n\nengine_kwargs : dict, default None\n * For ``\'cython\'`` engine, there are no accepted ``engine_kwargs``\n * For ``\'numba\'`` engine, the engine can accept ``nopython``, ``nogil``\n and ``parallel`` dictionary keys. The values must either be ``True`` or\n ``False``. The default ``engine_kwargs`` for the ``\'numba\'`` engine is\n ``{\'nopython\': True, \'nogil\': False, \'parallel\': False}`` and will be\n applied to the function\n\n**kwargs\n Keyword arguments to be passed into func.\n\nReturns\n-------\n%(klass)s\n %(klass)s with the same indexes as the original object filled\n with transformed values.\n\nSee Also\n--------\n%(klass)s.groupby.apply : Apply function ``func`` group-wise and combine\n the results together.\n%(klass)s.groupby.aggregate : Aggregate using one or more operations.\n%(klass)s.transform : Call ``func`` on self producing a %(klass)s with the\n same axis shape as self.\n\nNotes\n-----\nEach group is endowed the attribute \'name\' in case you need to know\nwhich group you are working on.\n\nThe current implementation imposes three requirements on f:\n\n* f must return a value that either has the same shape as the input\n subframe or can be broadcast to the shape of the input subframe.\n For example, if `f` returns a scalar it will be broadcast to have the\n same shape as the input subframe.\n* if this is a DataFrame, f must support application column-by-column\n in the subframe. 
If f also supports application to the entire subframe,\n then a fast path is used starting from the second chunk.\n* f must not mutate groups. Mutation is not supported and may\n produce unexpected results. See :ref:`gotchas.udf-mutation` for more details.\n\nWhen using ``engine=\'numba\'``, there will be no "fall back" behavior internally.\nThe group data and group index will be passed as numpy arrays to the JITed\nuser defined function, and no alternative execution attempts will be tried.\n\n.. versionchanged:: 1.3.0\n\n The resulting dtype will reflect the return value of the passed ``func``,\n see the examples below.\n\n.. versionchanged:: 2.0.0\n\n When using ``.transform`` on a grouped DataFrame and the transformation function\n returns a DataFrame, pandas now aligns the result\'s index\n with the input\'s index. You can call ``.to_numpy()`` on the\n result of the transformation function to avoid alignment.\n\nExamples\n--------\n%(example)s' _agg_template_series = '\nAggregate using one or more operations.\n\nParameters\n----------\nfunc : function, str, list, dict or None\n Function to use for aggregating the data. If a function, must either\n work when passed a {klass} or when passed to {klass}.apply.\n\n Accepted combinations are:\n\n - function\n - string function name\n - list of functions and/or function names, e.g. ``[np.sum, \'mean\']``\n - None, in which case ``**kwargs`` are used with Named Aggregation. Here the\n output has one column for each element in ``**kwargs``. The name of the\n column is keyword, whereas the value determines the aggregation used to compute\n the values in the column.\n\n Can also accept a Numba JIT function with\n ``engine=\'numba\'`` specified. Only passing a single function is supported\n with this engine.\n\n If the ``\'numba\'`` engine is chosen, the function must be\n a user defined function with ``values`` and ``index`` as the\n first and second arguments respectively in the function signature.\n Each group\'s index will be passed to the user defined function\n and optionally available for use.\n\n .. deprecated:: 2.1.0\n\n Passing a dictionary is deprecated and will raise in a future version\n of pandas. Pass a list of aggregations instead.\n*args\n Positional arguments to pass to func.\nengine : str, default None\n * ``\'cython\'`` : Runs the function through C-extensions from cython.\n * ``\'numba\'`` : Runs the function through JIT compiled code from numba.\n * ``None`` : Defaults to ``\'cython\'`` or globally setting ``compute.use_numba``\n\nengine_kwargs : dict, default None\n * For ``\'cython\'`` engine, there are no accepted ``engine_kwargs``\n * For ``\'numba\'`` engine, the engine can accept ``nopython``, ``nogil``\n and ``parallel`` dictionary keys. The values must either be ``True`` or\n ``False``. The default ``engine_kwargs`` for the ``\'numba\'`` engine is\n ``{{\'nopython\': True, \'nogil\': False, \'parallel\': False}}`` and will be\n applied to the function\n\n**kwargs\n * If ``func`` is None, ``**kwargs`` are used to define the output names and\n aggregations via Named Aggregation. 
See ``func`` entry.\n * Otherwise, keyword arguments to be passed into func.\n\nReturns\n-------\n{klass}\n\nSee Also\n--------\n{klass}GroupBy.apply : Apply function func group-wise\n and combine the results together.\n{klass}GroupBy.transform : Transforms the Series on each group\n based on the given function.\n{klass}.aggregate : Aggregate using one or more operations.\n\nNotes\n-----\nWhen using ``engine=\'numba\'``, there will be no "fall back" behavior internally.\nThe group data and group index will be passed as numpy arrays to the JITed\nuser defined function, and no alternative execution attempts will be tried.\n\nFunctions that mutate the passed object can produce unexpected\nbehavior or errors and are not supported. See :ref:`gotchas.udf-mutation`\nfor more details.\n\n.. versionchanged:: 1.3.0\n\n The resulting dtype will reflect the return value of the passed ``func``,\n see the examples below.\n{examples}' _agg_template_frame = '\nAggregate using one or more operations.\n\nParameters\n----------\nfunc : function, str, list, dict or None\n Function to use for aggregating the data. If a function, must either\n work when passed a {klass} or when passed to {klass}.apply.\n\n Accepted combinations are:\n\n - function\n - string function name\n - list of functions and/or function names, e.g. ``[np.sum, \'mean\']``\n - dict of index labels -> functions, function names or list of such.\n - None, in which case ``**kwargs`` are used with Named Aggregation. Here the\n output has one column for each element in ``**kwargs``. The name of the\n column is keyword, whereas the value determines the aggregation used to compute\n the values in the column.\n\n Can also accept a Numba JIT function with\n ``engine=\'numba\'`` specified. Only passing a single function is supported\n with this engine.\n\n If the ``\'numba\'`` engine is chosen, the function must be\n a user defined function with ``values`` and ``index`` as the\n first and second arguments respectively in the function signature.\n Each group\'s index will be passed to the user defined function\n and optionally available for use.\n\n*args\n Positional arguments to pass to func.\nengine : str, default None\n * ``\'cython\'`` : Runs the function through C-extensions from cython.\n * ``\'numba\'`` : Runs the function through JIT compiled code from numba.\n * ``None`` : Defaults to ``\'cython\'`` or globally setting ``compute.use_numba``\n\nengine_kwargs : dict, default None\n * For ``\'cython\'`` engine, there are no accepted ``engine_kwargs``\n * For ``\'numba\'`` engine, the engine can accept ``nopython``, ``nogil``\n and ``parallel`` dictionary keys. The values must either be ``True`` or\n ``False``. The default ``engine_kwargs`` for the ``\'numba\'`` engine is\n ``{{\'nopython\': True, \'nogil\': False, \'parallel\': False}}`` and will be\n applied to the function\n\n**kwargs\n * If ``func`` is None, ``**kwargs`` are used to define the output names and\n aggregations via Named Aggregation. 
See ``func`` entry.\n * Otherwise, keyword arguments to be passed into func.\n\nReturns\n-------\n{klass}\n\nSee Also\n--------\n{klass}.groupby.apply : Apply function func group-wise\n and combine the results together.\n{klass}.groupby.transform : Transforms the Series on each group\n based on the given function.\n{klass}.aggregate : Aggregate using one or more operations.\n\nNotes\n-----\nWhen using ``engine=\'numba\'``, there will be no "fall back" behavior internally.\nThe group data and group index will be passed as numpy arrays to the JITed\nuser defined function, and no alternative execution attempts will be tried.\n\nFunctions that mutate the passed object can produce unexpected\nbehavior or errors and are not supported. See :ref:`gotchas.udf-mutation`\nfor more details.\n\n.. versionchanged:: 1.3.0\n\n The resulting dtype will reflect the return value of the passed ``func``,\n see the examples below.\n{examples}' @final class GroupByPlot(PandasObject): def __init__(self, groupby: GroupBy) -> None: self._groupby = groupby def __call__(self, *args, **kwargs): def f(self): return self.plot(*args, **kwargs) f.__name__ = 'plot' return self._groupby._python_apply_general(f, self._groupby._selected_obj) def __getattr__(self, name: str): def attr(*args, **kwargs): def f(self): return getattr(self.plot, name)(*args, **kwargs) return self._groupby._python_apply_general(f, self._groupby._selected_obj) return attr _KeysArgType = Union[Hashable, list[Hashable], Callable[[Hashable], Hashable], list[Callable[[Hashable], Hashable]], Mapping[Hashable, Hashable]] class BaseGroupBy(PandasObject, SelectionMixin[NDFrameT], GroupByIndexingMixin): _hidden_attrs = PandasObject._hidden_attrs | {'as_index', 'dropna', 'exclusions', 'grouper', 'group_keys', 'keys', 'level', 'obj', 'observed', 'sort'} _grouper: ops.BaseGrouper keys: _KeysArgType | None = None level: IndexLabel | None = None group_keys: bool @final def __len__(self) -> int: return self._grouper.ngroups @final def __repr__(self) -> str: return object.__repr__(self) @final @property def groups(self) -> dict[Hashable, Index]: if isinstance(self.keys, list) and len(self.keys) == 1: warnings.warn("`groups` by one element list returns scalar is deprecated and will be removed. In a future version `groups` by one element list will return tuple. 
Use ``df.groupby(by='a').groups`` instead of ``df.groupby(by=['a']).groups`` to avoid this warning", FutureWarning, stacklevel=find_stack_level()) return self._grouper.groups @final @property def ngroups(self) -> int: return self._grouper.ngroups @final @property def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: return self._grouper.indices @final def _get_indices(self, names): def get_converter(s): if isinstance(s, datetime.datetime): return lambda key: Timestamp(key) elif isinstance(s, np.datetime64): return lambda key: Timestamp(key).asm8 else: return lambda key: key if len(names) == 0: return [] if len(self.indices) > 0: index_sample = next(iter(self.indices)) else: index_sample = None name_sample = names[0] if isinstance(index_sample, tuple): if not isinstance(name_sample, tuple): msg = 'must supply a tuple to get_group with multiple grouping keys' raise ValueError(msg) if not len(name_sample) == len(index_sample): try: return [self.indices[name] for name in names] except KeyError as err: msg = 'must supply a same-length tuple to get_group with multiple grouping keys' raise ValueError(msg) from err converters = (get_converter(s) for s in index_sample) names = (tuple((f(n) for (f, n) in zip(converters, name))) for name in names) else: converter = get_converter(index_sample) names = (converter(name) for name in names) return [self.indices.get(name, []) for name in names] @final def _get_index(self, name): return self._get_indices([name])[0] @final @cache_readonly def _selected_obj(self): if isinstance(self.obj, Series): return self.obj if self._selection is not None: if is_hashable(self._selection): return self.obj[self._selection] return self._obj_with_exclusions return self.obj @final def _dir_additions(self) -> set[str]: return self.obj._dir_additions() @overload def pipe(self, func: Callable[Concatenate[Self, P], T], *args: P.args, **kwargs: P.kwargs) -> T: ... @overload def pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T: ... 
@Substitution(klass='GroupBy', examples=dedent(" >>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})\n >>> df\n A B\n 0 a 1\n 1 b 2\n 2 a 3\n 3 b 4\n\n To get the difference between each groups maximum and minimum value in one\n pass, you can do\n\n >>> df.groupby('A').pipe(lambda x: x.max() - x.min())\n B\n A\n a 2\n b 2")) @Appender(_pipe_template) def pipe(self, func: Callable[Concatenate[Self, P], T] | tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T: return com.pipe(self, func, *args, **kwargs) @final def get_group(self, name) -> DataFrame | Series: keys = self.keys level = self.level if is_list_like(level) and len(level) == 1 or (is_list_like(keys) and len(keys) == 1): if isinstance(name, tuple) and len(name) == 1: name = name[0] else: raise KeyError(name) inds = self._get_index(name) if not len(inds): raise KeyError(name) return self._selected_obj.iloc[inds] @final def __iter__(self) -> Iterator[tuple[Hashable, NDFrameT]]: keys = self.keys level = self.level result = self._grouper.get_iterator(self._selected_obj) if is_list_like(level) and len(level) == 1 or (isinstance(keys, list) and len(keys) == 1): result = (((key,), group) for (key, group) in result) return result OutputFrameOrSeries = TypeVar('OutputFrameOrSeries', bound=NDFrame) class GroupBy(BaseGroupBy[NDFrameT]): _grouper: ops.BaseGrouper as_index: bool @final def __init__(self, obj: NDFrameT, keys: _KeysArgType | None=None, level: IndexLabel | None=None, grouper: ops.BaseGrouper | None=None, exclusions: frozenset[Hashable] | None=None, selection: IndexLabel | None=None, as_index: bool=True, sort: bool=True, group_keys: bool=True, observed: bool=False, dropna: bool=True) -> None: self._selection = selection assert isinstance(obj, NDFrame), type(obj) self.level = level self.as_index = as_index self.keys = keys self.sort = sort self.group_keys = group_keys self.dropna = dropna if grouper is None: (grouper, exclusions, obj) = get_grouper(obj, keys, level=level, sort=sort, observed=observed, dropna=self.dropna) self.observed = observed self.obj = obj self._grouper = grouper self.exclusions = frozenset(exclusions) if exclusions else frozenset() def __getattr__(self, attr: str): if attr in self._internal_names_set: return object.__getattribute__(self, attr) if attr in self.obj: return self[attr] raise AttributeError(f"'{type(self).__name__}' object has no attribute '{attr}'") @final def _op_via_apply(self, name: str, *args, **kwargs): f = getattr(type(self._obj_with_exclusions), name) def curried(x): return f(x, *args, **kwargs) curried.__name__ = name if name in base.plotting_methods: return self._python_apply_general(curried, self._selected_obj) is_transform = name in base.transformation_kernels result = self._python_apply_general(curried, self._obj_with_exclusions, is_transform=is_transform, not_indexed_same=not is_transform) if self._grouper.has_dropped_na and is_transform: result = self._set_result_index_ordered(result) return result @final def _concat_objects(self, values, not_indexed_same: bool=False, is_transform: bool=False): from pandas.core.reshape.concat import concat if self.group_keys and (not is_transform): if self.as_index: group_keys = self._grouper.result_index group_levels = self._grouper.levels group_names = self._grouper.names result = concat(values, axis=0, keys=group_keys, levels=group_levels, names=group_names, sort=False) else: result = concat(values, axis=0) elif not not_indexed_same: result = concat(values, axis=0) ax = self._selected_obj.index if self.dropna: labels = 
self._grouper.ids mask = labels != -1 ax = ax[mask] if ax.has_duplicates and (not result.axes[0].equals(ax)): target = algorithms.unique1d(ax._values) (indexer, _) = result.index.get_indexer_non_unique(target) result = result.take(indexer, axis=0) else: result = result.reindex(ax, axis=0) else: result = concat(values, axis=0) if self.obj.ndim == 1: name = self.obj.name elif is_hashable(self._selection): name = self._selection else: name = None if isinstance(result, Series) and name is not None: result.name = name return result @final def _set_result_index_ordered(self, result: OutputFrameOrSeries) -> OutputFrameOrSeries: index = self.obj.index if self._grouper.is_monotonic and (not self._grouper.has_dropped_na): result = result.set_axis(index, axis=0) return result original_positions = Index(self._grouper.result_ilocs) result = result.set_axis(original_positions, axis=0) result = result.sort_index(axis=0) if self._grouper.has_dropped_na: result = result.reindex(default_index(len(index)), axis=0) result = result.set_axis(index, axis=0) return result @final def _insert_inaxis_grouper(self, result: Series | DataFrame, qs: npt.NDArray[np.float64] | None=None) -> DataFrame: if isinstance(result, Series): result = result.to_frame() n_groupings = len(self._grouper.groupings) if qs is not None: result.insert(0, f'level_{n_groupings}', np.tile(qs, len(result) // len(qs))) for (level, (name, lev)) in enumerate(zip(reversed(self._grouper.names), self._grouper.get_group_levels())): if name is None: name = 'index' if n_groupings == 1 and qs is None else f'level_{n_groupings - level - 1}' if name not in result.columns: if qs is None: result.insert(0, name, lev) else: result.insert(0, name, Index(np.repeat(lev, len(qs)))) return result @final def _wrap_aggregated_output(self, result: Series | DataFrame, qs: npt.NDArray[np.float64] | None=None): if not self.as_index: result = self._insert_inaxis_grouper(result, qs=qs) result = result._consolidate() result.index = default_index(len(result)) else: index = self._grouper.result_index if qs is not None: index = _insert_quantile_level(index, qs) result.index = index return result def _wrap_applied_output(self, data, values: list, not_indexed_same: bool=False, is_transform: bool=False): raise AbstractMethodError(self) @final def _numba_prep(self, data: DataFrame): ngroups = self._grouper.ngroups sorted_index = self._grouper.result_ilocs sorted_ids = self._grouper._sorted_ids sorted_data = data.take(sorted_index, axis=0).to_numpy() index_data = data.index if isinstance(index_data, MultiIndex): if len(self._grouper.groupings) > 1: raise NotImplementedError("Grouping with more than 1 grouping labels and a MultiIndex is not supported with engine='numba'") group_key = self._grouper.groupings[0].name index_data = index_data.get_level_values(group_key) sorted_index_data = index_data.take(sorted_index).to_numpy() (starts, ends) = lib.generate_slices(sorted_ids, ngroups) return (starts, ends, sorted_index_data, sorted_data) def _numba_agg_general(self, func: Callable, dtype_mapping: dict[np.dtype, Any], engine_kwargs: dict[str, bool] | None, **aggregator_kwargs): if not self.as_index: raise NotImplementedError('as_index=False is not supported. 
Use .reset_index() instead.') data = self._obj_with_exclusions df = data if data.ndim == 2 else data.to_frame() aggregator = executor.generate_shared_aggregator(func, dtype_mapping, True, **get_jit_arguments(engine_kwargs)) ids = self._grouper.ids ngroups = self._grouper.ngroups res_mgr = df._mgr.apply(aggregator, labels=ids, ngroups=ngroups, **aggregator_kwargs) res_mgr.axes[1] = self._grouper.result_index result = df._constructor_from_mgr(res_mgr, axes=res_mgr.axes) if data.ndim == 1: result = result.squeeze('columns') result.name = data.name else: result.columns = data.columns return result @final def _transform_with_numba(self, func, *args, engine_kwargs=None, **kwargs): data = self._obj_with_exclusions index_sorting = self._grouper.result_ilocs df = data if data.ndim == 2 else data.to_frame() (starts, ends, sorted_index, sorted_data) = self._numba_prep(df) numba_.validate_udf(func) numba_transform_func = numba_.generate_numba_transform_func(func, **get_jit_arguments(engine_kwargs, kwargs)) result = numba_transform_func(sorted_data, sorted_index, starts, ends, len(df.columns), *args) result = result.take(np.argsort(index_sorting), axis=0) index = data.index if data.ndim == 1: result_kwargs = {'name': data.name} result = result.ravel() else: result_kwargs = {'columns': data.columns} return data._constructor(result, index=index, **result_kwargs) @final def _aggregate_with_numba(self, func, *args, engine_kwargs=None, **kwargs): data = self._obj_with_exclusions df = data if data.ndim == 2 else data.to_frame() (starts, ends, sorted_index, sorted_data) = self._numba_prep(df) numba_.validate_udf(func) numba_agg_func = numba_.generate_numba_agg_func(func, **get_jit_arguments(engine_kwargs, kwargs)) result = numba_agg_func(sorted_data, sorted_index, starts, ends, len(df.columns), *args) index = self._grouper.result_index if data.ndim == 1: result_kwargs = {'name': data.name} result = result.ravel() else: result_kwargs = {'columns': data.columns} res = data._constructor(result, index=index, **result_kwargs) if not self.as_index: res = self._insert_inaxis_grouper(res) res.index = default_index(len(res)) return res def apply(self, func, *args, include_groups: bool=True, **kwargs) -> NDFrameT: if isinstance(func, str): if hasattr(self, func): res = getattr(self, func) if callable(res): return res(*args, **kwargs) elif args or kwargs: raise ValueError(f'Cannot pass arguments to property {func}') return res else: raise TypeError(f"apply func should be callable, not '{func}'") elif args or kwargs: if callable(func): @wraps(func) def f(g): return func(g, *args, **kwargs) else: raise ValueError('func must be a callable if args or kwargs are supplied') else: f = func if not include_groups: return self._python_apply_general(f, self._obj_with_exclusions) try: result = self._python_apply_general(f, self._selected_obj) if not isinstance(self.obj, Series) and self._selection is None and (self._selected_obj.shape != self._obj_with_exclusions.shape): warnings.warn(message=_apply_groupings_depr.format(type(self).__name__, 'apply'), category=DeprecationWarning, stacklevel=find_stack_level()) except TypeError: return self._python_apply_general(f, self._obj_with_exclusions) return result @final def _python_apply_general(self, f: Callable, data: DataFrame | Series, not_indexed_same: bool | None=None, is_transform: bool=False, is_agg: bool=False) -> NDFrameT: (values, mutated) = self._grouper.apply_groupwise(f, data) if not_indexed_same is None: not_indexed_same = mutated return self._wrap_applied_output(data, 
values, not_indexed_same, is_transform) @final def _agg_general(self, numeric_only: bool=False, min_count: int=-1, *, alias: str, npfunc: Callable | None=None, **kwargs): result = self._cython_agg_general(how=alias, alt=npfunc, numeric_only=numeric_only, min_count=min_count, **kwargs) return result.__finalize__(self.obj, method='groupby') def _agg_py_fallback(self, how: str, values: ArrayLike, ndim: int, alt: Callable) -> ArrayLike: assert alt is not None if values.ndim == 1: ser = Series(values, copy=False) else: df = DataFrame(values.T, dtype=values.dtype) assert df.shape[1] == 1 ser = df.iloc[:, 0] try: res_values = self._grouper.agg_series(ser, alt, preserve_dtype=True) except Exception as err: msg = f'agg function failed [how->{how},dtype->{ser.dtype}]' raise type(err)(msg) from err if ser.dtype == object: res_values = res_values.astype(object, copy=False) return ensure_block_shape(res_values, ndim=ndim) @final def _cython_agg_general(self, how: str, alt: Callable | None=None, numeric_only: bool=False, min_count: int=-1, **kwargs): data = self._get_data_to_aggregate(numeric_only=numeric_only, name=how) def array_func(values: ArrayLike) -> ArrayLike: try: result = self._grouper._cython_operation('aggregate', values, how, axis=data.ndim - 1, min_count=min_count, **kwargs) except NotImplementedError: if how in ['any', 'all'] and isinstance(values, SparseArray): pass elif alt is None or how in ['any', 'all', 'std', 'sem']: raise else: return result assert alt is not None result = self._agg_py_fallback(how, values, ndim=data.ndim, alt=alt) return result new_mgr = data.grouped_reduce(array_func) res = self._wrap_agged_manager(new_mgr) if how in ['idxmin', 'idxmax']: res = self._wrap_idxmax_idxmin(res) out = self._wrap_aggregated_output(res) return out def _cython_transform(self, how: str, numeric_only: bool=False, **kwargs): raise AbstractMethodError(self) @final def _transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): if not isinstance(func, str): return self._transform_general(func, engine, engine_kwargs, *args, **kwargs) elif func not in base.transform_kernel_allowlist: msg = f"'{func}' is not a valid function name for transform(name)" raise ValueError(msg) elif func in base.cythonized_kernels or func in base.transformation_kernels: if engine is not None: kwargs['engine'] = engine kwargs['engine_kwargs'] = engine_kwargs return getattr(self, func)(*args, **kwargs) else: if self.observed: return self._reduction_kernel_transform(func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs) with com.temp_setattr(self, 'observed', True), com.temp_setattr(self, '_grouper', self._grouper.observed_grouper): return self._reduction_kernel_transform(func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs) @final def _reduction_kernel_transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs): with com.temp_setattr(self, 'as_index', True): if func in ['idxmin', 'idxmax']: func = cast(Literal['idxmin', 'idxmax'], func) result = self._idxmax_idxmin(func, True, *args, **kwargs) else: if engine is not None: kwargs['engine'] = engine kwargs['engine_kwargs'] = engine_kwargs result = getattr(self, func)(*args, **kwargs) return self._wrap_transform_fast_result(result) @final def _wrap_transform_fast_result(self, result: NDFrameT) -> NDFrameT: obj = self._obj_with_exclusions ids = self._grouper.ids result = result.reindex(self._grouper.result_index, axis=0) if self.obj.ndim == 1: out = algorithms.take_nd(result._values, ids) output = obj._constructor(out, 
index=obj.index, name=obj.name) else: new_ax = result.index.take(ids) output = result._reindex_with_indexers({0: (new_ax, ids)}, allow_dups=True) output = output.set_axis(obj.index, axis=0) return output @final def _apply_filter(self, indices, dropna): if len(indices) == 0: indices = np.array([], dtype='int64') else: indices = np.sort(np.concatenate(indices)) if dropna: filtered = self._selected_obj.take(indices, axis=0) else: mask = np.empty(len(self._selected_obj.index), dtype=bool) mask.fill(False) mask[indices.astype(int)] = True mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T filtered = self._selected_obj.where(mask) return filtered @final def _cumcount_array(self, ascending: bool=True) -> np.ndarray: ids = self._grouper.ids ngroups = self._grouper.ngroups sorter = get_group_index_sorter(ids, ngroups) (ids, count) = (ids[sorter], len(ids)) if count == 0: return np.empty(0, dtype=np.int64) run = np.r_[True, ids[:-1] != ids[1:]] rep = np.diff(np.r_[np.nonzero(run)[0], count]) out = (~run).cumsum() if ascending: out -= np.repeat(out[run], rep) else: out = np.repeat(out[np.r_[run[1:], True]], rep) - out if self._grouper.has_dropped_na: out = np.where(ids == -1, np.nan, out.astype(np.float64, copy=False)) else: out = out.astype(np.int64, copy=False) rev = np.empty(count, dtype=np.intp) rev[sorter] = np.arange(count, dtype=np.intp) return out[rev] @final @property def _obj_1d_constructor(self) -> Callable: if isinstance(self.obj, DataFrame): return self.obj._constructor_sliced assert isinstance(self.obj, Series) return self.obj._constructor @final @Substitution(name='groupby') @Substitution(see_also=_common_see_also) def any(self, skipna: bool=True) -> NDFrameT: return self._cython_agg_general('any', alt=lambda x: Series(x, copy=False).any(skipna=skipna), skipna=skipna) @final @Substitution(name='groupby') @Substitution(see_also=_common_see_also) def all(self, skipna: bool=True) -> NDFrameT: return self._cython_agg_general('all', alt=lambda x: Series(x, copy=False).all(skipna=skipna), skipna=skipna) @final @Substitution(name='groupby') @Substitution(see_also=_common_see_also) def count(self) -> NDFrameT: data = self._get_data_to_aggregate() ids = self._grouper.ids ngroups = self._grouper.ngroups mask = ids != -1 is_series = data.ndim == 1 def hfunc(bvalues: ArrayLike) -> ArrayLike: if bvalues.ndim == 1: masked = mask & ~isna(bvalues).reshape(1, -1) else: masked = mask & ~isna(bvalues) counted = lib.count_level_2d(masked, labels=ids, max_bin=ngroups) if isinstance(bvalues, BaseMaskedArray): return IntegerArray(counted[0], mask=np.zeros(counted.shape[1], dtype=np.bool_)) elif isinstance(bvalues, ArrowExtensionArray) and (not isinstance(bvalues.dtype, StringDtype)): dtype = pandas_dtype('int64[pyarrow]') return type(bvalues)._from_sequence(counted[0], dtype=dtype) if is_series: assert counted.ndim == 2 assert counted.shape[0] == 1 return counted[0] return counted new_mgr = data.grouped_reduce(hfunc) new_obj = self._wrap_agged_manager(new_mgr) result = self._wrap_aggregated_output(new_obj) return result @final @Substitution(name='groupby') @Substitution(see_also=_common_see_also) def mean(self, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): if maybe_use_numba(engine): from pandas.core._numba.kernels import grouped_mean return self._numba_agg_general(grouped_mean, executor.float_dtype_mapping, engine_kwargs, min_periods=0) else: result = self._cython_agg_general('mean', alt=lambda x: Series(x, 
copy=False).mean(numeric_only=numeric_only), numeric_only=numeric_only) return result.__finalize__(self.obj, method='groupby') @final def median(self, numeric_only: bool=False) -> NDFrameT: result = self._cython_agg_general('median', alt=lambda x: Series(x, copy=False).median(numeric_only=numeric_only), numeric_only=numeric_only) return result.__finalize__(self.obj, method='groupby') @final @Substitution(name='groupby') @Substitution(see_also=_common_see_also) def std(self, ddof: int=1, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None, numeric_only: bool=False): if maybe_use_numba(engine): from pandas.core._numba.kernels import grouped_var return np.sqrt(self._numba_agg_general(grouped_var, executor.float_dtype_mapping, engine_kwargs, min_periods=0, ddof=ddof)) else: return self._cython_agg_general('std', alt=lambda x: Series(x, copy=False).std(ddof=ddof), numeric_only=numeric_only, ddof=ddof) @final @Substitution(name='groupby') @Substitution(see_also=_common_see_also) def var(self, ddof: int=1, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None, numeric_only: bool=False): if maybe_use_numba(engine): from pandas.core._numba.kernels import grouped_var return self._numba_agg_general(grouped_var, executor.float_dtype_mapping, engine_kwargs, min_periods=0, ddof=ddof) else: return self._cython_agg_general('var', alt=lambda x: Series(x, copy=False).var(ddof=ddof), numeric_only=numeric_only, ddof=ddof) @final def _value_counts(self, subset: Sequence[Hashable] | None=None, normalize: bool=False, sort: bool=True, ascending: bool=False, dropna: bool=True) -> DataFrame | Series: name = 'proportion' if normalize else 'count' df = self.obj obj = self._obj_with_exclusions in_axis_names = {grouping.name for grouping in self._grouper.groupings if grouping.in_axis} if isinstance(obj, Series): _name = obj.name keys: Iterable[Series] = [] if _name in in_axis_names else [obj] else: unique_cols = set(obj.columns) if subset is not None: subsetted = set(subset) clashing = subsetted & set(in_axis_names) if clashing: raise ValueError(f'Keys {clashing} in subset cannot be in the groupby column keys.') doesnt_exist = subsetted - unique_cols if doesnt_exist: raise ValueError(f'Keys {doesnt_exist} in subset do not exist in the DataFrame.') else: subsetted = unique_cols keys = (obj.iloc[:, idx] for (idx, _name) in enumerate(obj.columns) if _name not in in_axis_names and _name in subsetted) groupings = list(self._grouper.groupings) for key in keys: (grouper, _, _) = get_grouper(df, key=key, sort=self.sort, observed=False, dropna=dropna) groupings += list(grouper.groupings) gb = df.groupby(groupings, sort=self.sort, observed=self.observed, dropna=self.dropna) result_series = cast(Series, gb.size()) result_series.name = name if sort: result_series = result_series.sort_values(ascending=ascending, kind='stable') if self.sort: names = result_series.index.names result_series.index.names = range(len(names)) index_level = range(len(self._grouper.groupings)) result_series = result_series.sort_index(level=index_level, sort_remaining=False) result_series.index.names = names if normalize: levels = list(range(len(self._grouper.groupings), result_series.index.nlevels)) indexed_group_size = result_series.groupby(result_series.index.droplevel(levels), sort=self.sort, dropna=self.dropna, observed=False).transform('sum') result_series /= indexed_group_size result_series = result_series.fillna(0.0) result: Series | DataFrame if self.as_index: result = 
result_series else: index = result_series.index columns = com.fill_missing_names(index.names) if name in columns: raise ValueError(f"Column label '{name}' is duplicate of result column") result_series.name = name result_series.index = index.set_names(range(len(columns))) result_frame = result_series.reset_index() orig_dtype = self._grouper.groupings[0].obj.columns.dtype cols = Index(columns, dtype=orig_dtype).insert(len(columns), name) result_frame.columns = cols result = result_frame return result.__finalize__(self.obj, method='value_counts') @final def sem(self, ddof: int=1, numeric_only: bool=False) -> NDFrameT: if numeric_only and self.obj.ndim == 1 and (not is_numeric_dtype(self.obj.dtype)): raise TypeError(f'{type(self).__name__}.sem called with numeric_only={numeric_only} and dtype {self.obj.dtype}') return self._cython_agg_general('sem', alt=lambda x: Series(x, copy=False).sem(ddof=ddof), numeric_only=numeric_only, ddof=ddof) @final @Substitution(name='groupby') @Substitution(see_also=_common_see_also) def size(self) -> DataFrame | Series: result = self._grouper.size() dtype_backend: None | Literal['pyarrow', 'numpy_nullable'] = None if isinstance(self.obj, Series): if isinstance(self.obj.array, ArrowExtensionArray): if isinstance(self.obj.array, ArrowStringArrayNumpySemantics): dtype_backend = None elif isinstance(self.obj.array, ArrowStringArray): dtype_backend = 'numpy_nullable' else: dtype_backend = 'pyarrow' elif isinstance(self.obj.array, BaseMaskedArray): dtype_backend = 'numpy_nullable' if isinstance(self.obj, Series): result = self._obj_1d_constructor(result, name=self.obj.name) else: result = self._obj_1d_constructor(result) if dtype_backend is not None: result = result.convert_dtypes(infer_objects=False, convert_string=False, convert_boolean=False, convert_floating=False, dtype_backend=dtype_backend) if not self.as_index: result = result.rename('size').reset_index() return result @final @doc(_groupby_agg_method_engine_template, fname='sum', no=False, mc=0, e=None, ek=None, example=dedent(' For SeriesGroupBy:\n\n >>> lst = [\'a\', \'a\', \'b\', \'b\']\n >>> ser = pd.Series([1, 2, 3, 4], index=lst)\n >>> ser\n a 1\n a 2\n b 3\n b 4\n dtype: int64\n >>> ser.groupby(level=0).sum()\n a 3\n b 7\n dtype: int64\n\n For DataFrameGroupBy:\n\n >>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]]\n >>> df = pd.DataFrame(data, columns=["a", "b", "c"],\n ... 
index=["tiger", "leopard", "cheetah", "lion"])\n >>> df\n a b c\n tiger 1 8 2\n leopard 1 2 5\n cheetah 2 5 8\n lion 2 6 9\n >>> df.groupby("a").sum()\n b c\n a\n 1 10 7\n 2 11 17')) def sum(self, numeric_only: bool=False, min_count: int=0, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): if maybe_use_numba(engine): from pandas.core._numba.kernels import grouped_sum return self._numba_agg_general(grouped_sum, executor.default_dtype_mapping, engine_kwargs, min_periods=min_count) else: with com.temp_setattr(self, 'observed', True): result = self._agg_general(numeric_only=numeric_only, min_count=min_count, alias='sum', npfunc=np.sum) return result @final def prod(self, numeric_only: bool=False, min_count: int=0) -> NDFrameT: return self._agg_general(numeric_only=numeric_only, min_count=min_count, alias='prod', npfunc=np.prod) @final @doc(_groupby_agg_method_engine_template, fname='min', no=False, mc=-1, e=None, ek=None, example=dedent(' For SeriesGroupBy:\n\n >>> lst = [\'a\', \'a\', \'b\', \'b\']\n >>> ser = pd.Series([1, 2, 3, 4], index=lst)\n >>> ser\n a 1\n a 2\n b 3\n b 4\n dtype: int64\n >>> ser.groupby(level=0).min()\n a 1\n b 3\n dtype: int64\n\n For DataFrameGroupBy:\n\n >>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]]\n >>> df = pd.DataFrame(data, columns=["a", "b", "c"],\n ... index=["tiger", "leopard", "cheetah", "lion"])\n >>> df\n a b c\n tiger 1 8 2\n leopard 1 2 5\n cheetah 2 5 8\n lion 2 6 9\n >>> df.groupby("a").min()\n b c\n a\n 1 2 2\n 2 5 8')) def min(self, numeric_only: bool=False, min_count: int=-1, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): if maybe_use_numba(engine): from pandas.core._numba.kernels import grouped_min_max return self._numba_agg_general(grouped_min_max, executor.identity_dtype_mapping, engine_kwargs, min_periods=min_count, is_max=False) else: return self._agg_general(numeric_only=numeric_only, min_count=min_count, alias='min', npfunc=np.min) @final @doc(_groupby_agg_method_engine_template, fname='max', no=False, mc=-1, e=None, ek=None, example=dedent(' For SeriesGroupBy:\n\n >>> lst = [\'a\', \'a\', \'b\', \'b\']\n >>> ser = pd.Series([1, 2, 3, 4], index=lst)\n >>> ser\n a 1\n a 2\n b 3\n b 4\n dtype: int64\n >>> ser.groupby(level=0).max()\n a 2\n b 4\n dtype: int64\n\n For DataFrameGroupBy:\n\n >>> data = [[1, 8, 2], [1, 2, 5], [2, 5, 8], [2, 6, 9]]\n >>> df = pd.DataFrame(data, columns=["a", "b", "c"],\n ... 
index=["tiger", "leopard", "cheetah", "lion"])\n >>> df\n a b c\n tiger 1 8 2\n leopard 1 2 5\n cheetah 2 5 8\n lion 2 6 9\n >>> df.groupby("a").max()\n b c\n a\n 1 8 5\n 2 6 9')) def max(self, numeric_only: bool=False, min_count: int=-1, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): if maybe_use_numba(engine): from pandas.core._numba.kernels import grouped_min_max return self._numba_agg_general(grouped_min_max, executor.identity_dtype_mapping, engine_kwargs, min_periods=min_count, is_max=True) else: return self._agg_general(numeric_only=numeric_only, min_count=min_count, alias='max', npfunc=np.max) @final def first(self, numeric_only: bool=False, min_count: int=-1, skipna: bool=True) -> NDFrameT: def first_compat(obj: NDFrameT): def first(x: Series): arr = x.array[notna(x.array)] if not len(arr): return x.array.dtype.na_value return arr[0] if isinstance(obj, DataFrame): return obj.apply(first) elif isinstance(obj, Series): return first(obj) else: raise TypeError(type(obj)) return self._agg_general(numeric_only=numeric_only, min_count=min_count, alias='first', npfunc=first_compat, skipna=skipna) @final def last(self, numeric_only: bool=False, min_count: int=-1, skipna: bool=True) -> NDFrameT: def last_compat(obj: NDFrameT): def last(x: Series): arr = x.array[notna(x.array)] if not len(arr): return x.array.dtype.na_value return arr[-1] if isinstance(obj, DataFrame): return obj.apply(last) elif isinstance(obj, Series): return last(obj) else: raise TypeError(type(obj)) return self._agg_general(numeric_only=numeric_only, min_count=min_count, alias='last', npfunc=last_compat, skipna=skipna) @final def ohlc(self) -> DataFrame: if self.obj.ndim == 1: obj = self._selected_obj is_numeric = is_numeric_dtype(obj.dtype) if not is_numeric: raise DataError('No numeric types to aggregate') res_values = self._grouper._cython_operation('aggregate', obj._values, 'ohlc', axis=0, min_count=-1) agg_names = ['open', 'high', 'low', 'close'] result = self.obj._constructor_expanddim(res_values, index=self._grouper.result_index, columns=agg_names) return result result = self._apply_to_column_groupbys(lambda sgb: sgb.ohlc()) return result @doc(DataFrame.describe) def describe(self, percentiles=None, include=None, exclude=None) -> NDFrameT: obj = self._obj_with_exclusions if len(obj) == 0: described = obj.describe(percentiles=percentiles, include=include, exclude=exclude) if obj.ndim == 1: result = described else: result = described.unstack() return result.to_frame().T.iloc[:0] with com.temp_setattr(self, 'as_index', True): result = self._python_apply_general(lambda x: x.describe(percentiles=percentiles, include=include, exclude=exclude), obj, not_indexed_same=True) result = result.unstack() if not self.as_index: result = self._insert_inaxis_grouper(result) result.index = default_index(len(result)) return result @final def resample(self, rule, *args, include_groups: bool=True, **kwargs) -> Resampler: from pandas.core.resample import get_resampler_for_grouping return get_resampler_for_grouping(self, rule, *args, include_groups=include_groups, **kwargs) @final def rolling(self, window: int | datetime.timedelta | str | BaseOffset | BaseIndexer, min_periods: int | None=None, center: bool=False, win_type: str | None=None, on: str | None=None, closed: IntervalClosedType | None=None, method: str='single') -> RollingGroupby: from pandas.core.window import RollingGroupby return RollingGroupby(self._selected_obj, window=window, min_periods=min_periods, center=center, 
win_type=win_type, on=on, closed=closed, method=method, _grouper=self._grouper, _as_index=self.as_index) @final @Substitution(name='groupby') @Appender(_common_see_also) def expanding(self, *args, **kwargs) -> ExpandingGroupby: from pandas.core.window import ExpandingGroupby return ExpandingGroupby(self._selected_obj, *args, _grouper=self._grouper, **kwargs) @final @Substitution(name='groupby') @Appender(_common_see_also) def ewm(self, *args, **kwargs) -> ExponentialMovingWindowGroupby: from pandas.core.window import ExponentialMovingWindowGroupby return ExponentialMovingWindowGroupby(self._selected_obj, *args, _grouper=self._grouper, **kwargs) @final def _fill(self, direction: Literal['ffill', 'bfill'], limit: int | None=None): if limit is None: limit = -1 ids = self._grouper.ids ngroups = self._grouper.ngroups col_func = partial(libgroupby.group_fillna_indexer, labels=ids, limit=limit, compute_ffill=direction == 'ffill', ngroups=ngroups) def blk_func(values: ArrayLike) -> ArrayLike: mask = isna(values) if values.ndim == 1: indexer = np.empty(values.shape, dtype=np.intp) col_func(out=indexer, mask=mask) return algorithms.take_nd(values, indexer) else: if isinstance(values, np.ndarray): dtype = values.dtype if self._grouper.has_dropped_na: dtype = ensure_dtype_can_hold_na(values.dtype) out = np.empty(values.shape, dtype=dtype) else: out = type(values)._empty(values.shape, dtype=values.dtype) for (i, value_element) in enumerate(values): indexer = np.empty(values.shape[1], dtype=np.intp) col_func(out=indexer, mask=mask[i]) out[i, :] = algorithms.take_nd(value_element, indexer) return out mgr = self._get_data_to_aggregate() res_mgr = mgr.apply(blk_func) new_obj = self._wrap_agged_manager(res_mgr) new_obj.index = self.obj.index return new_obj @final @Substitution(name='groupby') def ffill(self, limit: int | None=None): return self._fill('ffill', limit=limit) @final @Substitution(name='groupby') def bfill(self, limit: int | None=None): return self._fill('bfill', limit=limit) @final @property @Substitution(name='groupby') @Substitution(see_also=_common_see_also) def nth(self) -> GroupByNthSelector: return GroupByNthSelector(self) def _nth(self, n: PositionalIndexer | tuple, dropna: Literal['any', 'all', None]=None) -> NDFrameT: if not dropna: mask = self._make_mask_from_positional_indexer(n) ids = self._grouper.ids mask = mask & (ids != -1) out = self._mask_selected_obj(mask) return out if not is_integer(n): raise ValueError('dropna option only supported for an integer argument') if dropna not in ['any', 'all']: raise ValueError(f"For a DataFrame or Series groupby.nth, dropna must be either None, 'any' or 'all', (was passed {dropna}).") n = cast(int, n) dropped = self._selected_obj.dropna(how=dropna, axis=0) grouper: np.ndarray | Index | ops.BaseGrouper if len(dropped) == len(self._selected_obj): grouper = self._grouper else: axis = self._grouper.axis grouper = self._grouper.codes_info[axis.isin(dropped.index)] if self._grouper.has_dropped_na: nulls = grouper == -1 values = np.where(nulls, NA, grouper) grouper = Index(values, dtype='Int64') grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort) return grb.nth(n) @final def quantile(self, q: float | AnyArrayLike=0.5, interpolation: str='linear', numeric_only: bool=False): mgr = self._get_data_to_aggregate(numeric_only=numeric_only, name='quantile') obj = self._wrap_agged_manager(mgr) splitter = self._grouper._get_splitter(obj) sdata = splitter._sorted_data (starts, ends) = lib.generate_slices(splitter._slabels, splitter.ngroups) 
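# NOTE (descriptive comment, not in the original source): libgroupby.group_quantile
# operates on plain float64 ndarrays, so the pre_processor defined next converts
# masked/extension/integer/bool/datetime-like values to a float ndarray while
# recording an "inference" dtype, and post_processor casts the float result back
# to that dtype afterwards. Illustrative behaviour, assuming a hypothetical frame:
#     >>> pd.DataFrame({"a": ["x", "x", "y", "y"], "b": [1, 2, 3, 4]}).groupby("a")["b"].quantile(0.5)
#     a
#     x    1.5
#     y    3.5
#     Name: b, dtype: float64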
def pre_processor(vals: ArrayLike) -> tuple[np.ndarray, DtypeObj | None]: if is_object_dtype(vals.dtype): raise TypeError("'quantile' cannot be performed against 'object' dtypes!") inference: DtypeObj | None = None if isinstance(vals, BaseMaskedArray) and is_numeric_dtype(vals.dtype): out = vals.to_numpy(dtype=float, na_value=np.nan) inference = vals.dtype elif is_integer_dtype(vals.dtype): if isinstance(vals, ExtensionArray): out = vals.to_numpy(dtype=float, na_value=np.nan) else: out = vals inference = np.dtype(np.int64) elif is_bool_dtype(vals.dtype) and isinstance(vals, ExtensionArray): out = vals.to_numpy(dtype=float, na_value=np.nan) elif is_bool_dtype(vals.dtype): raise TypeError('Cannot use quantile with bool dtype') elif needs_i8_conversion(vals.dtype): inference = vals.dtype return (vals, inference) elif isinstance(vals, ExtensionArray) and is_float_dtype(vals.dtype): inference = np.dtype(np.float64) out = vals.to_numpy(dtype=float, na_value=np.nan) else: out = np.asarray(vals) return (out, inference) def post_processor(vals: np.ndarray, inference: DtypeObj | None, result_mask: np.ndarray | None, orig_vals: ArrayLike) -> ArrayLike: if inference: if isinstance(orig_vals, BaseMaskedArray): assert result_mask is not None if interpolation in {'linear', 'midpoint'} and (not is_float_dtype(orig_vals)): return FloatingArray(vals, result_mask) else: with warnings.catch_warnings(): warnings.filterwarnings('ignore', category=RuntimeWarning) return type(orig_vals)(vals.astype(inference.numpy_dtype), result_mask) elif not (is_integer_dtype(inference) and interpolation in {'linear', 'midpoint'}): if needs_i8_conversion(inference): vals = vals.astype('i8').view(orig_vals._ndarray.dtype) return orig_vals._from_backing_data(vals) assert isinstance(inference, np.dtype) return vals.astype(inference) return vals if is_scalar(q): qs = np.array([q], dtype=np.float64) pass_qs: None | np.ndarray = None else: qs = np.asarray(q, dtype=np.float64) pass_qs = qs ids = self._grouper.ids ngroups = self._grouper.ngroups if self.dropna: ids = ids[ids >= 0] nqs = len(qs) func = partial(libgroupby.group_quantile, labels=ids, qs=qs, interpolation=interpolation, starts=starts, ends=ends) def blk_func(values: ArrayLike) -> ArrayLike: orig_vals = values if isinstance(values, BaseMaskedArray): mask = values._mask result_mask = np.zeros((ngroups, nqs), dtype=np.bool_) else: mask = isna(values) result_mask = None is_datetimelike = needs_i8_conversion(values.dtype) (vals, inference) = pre_processor(values) ncols = 1 if vals.ndim == 2: ncols = vals.shape[0] out = np.empty((ncols, ngroups, nqs), dtype=np.float64) if is_datetimelike: vals = vals.view('i8') if vals.ndim == 1: func(out[0], values=vals, mask=mask, result_mask=result_mask, is_datetimelike=is_datetimelike) else: for i in range(ncols): func(out[i], values=vals[i], mask=mask[i], result_mask=None, is_datetimelike=is_datetimelike) if vals.ndim == 1: out = out.ravel('K') if result_mask is not None: result_mask = result_mask.ravel('K') else: out = out.reshape(ncols, ngroups * nqs) return post_processor(out, inference, result_mask, orig_vals) res_mgr = sdata._mgr.grouped_reduce(blk_func) res = self._wrap_agged_manager(res_mgr) return self._wrap_aggregated_output(res, qs=pass_qs) @final @Substitution(name='groupby') def ngroup(self, ascending: bool=True): obj = self._obj_with_exclusions index = obj.index comp_ids = self._grouper.ids dtype: type if self._grouper.has_dropped_na: comp_ids = np.where(comp_ids == -1, np.nan, comp_ids) dtype = np.float64 else: dtype = 
np.int64 if any((ping._passed_categorical for ping in self._grouper.groupings)): comp_ids = rank_1d(comp_ids, ties_method='dense') - 1 result = self._obj_1d_constructor(comp_ids, index, dtype=dtype) if not ascending: result = self.ngroups - 1 - result return result @final @Substitution(name='groupby') def cumcount(self, ascending: bool=True): index = self._obj_with_exclusions.index cumcounts = self._cumcount_array(ascending=ascending) return self._obj_1d_constructor(cumcounts, index) @final @Substitution(name='groupby') @Substitution(see_also=_common_see_also) def rank(self, method: str='average', ascending: bool=True, na_option: str='keep', pct: bool=False) -> NDFrameT: if na_option not in {'keep', 'top', 'bottom'}: msg = "na_option must be one of 'keep', 'top', or 'bottom'" raise ValueError(msg) kwargs = {'ties_method': method, 'ascending': ascending, 'na_option': na_option, 'pct': pct} return self._cython_transform('rank', numeric_only=False, **kwargs) @final @Substitution(name='groupby') @Substitution(see_also=_common_see_also) def cumprod(self, numeric_only: bool=False, *args, **kwargs) -> NDFrameT: nv.validate_groupby_func('cumprod', args, kwargs, ['skipna']) return self._cython_transform('cumprod', numeric_only, **kwargs) @final @Substitution(name='groupby') @Substitution(see_also=_common_see_also) def cumsum(self, numeric_only: bool=False, *args, **kwargs) -> NDFrameT: nv.validate_groupby_func('cumsum', args, kwargs, ['skipna']) return self._cython_transform('cumsum', numeric_only, **kwargs) @final @Substitution(name='groupby') @Substitution(see_also=_common_see_also) def cummin(self, numeric_only: bool=False, **kwargs) -> NDFrameT: skipna = kwargs.get('skipna', True) return self._cython_transform('cummin', numeric_only=numeric_only, skipna=skipna) @final @Substitution(name='groupby') @Substitution(see_also=_common_see_also) def cummax(self, numeric_only: bool=False, **kwargs) -> NDFrameT: skipna = kwargs.get('skipna', True) return self._cython_transform('cummax', numeric_only=numeric_only, skipna=skipna) @final @Substitution(name='groupby') def shift(self, periods: int | Sequence[int]=1, freq=None, fill_value=lib.no_default, suffix: str | None=None): if is_list_like(periods): periods = cast(Sequence, periods) if len(periods) == 0: raise ValueError('If `periods` is an iterable, it cannot be empty.') from pandas.core.reshape.concat import concat add_suffix = True else: if not is_integer(periods): raise TypeError(f'Periods must be integer, but {periods} is {type(periods)}.') if suffix: raise ValueError('Cannot specify `suffix` if `periods` is an int.') periods = [cast(int, periods)] add_suffix = False shifted_dataframes = [] for period in periods: if not is_integer(period): raise TypeError(f'Periods must be integer, but {period} is {type(period)}.') period = cast(int, period) if freq is not None: f = lambda x: x.shift(period, freq, 0, fill_value) shifted = self._python_apply_general(f, self._selected_obj, is_transform=True) else: if fill_value is lib.no_default: fill_value = None ids = self._grouper.ids ngroups = self._grouper.ngroups res_indexer = np.zeros(len(ids), dtype=np.int64) libgroupby.group_shift_indexer(res_indexer, ids, ngroups, period) obj = self._obj_with_exclusions shifted = obj._reindex_with_indexers({0: (obj.index, res_indexer)}, fill_value=fill_value, allow_dups=True) if add_suffix: if isinstance(shifted, Series): shifted = cast(NDFrameT, shifted.to_frame()) shifted = shifted.add_suffix(f'{suffix}_{period}' if suffix else f'_{period}') 
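# Descriptive comment (not in the original source): each per-period result is
# collected below and, when several periods were passed, later concatenated along
# axis=1; e.g. a hypothetical gb["b"].shift([1, 2]) yields columns "b_1" and "b_2"
# (with the user-supplied suffix inserted before the period when one is given).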
shifted_dataframes.append(cast(Union[Series, DataFrame], shifted)) return shifted_dataframes[0] if len(shifted_dataframes) == 1 else concat(shifted_dataframes, axis=1) @final @Substitution(name='groupby') @Substitution(see_also=_common_see_also) def diff(self, periods: int=1) -> NDFrameT: obj = self._obj_with_exclusions shifted = self.shift(periods=periods) dtypes_to_f32 = ['int8', 'int16'] if obj.ndim == 1: if obj.dtype in dtypes_to_f32: shifted = shifted.astype('float32') else: to_coerce = [c for (c, dtype) in obj.dtypes.items() if dtype in dtypes_to_f32] if len(to_coerce): shifted = shifted.astype({c: 'float32' for c in to_coerce}) return obj - shifted @final @Substitution(name='groupby') @Substitution(see_also=_common_see_also) def pct_change(self, periods: int=1, fill_method: None=None, freq=None): if fill_method is not None: raise ValueError(f'fill_method must be None; got fill_method={fill_method!r}.') if freq is not None: f = lambda x: x.pct_change(periods=periods, freq=freq, axis=0) return self._python_apply_general(f, self._selected_obj, is_transform=True) if fill_method is None: op = 'ffill' else: op = fill_method filled = getattr(self, op)(limit=0) fill_grp = filled.groupby(self._grouper.codes, group_keys=self.group_keys) shifted = fill_grp.shift(periods=periods, freq=freq) return filled / shifted - 1 @final @Substitution(name='groupby') @Substitution(see_also=_common_see_also) def head(self, n: int=5) -> NDFrameT: mask = self._make_mask_from_positional_indexer(slice(None, n)) return self._mask_selected_obj(mask) @final @Substitution(name='groupby') @Substitution(see_also=_common_see_also) def tail(self, n: int=5) -> NDFrameT: if n: mask = self._make_mask_from_positional_indexer(slice(-n, None)) else: mask = self._make_mask_from_positional_indexer([]) return self._mask_selected_obj(mask) @final def _mask_selected_obj(self, mask: npt.NDArray[np.bool_]) -> NDFrameT: ids = self._grouper.ids mask = mask & (ids != -1) return self._selected_obj[mask] @final def sample(self, n: int | None=None, frac: float | None=None, replace: bool=False, weights: Sequence | Series | None=None, random_state: RandomState | None=None): if self._selected_obj.empty: return self._selected_obj size = sample.process_sampling_size(n, frac, replace) if weights is not None: weights_arr = sample.preprocess_weights(self._selected_obj, weights, axis=0) random_state = com.random_state(random_state) group_iterator = self._grouper.get_iterator(self._selected_obj) sampled_indices = [] for (labels, obj) in group_iterator: grp_indices = self.indices[labels] group_size = len(grp_indices) if size is not None: sample_size = size else: assert frac is not None sample_size = round(frac * group_size) grp_sample = sample.sample(group_size, size=sample_size, replace=replace, weights=None if weights is None else weights_arr[grp_indices], random_state=random_state) sampled_indices.append(grp_indices[grp_sample]) sampled_indices = np.concatenate(sampled_indices) return self._selected_obj.take(sampled_indices, axis=0) def _idxmax_idxmin(self, how: Literal['idxmax', 'idxmin'], ignore_unobserved: bool=False, skipna: bool=True, numeric_only: bool=False) -> NDFrameT: if not self.observed and any((ping._passed_categorical for ping in self._grouper.groupings)): expected_len = len(self._grouper.result_index) group_sizes = self._grouper.size() result_len = group_sizes[group_sizes > 0].shape[0] assert result_len <= expected_len has_unobserved = result_len < expected_len raise_err: bool | np.bool_ = not ignore_unobserved and has_unobserved 
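# Descriptive comment (not in the original source): when grouping on a categorical
# with observed=False, unobserved categories produce empty groups, and idxmin/idxmax
# cannot return a row label for an empty group. raise_err computed above therefore
# triggers a ValueError below unless ignore_unobserved is set (the transform path
# passes ignore_unobserved=True and temporarily groups on observed categories only).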
data = self._obj_with_exclusions if raise_err and isinstance(data, DataFrame): if numeric_only: data = data._get_numeric_data() raise_err = len(data.columns) > 0 if raise_err: raise ValueError(f"Can't get {how} of an empty group due to unobserved categories. Specify observed=True in groupby instead.") elif not skipna and self._obj_with_exclusions.isna().any(axis=None): raise ValueError(f'{type(self).__name__}.{how} with skipna=False encountered an NA value.') result = self._agg_general(numeric_only=numeric_only, min_count=1, alias=how, skipna=skipna) return result def _wrap_idxmax_idxmin(self, res: NDFrameT) -> NDFrameT: index = self.obj.index if res.size == 0: result = res.astype(index.dtype) else: if isinstance(index, MultiIndex): index = index.to_flat_index() values = res._values assert isinstance(values, np.ndarray) na_value = na_value_for_dtype(index.dtype, compat=False) if isinstance(res, Series): result = res._constructor(index.array.take(values, allow_fill=True, fill_value=na_value), index=res.index, name=res.name) else: data = {} for (k, column_values) in enumerate(values.T): data[k] = index.array.take(column_values, allow_fill=True, fill_value=na_value) result = self.obj._constructor(data, index=res.index) result.columns = res.columns return result @doc(GroupBy) def get_groupby(obj: NDFrame, by: _KeysArgType | None=None, grouper: ops.BaseGrouper | None=None, group_keys: bool=True) -> GroupBy: klass: type[GroupBy] if isinstance(obj, Series): from pandas.core.groupby.generic import SeriesGroupBy klass = SeriesGroupBy elif isinstance(obj, DataFrame): from pandas.core.groupby.generic import DataFrameGroupBy klass = DataFrameGroupBy else: raise TypeError(f'invalid type: {obj}') return klass(obj=obj, keys=by, grouper=grouper, group_keys=group_keys) def _insert_quantile_level(idx: Index, qs: npt.NDArray[np.float64]) -> MultiIndex: nqs = len(qs) (lev_codes, lev) = Index(qs).factorize() lev_codes = coerce_indexer_dtype(lev_codes, lev) if idx._is_multi: idx = cast(MultiIndex, idx) levels = list(idx.levels) + [lev] codes = [np.repeat(x, nqs) for x in idx.codes] + [np.tile(lev_codes, len(idx))] mi = MultiIndex(levels=levels, codes=codes, names=idx.names + [None]) else: nidx = len(idx) idx_codes = coerce_indexer_dtype(np.arange(nidx), idx) levels = [idx, lev] codes = [np.repeat(idx_codes, nqs), np.tile(lev_codes, nidx)] mi = MultiIndex(levels=levels, codes=codes, names=[idx.name, None]) return mi _apply_groupings_depr = '{}.{} operated on the grouping columns. This behavior is deprecated, and in a future version of pandas the grouping columns will be excluded from the operation. Either pass `include_groups=False` to exclude the groupings or explicitly select the grouping columns after groupby to silence this warning.' 
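# --- Illustrative usage sketch (not part of the pandas source above) ---------
# A minimal, hedged example of the public GroupBy API implemented in groupby.py:
# Cython-backed reductions (mean/sum/std), size/count, value_counts, the
# cumcount/ngroup helpers, multi-period shift, and quantile. The frame, column
# names, and calls below are hypothetical illustrations, not pandas test code.
import pandas as pd

df = pd.DataFrame(
    {
        "key": ["a", "a", "b", "b", "b"],
        "x": [1.0, 2.0, 3.0, 4.0, 5.0],
        "y": [10, 20, 30, 40, 50],
    }
)
gb = df.groupby("key")

# Reductions dispatched through _agg_general / _cython_agg_general; passing
# engine="numba" instead would route through _numba_agg_general (requires numba).
print(gb.mean())
print(gb.sum(min_count=1))
print(gb.std(ddof=1))

# size() counts rows per group; count() counts non-NA values per column.
print(gb.size())
print(gb.count())

# value_counts() extends the existing grouping with the value column(s).
print(gb["y"].value_counts(normalize=True))

# Row-wise helpers built on _cumcount_array and the grouper ids.
print(gb.cumcount())   # 0-based position of each row within its group
print(gb.ngroup())     # group number of each row

# A list of periods yields one "_<period>"-suffixed column per shift.
print(gb["x"].shift([1, 2]))

# quantile() appends the requested quantile level(s) to the result index.
print(gb["x"].quantile([0.25, 0.75]))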
# File: pandas-main/pandas/core/groupby/grouper.py """""" from __future__ import annotations from typing import TYPE_CHECKING, final import numpy as np from pandas._libs.tslibs import OutOfBoundsDatetime from pandas.errors import InvalidIndexError from pandas.util._decorators import cache_readonly from pandas.core.dtypes.common import is_list_like, is_scalar from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core import algorithms from pandas.core.arrays import Categorical, ExtensionArray import pandas.core.common as com from pandas.core.frame import DataFrame from pandas.core.groupby import ops from pandas.core.groupby.categorical import recode_for_groupby from pandas.core.indexes.api import Index, MultiIndex, default_index from pandas.core.series import Series from pandas.io.formats.printing import pprint_thing if TYPE_CHECKING: from collections.abc import Hashable, Iterator from pandas._typing import ArrayLike, NDFrameT, npt from pandas.core.generic import NDFrame class Grouper: sort: bool dropna: bool _grouper: Index | None _attributes: tuple[str, ...] = ('key', 'level', 'freq', 'sort', 'dropna') def __new__(cls, *args, **kwargs): if kwargs.get('freq') is not None: from pandas.core.resample import TimeGrouper cls = TimeGrouper return super().__new__(cls) def __init__(self, key=None, level=None, freq=None, sort: bool=False, dropna: bool=True) -> None: self.key = key self.level = level self.freq = freq self.sort = sort self.dropna = dropna self._indexer_deprecated: npt.NDArray[np.intp] | None = None self.binner = None self._grouper = None self._indexer: npt.NDArray[np.intp] | None = None def _get_grouper(self, obj: NDFrameT, validate: bool=True) -> tuple[ops.BaseGrouper, NDFrameT]: (obj, _, _) = self._set_grouper(obj) (grouper, _, obj) = get_grouper(obj, [self.key], level=self.level, sort=self.sort, validate=validate, dropna=self.dropna) return (grouper, obj) def _set_grouper(self, obj: NDFrameT, sort: bool=False, *, gpr_index: Index | None=None) -> tuple[NDFrameT, Index, npt.NDArray[np.intp] | None]: assert obj is not None if self.key is not None and self.level is not None: raise ValueError('The Grouper cannot specify both a key and a level!') if self._grouper is None: self._grouper = gpr_index self._indexer = self._indexer_deprecated if self.key is not None: key = self.key if getattr(gpr_index, 'name', None) == key and isinstance(obj, Series): assert self._grouper is not None if self._indexer is not None: reverse_indexer = self._indexer.argsort() unsorted_ax = self._grouper.take(reverse_indexer) ax = unsorted_ax.take(obj.index) else: ax = self._grouper.take(obj.index) else: if key not in obj._info_axis: raise KeyError(f'The grouper name {key} is not found') ax = Index(obj[key], name=key) else: ax = obj.index if self.level is not None: level = self.level if isinstance(ax, MultiIndex): level = ax._get_level_number(level) ax = Index(ax._get_level_values(level), name=ax.names[level]) elif level not in (0, ax.name): raise ValueError(f'The level {level} is not valid') indexer: npt.NDArray[np.intp] | None = None if (self.sort or sort) and (not ax.is_monotonic_increasing): indexer = self._indexer_deprecated = ax.array.argsort(kind='mergesort', na_position='first') ax = ax.take(indexer) obj = obj.take(indexer, axis=0) return (obj, ax, indexer) @final def __repr__(self) -> str: attrs_list = (f'{attr_name}={getattr(self, attr_name)!r}' for attr_name in self._attributes if getattr(self, attr_name) is not None) attrs = ', '.join(attrs_list) cls_name = type(self).__name__ return 
f'{cls_name}({attrs})' @final class Grouping: _codes: npt.NDArray[np.signedinteger] | None = None _orig_cats: Index | None _index: Index def __init__(self, index: Index, grouper=None, obj: NDFrame | None=None, level=None, sort: bool=True, observed: bool=False, in_axis: bool=False, dropna: bool=True, uniques: ArrayLike | None=None) -> None: self.level = level self._orig_grouper = grouper grouping_vector = _convert_grouper(index, grouper) self._orig_cats = None self._index = index self._sort = sort self.obj = obj self._observed = observed self.in_axis = in_axis self._dropna = dropna self._uniques = uniques ilevel = self._ilevel if ilevel is not None: if isinstance(index, MultiIndex): index_level = index.get_level_values(ilevel) else: index_level = index if grouping_vector is None: grouping_vector = index_level else: mapper = grouping_vector grouping_vector = index_level.map(mapper) elif isinstance(grouping_vector, Grouper): assert self.obj is not None (newgrouper, newobj) = grouping_vector._get_grouper(self.obj, validate=False) self.obj = newobj if isinstance(newgrouper, ops.BinGrouper): grouping_vector = newgrouper else: ng = newgrouper.groupings[0].grouping_vector grouping_vector = Index(ng, name=newgrouper.result_index.name) elif not isinstance(grouping_vector, (Series, Index, ExtensionArray, np.ndarray)): if getattr(grouping_vector, 'ndim', 1) != 1: t = str(type(grouping_vector)) raise ValueError(f"Grouper for '{t}' not 1-dimensional") grouping_vector = index.map(grouping_vector) if not (hasattr(grouping_vector, '__len__') and len(grouping_vector) == len(index)): grper = pprint_thing(grouping_vector) errmsg = f'Grouper result violates len(labels) == len(data)\nresult: {grper}' raise AssertionError(errmsg) if isinstance(grouping_vector, np.ndarray): if grouping_vector.dtype.kind in 'mM': grouping_vector = Series(grouping_vector).to_numpy() elif isinstance(getattr(grouping_vector, 'dtype', None), CategoricalDtype): self._orig_cats = grouping_vector.categories grouping_vector = recode_for_groupby(grouping_vector, sort, observed) self.grouping_vector = grouping_vector def __repr__(self) -> str: return f'Grouping({self.name})' def __iter__(self) -> Iterator: return iter(self.indices) @cache_readonly def _passed_categorical(self) -> bool: dtype = getattr(self.grouping_vector, 'dtype', None) return isinstance(dtype, CategoricalDtype) @cache_readonly def name(self) -> Hashable: ilevel = self._ilevel if ilevel is not None: return self._index.names[ilevel] if isinstance(self._orig_grouper, (Index, Series)): return self._orig_grouper.name elif isinstance(self.grouping_vector, ops.BaseGrouper): return self.grouping_vector.result_index.name elif isinstance(self.grouping_vector, Index): return self.grouping_vector.name return None @cache_readonly def _ilevel(self) -> int | None: level = self.level if level is None: return None if not isinstance(level, int): index = self._index if level not in index.names: raise AssertionError(f'Level {level} not in index') return index.names.index(level) return level @property def ngroups(self) -> int: return len(self.uniques) @cache_readonly def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: if isinstance(self.grouping_vector, ops.BaseGrouper): return self.grouping_vector.indices values = Categorical(self.grouping_vector) return values._reverse_indexer() @property def codes(self) -> npt.NDArray[np.signedinteger]: return self._codes_and_uniques[0] @property def uniques(self) -> ArrayLike: return self._codes_and_uniques[1] @cache_readonly def 
_codes_and_uniques(self) -> tuple[npt.NDArray[np.signedinteger], ArrayLike]: uniques: ArrayLike if self._passed_categorical: cat = self.grouping_vector categories = cat.categories if self._observed: ucodes = algorithms.unique1d(cat.codes) ucodes = ucodes[ucodes != -1] if self._sort: ucodes = np.sort(ucodes) else: ucodes = np.arange(len(categories)) has_dropped_na = False if not self._dropna: na_mask = cat.isna() if np.any(na_mask): has_dropped_na = True if self._sort: na_code = len(categories) else: na_idx = na_mask.argmax() na_code = algorithms.nunique_ints(cat.codes[:na_idx]) ucodes = np.insert(ucodes, na_code, -1) uniques = Categorical.from_codes(codes=ucodes, categories=categories, ordered=cat.ordered, validate=False) codes = cat.codes if has_dropped_na: if not self._sort: codes = np.where(codes >= na_code, codes + 1, codes) codes = np.where(na_mask, na_code, codes) return (codes, uniques) elif isinstance(self.grouping_vector, ops.BaseGrouper): codes = self.grouping_vector.codes_info uniques = self.grouping_vector.result_index._values elif self._uniques is not None: cat = Categorical(self.grouping_vector, categories=self._uniques) codes = cat.codes uniques = self._uniques else: (codes, uniques) = algorithms.factorize(self.grouping_vector, sort=self._sort, use_na_sentinel=self._dropna) return (codes, uniques) @cache_readonly def groups(self) -> dict[Hashable, Index]: (codes, uniques) = self._codes_and_uniques uniques = Index._with_infer(uniques, name=self.name) cats = Categorical.from_codes(codes, uniques, validate=False) return self._index.groupby(cats) @property def observed_grouping(self) -> Grouping: if self._observed: return self return self._observed_grouping @cache_readonly def _observed_grouping(self) -> Grouping: grouping = Grouping(self._index, self._orig_grouper, obj=self.obj, level=self.level, sort=self._sort, observed=True, in_axis=self.in_axis, dropna=self._dropna, uniques=self._uniques) return grouping def get_grouper(obj: NDFrameT, key=None, level=None, sort: bool=True, observed: bool=False, validate: bool=True, dropna: bool=True) -> tuple[ops.BaseGrouper, frozenset[Hashable], NDFrameT]: group_axis = obj.index if level is not None: if isinstance(group_axis, MultiIndex): if is_list_like(level) and len(level) == 1: level = level[0] if key is None and is_scalar(level): key = group_axis.get_level_values(level) level = None else: if is_list_like(level): nlevels = len(level) if nlevels == 1: level = level[0] elif nlevels == 0: raise ValueError('No group keys passed!') else: raise ValueError('multiple levels only valid with MultiIndex') if isinstance(level, str): if obj.index.name != level: raise ValueError(f'level name {level} is not the name of the index') elif level > 0 or level < -1: raise ValueError('level > 0 or level < -1 only valid with MultiIndex') level = None key = group_axis if isinstance(key, Grouper): (grouper, obj) = key._get_grouper(obj, validate=False) if key.key is None: return (grouper, frozenset(), obj) else: return (grouper, frozenset({key.key}), obj) elif isinstance(key, ops.BaseGrouper): return (key, frozenset(), obj) if not isinstance(key, list): keys = [key] match_axis_length = False else: keys = key match_axis_length = len(keys) == len(group_axis) any_callable = any((callable(g) or isinstance(g, dict) for g in keys)) any_groupers = any((isinstance(g, (Grouper, Grouping)) for g in keys)) any_arraylike = any((isinstance(g, (list, tuple, Series, Index, np.ndarray)) for g in keys)) if not any_callable and (not any_arraylike) and (not any_groupers) and 
match_axis_length and (level is None): if isinstance(obj, DataFrame): all_in_columns_index = all((g in obj.columns or g in obj.index.names for g in keys)) else: assert isinstance(obj, Series) all_in_columns_index = all((g in obj.index.names for g in keys)) if not all_in_columns_index: keys = [com.asarray_tuplesafe(keys)] if isinstance(level, (tuple, list)): if key is None: keys = [None] * len(level) levels = level else: levels = [level] * len(keys) groupings: list[Grouping] = [] exclusions: set[Hashable] = set() def is_in_axis(key) -> bool: if not _is_label_like(key): if obj.ndim == 1: return False items = obj.axes[-1] try: items.get_loc(key) except (KeyError, TypeError, InvalidIndexError): return False return True def is_in_obj(gpr) -> bool: if not hasattr(gpr, 'name'): return False try: obj_gpr_column = obj[gpr.name] except (KeyError, IndexError, InvalidIndexError, OutOfBoundsDatetime): return False if isinstance(gpr, Series) and isinstance(obj_gpr_column, Series): return gpr._mgr.references_same_values(obj_gpr_column._mgr, 0) return False for (gpr, level) in zip(keys, levels): if is_in_obj(gpr): in_axis = True exclusions.add(gpr.name) elif is_in_axis(gpr): if obj.ndim != 1 and gpr in obj: if validate: obj._check_label_or_level_ambiguity(gpr, axis=0) (in_axis, name, gpr) = (True, gpr, obj[gpr]) if gpr.ndim != 1: raise ValueError(f"Grouper for '{name}' not 1-dimensional") exclusions.add(name) elif obj._is_level_reference(gpr, axis=0): (in_axis, level, gpr) = (False, gpr, None) else: raise KeyError(gpr) elif isinstance(gpr, Grouper) and gpr.key is not None: exclusions.add(gpr.key) in_axis = True else: in_axis = False ping = Grouping(group_axis, gpr, obj=obj, level=level, sort=sort, observed=observed, in_axis=in_axis, dropna=dropna) if not isinstance(gpr, Grouping) else gpr groupings.append(ping) if len(groupings) == 0 and len(obj): raise ValueError('No group keys passed!') if len(groupings) == 0: groupings.append(Grouping(default_index(0), np.array([], dtype=np.intp))) grouper = ops.BaseGrouper(group_axis, groupings, sort=sort, dropna=dropna) return (grouper, frozenset(exclusions), obj) def _is_label_like(val) -> bool: return isinstance(val, (str, tuple)) or (val is not None and is_scalar(val)) def _convert_grouper(axis: Index, grouper): if isinstance(grouper, dict): return grouper.get elif isinstance(grouper, Series): if grouper.index.equals(axis): return grouper._values else: return grouper.reindex(axis)._values elif isinstance(grouper, MultiIndex): return grouper._values elif isinstance(grouper, (list, tuple, Index, Categorical, np.ndarray)): if len(grouper) != len(axis): raise ValueError('Grouper and axis must be same length') if isinstance(grouper, (list, tuple)): grouper = com.asarray_tuplesafe(grouper) return grouper else: return grouper # File: pandas-main/pandas/core/groupby/indexing.py from __future__ import annotations from collections.abc import Iterable from typing import TYPE_CHECKING, Literal, cast import numpy as np from pandas.util._decorators import cache_readonly, doc from pandas.core.dtypes.common import is_integer, is_list_like if TYPE_CHECKING: from pandas._typing import PositionalIndexer from pandas import DataFrame, Series from pandas.core.groupby import groupby class GroupByIndexingMixin: @cache_readonly def _positional_selector(self) -> GroupByPositionalSelector: if TYPE_CHECKING: groupby_self = cast(groupby.GroupBy, self) else: groupby_self = self return GroupByPositionalSelector(groupby_self) def _make_mask_from_positional_indexer(self, arg: PositionalIndexer | 
tuple) -> np.ndarray: if is_list_like(arg): if all((is_integer(i) for i in cast(Iterable, arg))): mask = self._make_mask_from_list(cast(Iterable[int], arg)) else: mask = self._make_mask_from_tuple(cast(tuple, arg)) elif isinstance(arg, slice): mask = self._make_mask_from_slice(arg) elif is_integer(arg): mask = self._make_mask_from_int(cast(int, arg)) else: raise TypeError(f'Invalid index {type(arg)}. Must be integer, list-like, slice or a tuple of integers and slices') if isinstance(mask, bool): if mask: mask = self._ascending_count >= 0 else: mask = self._ascending_count < 0 return cast(np.ndarray, mask) def _make_mask_from_int(self, arg: int) -> np.ndarray: if arg >= 0: return self._ascending_count == arg else: return self._descending_count == -arg - 1 def _make_mask_from_list(self, args: Iterable[int]) -> bool | np.ndarray: positive = [arg for arg in args if arg >= 0] negative = [-arg - 1 for arg in args if arg < 0] mask: bool | np.ndarray = False if positive: mask |= np.isin(self._ascending_count, positive) if negative: mask |= np.isin(self._descending_count, negative) return mask def _make_mask_from_tuple(self, args: tuple) -> bool | np.ndarray: mask: bool | np.ndarray = False for arg in args: if is_integer(arg): mask |= self._make_mask_from_int(cast(int, arg)) elif isinstance(arg, slice): mask |= self._make_mask_from_slice(arg) else: raise ValueError(f'Invalid argument {type(arg)}. Should be int or slice.') return mask def _make_mask_from_slice(self, arg: slice) -> bool | np.ndarray: start = arg.start stop = arg.stop step = arg.step if step is not None and step < 0: raise ValueError(f'Invalid step {step}. Must be non-negative') mask: bool | np.ndarray = True if step is None: step = 1 if start is None: if step > 1: mask &= self._ascending_count % step == 0 elif start >= 0: mask &= self._ascending_count >= start if step > 1: mask &= (self._ascending_count - start) % step == 0 else: mask &= self._descending_count < -start offset_array = self._descending_count + start + 1 limit_array = self._ascending_count + self._descending_count + (start + 1) < 0 offset_array = np.where(limit_array, self._ascending_count, offset_array) mask &= offset_array % step == 0 if stop is not None: if stop >= 0: mask &= self._ascending_count < stop else: mask &= self._descending_count >= -stop return mask @cache_readonly def _ascending_count(self) -> np.ndarray: if TYPE_CHECKING: groupby_self = cast(groupby.GroupBy, self) else: groupby_self = self return groupby_self._cumcount_array() @cache_readonly def _descending_count(self) -> np.ndarray: if TYPE_CHECKING: groupby_self = cast(groupby.GroupBy, self) else: groupby_self = self return groupby_self._cumcount_array(ascending=False) @doc(GroupByIndexingMixin._positional_selector) class GroupByPositionalSelector: def __init__(self, groupby_object: groupby.GroupBy) -> None: self.groupby_object = groupby_object def __getitem__(self, arg: PositionalIndexer | tuple) -> DataFrame | Series: mask = self.groupby_object._make_mask_from_positional_indexer(arg) return self.groupby_object._mask_selected_obj(mask) class GroupByNthSelector: def __init__(self, groupby_object: groupby.GroupBy) -> None: self.groupby_object = groupby_object def __call__(self, n: PositionalIndexer | tuple, dropna: Literal['any', 'all', None]=None) -> DataFrame | Series: return self.groupby_object._nth(n, dropna) def __getitem__(self, n: PositionalIndexer | tuple) -> DataFrame | Series: return self.groupby_object._nth(n) # File: pandas-main/pandas/core/groupby/numba_.py """""" from __future__ import 
annotations import functools import inspect from typing import TYPE_CHECKING, Any import numpy as np from pandas.compat._optional import import_optional_dependency from pandas.core.util.numba_ import NumbaUtilError, jit_user_function if TYPE_CHECKING: from collections.abc import Callable from pandas._typing import Scalar def validate_udf(func: Callable) -> None: if not callable(func): raise NotImplementedError('Numba engine can only be used with a single function.') udf_signature = list(inspect.signature(func).parameters.keys()) expected_args = ['values', 'index'] min_number_args = len(expected_args) if len(udf_signature) < min_number_args or udf_signature[:min_number_args] != expected_args: raise NumbaUtilError(f'The first {min_number_args} arguments to {func.__name__} must be {expected_args}') @functools.cache def generate_numba_agg_func(func: Callable[..., Scalar], nopython: bool, nogil: bool, parallel: bool) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]: numba_func = jit_user_function(func) if TYPE_CHECKING: import numba else: numba = import_optional_dependency('numba') @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def group_agg(values: np.ndarray, index: np.ndarray, begin: np.ndarray, end: np.ndarray, num_columns: int, *args: Any) -> np.ndarray: assert len(begin) == len(end) num_groups = len(begin) result = np.empty((num_groups, num_columns)) for i in numba.prange(num_groups): group_index = index[begin[i]:end[i]] for j in numba.prange(num_columns): group = values[begin[i]:end[i], j] result[i, j] = numba_func(group, group_index, *args) return result return group_agg @functools.cache def generate_numba_transform_func(func: Callable[..., np.ndarray], nopython: bool, nogil: bool, parallel: bool) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]: numba_func = jit_user_function(func) if TYPE_CHECKING: import numba else: numba = import_optional_dependency('numba') @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def group_transform(values: np.ndarray, index: np.ndarray, begin: np.ndarray, end: np.ndarray, num_columns: int, *args: Any) -> np.ndarray: assert len(begin) == len(end) num_groups = len(begin) result = np.empty((len(values), num_columns)) for i in numba.prange(num_groups): group_index = index[begin[i]:end[i]] for j in numba.prange(num_columns): group = values[begin[i]:end[i], j] result[begin[i]:end[i], j] = numba_func(group, group_index, *args) return result return group_transform # File: pandas-main/pandas/core/groupby/ops.py """""" from __future__ import annotations import collections import functools from typing import TYPE_CHECKING, Generic, final import numpy as np from pandas._libs import NaT, lib import pandas._libs.groupby as libgroupby from pandas._typing import ArrayLike, AxisInt, NDFrameT, Shape, npt from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly from pandas.core.dtypes.cast import maybe_cast_pointwise_result, maybe_downcast_to_dtype from pandas.core.dtypes.common import ensure_float64, ensure_int64, ensure_platform_int, ensure_uint64, is_1d_only_ea_dtype from pandas.core.dtypes.missing import isna, maybe_fill from pandas.core.arrays import Categorical from pandas.core.frame import DataFrame from pandas.core.groupby import grouper from pandas.core.indexes.api import CategoricalIndex, Index, MultiIndex, ensure_index from pandas.core.series import Series from pandas.core.sorting import compress_group_index, 
decons_obs_group_ids, get_group_index, get_group_index_sorter, get_indexer_dict if TYPE_CHECKING: from collections.abc import Callable, Generator, Hashable, Iterator from pandas.core.generic import NDFrame def check_result_array(obj, dtype) -> None: if isinstance(obj, np.ndarray): if dtype != object: raise ValueError('Must produce aggregated value') def extract_result(res): if hasattr(res, '_values'): res = res._values if res.ndim == 1 and len(res) == 1: res = res[0] return res class WrappedCythonOp: cast_blocklist = frozenset(['any', 'all', 'rank', 'count', 'size', 'idxmin', 'idxmax']) def __init__(self, kind: str, how: str, has_dropped_na: bool) -> None: self.kind = kind self.how = how self.has_dropped_na = has_dropped_na _CYTHON_FUNCTIONS: dict[str, dict] = {'aggregate': {'any': functools.partial(libgroupby.group_any_all, val_test='any'), 'all': functools.partial(libgroupby.group_any_all, val_test='all'), 'sum': 'group_sum', 'prod': 'group_prod', 'idxmin': functools.partial(libgroupby.group_idxmin_idxmax, name='idxmin'), 'idxmax': functools.partial(libgroupby.group_idxmin_idxmax, name='idxmax'), 'min': 'group_min', 'max': 'group_max', 'mean': 'group_mean', 'median': 'group_median_float64', 'var': 'group_var', 'std': functools.partial(libgroupby.group_var, name='std'), 'sem': functools.partial(libgroupby.group_var, name='sem'), 'skew': 'group_skew', 'first': 'group_nth', 'last': 'group_last', 'ohlc': 'group_ohlc'}, 'transform': {'cumprod': 'group_cumprod', 'cumsum': 'group_cumsum', 'cummin': 'group_cummin', 'cummax': 'group_cummax', 'rank': 'group_rank'}} _cython_arity = {'ohlc': 4} @classmethod def get_kind_from_how(cls, how: str) -> str: if how in cls._CYTHON_FUNCTIONS['aggregate']: return 'aggregate' return 'transform' @classmethod @functools.cache def _get_cython_function(cls, kind: str, how: str, dtype: np.dtype, is_numeric: bool): dtype_str = dtype.name ftype = cls._CYTHON_FUNCTIONS[kind][how] if callable(ftype): f = ftype else: f = getattr(libgroupby, ftype) if is_numeric: return f elif dtype == np.dtype(object): if how in ['median', 'cumprod']: raise NotImplementedError(f'function is not implemented for this dtype: [how->{how},dtype->{dtype_str}]') elif how in ['std', 'sem', 'idxmin', 'idxmax']: return f elif how == 'skew': pass elif 'object' not in f.__signatures__: raise NotImplementedError(f'function is not implemented for this dtype: [how->{how},dtype->{dtype_str}]') return f else: raise NotImplementedError('This should not be reached. 
Please report a bug at github.com/pandas-dev/pandas/', dtype) def _get_cython_vals(self, values: np.ndarray) -> np.ndarray: how = self.how if how in ['median', 'std', 'sem', 'skew']: values = ensure_float64(values) elif values.dtype.kind in 'iu': if how in ['var', 'mean'] or (self.kind == 'transform' and self.has_dropped_na): values = ensure_float64(values) elif how in ['sum', 'ohlc', 'prod', 'cumsum', 'cumprod']: if values.dtype.kind == 'i': values = ensure_int64(values) else: values = ensure_uint64(values) return values def _get_output_shape(self, ngroups: int, values: np.ndarray) -> Shape: how = self.how kind = self.kind arity = self._cython_arity.get(how, 1) out_shape: Shape if how == 'ohlc': out_shape = (ngroups, arity) elif arity > 1: raise NotImplementedError("arity of more than 1 is not supported for the 'how' argument") elif kind == 'transform': out_shape = values.shape else: out_shape = (ngroups,) + values.shape[1:] return out_shape def _get_out_dtype(self, dtype: np.dtype) -> np.dtype: how = self.how if how == 'rank': out_dtype = 'float64' elif how in ['idxmin', 'idxmax']: out_dtype = 'intp' elif dtype.kind in 'iufcb': out_dtype = f'{dtype.kind}{dtype.itemsize}' else: out_dtype = 'object' return np.dtype(out_dtype) def _get_result_dtype(self, dtype: np.dtype) -> np.dtype: how = self.how if how in ['sum', 'cumsum', 'sum', 'prod', 'cumprod']: if dtype == np.dtype(bool): return np.dtype(np.int64) elif how in ['mean', 'median', 'var', 'std', 'sem']: if dtype.kind in 'fc': return dtype elif dtype.kind in 'iub': return np.dtype(np.float64) return dtype @final def _cython_op_ndim_compat(self, values: np.ndarray, *, min_count: int, ngroups: int, comp_ids: np.ndarray, mask: npt.NDArray[np.bool_] | None=None, result_mask: npt.NDArray[np.bool_] | None=None, **kwargs) -> np.ndarray: if values.ndim == 1: values2d = values[None, :] if mask is not None: mask = mask[None, :] if result_mask is not None: result_mask = result_mask[None, :] res = self._call_cython_op(values2d, min_count=min_count, ngroups=ngroups, comp_ids=comp_ids, mask=mask, result_mask=result_mask, **kwargs) if res.shape[0] == 1: return res[0] return res.T return self._call_cython_op(values, min_count=min_count, ngroups=ngroups, comp_ids=comp_ids, mask=mask, result_mask=result_mask, **kwargs) @final def _call_cython_op(self, values: np.ndarray, *, min_count: int, ngroups: int, comp_ids: np.ndarray, mask: npt.NDArray[np.bool_] | None, result_mask: npt.NDArray[np.bool_] | None, **kwargs) -> np.ndarray: orig_values = values dtype = values.dtype is_numeric = dtype.kind in 'iufcb' is_datetimelike = dtype.kind in 'mM' if is_datetimelike: values = values.view('int64') is_numeric = True elif dtype.kind == 'b': values = values.view('uint8') if values.dtype == 'float16': values = values.astype(np.float32) if self.how in ['any', 'all']: if mask is None: mask = isna(values) if dtype == object: if kwargs['skipna']: if mask.any(): values = values.copy() values[mask] = True values = values.astype(bool, copy=False).view(np.int8) is_numeric = True values = values.T if mask is not None: mask = mask.T if result_mask is not None: result_mask = result_mask.T out_shape = self._get_output_shape(ngroups, values) func = self._get_cython_function(self.kind, self.how, values.dtype, is_numeric) values = self._get_cython_vals(values) out_dtype = self._get_out_dtype(values.dtype) result = maybe_fill(np.empty(out_shape, dtype=out_dtype)) if self.kind == 'aggregate': counts = np.zeros(ngroups, dtype=np.int64) if self.how in ['idxmin', 'idxmax', 'min', 'max', 
'mean', 'last', 'first', 'sum', 'median']: func(out=result, counts=counts, values=values, labels=comp_ids, min_count=min_count, mask=mask, result_mask=result_mask, is_datetimelike=is_datetimelike, **kwargs) elif self.how in ['sem', 'std', 'var', 'ohlc', 'prod']: if self.how in ['std', 'sem']: kwargs['is_datetimelike'] = is_datetimelike func(result, counts, values, comp_ids, min_count=min_count, mask=mask, result_mask=result_mask, **kwargs) elif self.how in ['any', 'all']: func(out=result, values=values, labels=comp_ids, mask=mask, result_mask=result_mask, **kwargs) result = result.astype(bool, copy=False) elif self.how in ['skew']: func(out=result, counts=counts, values=values, labels=comp_ids, mask=mask, result_mask=result_mask, **kwargs) if dtype == object: result = result.astype(object) else: raise NotImplementedError(f'{self.how} is not implemented') else: if self.how != 'rank': kwargs['result_mask'] = result_mask func(out=result, values=values, labels=comp_ids, ngroups=ngroups, is_datetimelike=is_datetimelike, mask=mask, **kwargs) if self.kind == 'aggregate' and self.how not in ['idxmin', 'idxmax']: if result.dtype.kind in 'iu' and (not is_datetimelike): cutoff = max(0 if self.how in ['sum', 'prod'] else 1, min_count) empty_groups = counts < cutoff if empty_groups.any(): if result_mask is not None: assert result_mask[empty_groups].all() else: result = result.astype('float64') result[empty_groups] = np.nan result = result.T if self.how not in self.cast_blocklist: res_dtype = self._get_result_dtype(orig_values.dtype) op_result = maybe_downcast_to_dtype(result, res_dtype) else: op_result = result return op_result @final def _validate_axis(self, axis: AxisInt, values: ArrayLike) -> None: if values.ndim > 2: raise NotImplementedError('number of dimensions is currently limited to 2') if values.ndim == 2: assert axis == 1, axis elif not is_1d_only_ea_dtype(values.dtype): assert axis == 0 @final def cython_operation(self, *, values: ArrayLike, axis: AxisInt, min_count: int=-1, comp_ids: np.ndarray, ngroups: int, **kwargs) -> ArrayLike: self._validate_axis(axis, values) if not isinstance(values, np.ndarray): return values._groupby_op(how=self.how, has_dropped_na=self.has_dropped_na, min_count=min_count, ngroups=ngroups, ids=comp_ids, **kwargs) return self._cython_op_ndim_compat(values, min_count=min_count, ngroups=ngroups, comp_ids=comp_ids, mask=None, **kwargs) class BaseGrouper: axis: Index def __init__(self, axis: Index, groupings: list[grouper.Grouping], sort: bool=True, dropna: bool=True) -> None: assert isinstance(axis, Index), axis self.axis = axis self._groupings = groupings self._sort = sort self.dropna = dropna @property def groupings(self) -> list[grouper.Grouping]: return self._groupings def __iter__(self) -> Iterator[Hashable]: return iter(self.indices) @property def nkeys(self) -> int: return len(self.groupings) def get_iterator(self, data: NDFrameT) -> Iterator[tuple[Hashable, NDFrameT]]: splitter = self._get_splitter(data) keys = self.result_index yield from zip(keys, splitter) @final def _get_splitter(self, data: NDFrame) -> DataSplitter: if isinstance(data, Series): klass: type[DataSplitter] = SeriesSplitter else: klass = FrameSplitter return klass(data, self.ngroups, sorted_ids=self._sorted_ids, sort_idx=self.result_ilocs) @cache_readonly def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]: if len(self.groupings) == 1 and isinstance(self.result_index, CategoricalIndex): return self.groupings[0].indices codes_list = [ping.codes for ping in self.groupings] return 
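# Illustrative sketch (hypothetical frame, not from this module) of the public behaviour
# that the WrappedCythonOp machinery above implements: boolean sums are cast to int64 by
# _get_result_dtype, and groups with fewer than ``min_count`` non-NA values are masked to
# NaN by the empty_groups handling in _call_cython_op.
import numpy as np
import pandas as pd

df = pd.DataFrame(
    {
        "key": ["a", "a", "b"],
        "flag": [True, False, True],
        "val": [1.0, np.nan, np.nan],
    }
)
# Boolean input, int64 output: dispatches through the 'sum' -> group_sum kernel.
df.groupby("key")["flag"].sum()            # a -> 1, b -> 1 (int64)
# min_count: group 'b' has no non-NA values, so its aggregated result becomes NaN.
df.groupby("key")["val"].sum(min_count=1)  # a -> 1.0, b -> NaN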
get_indexer_dict(codes_list, self.levels) @final @cache_readonly def result_ilocs(self) -> npt.NDArray[np.intp]: ids = self.ids if self.has_dropped_na: mask = np.where(ids >= 0) null_gaps = np.cumsum(ids == -1)[mask] ids = ids[mask] result = get_group_index_sorter(ids, self.ngroups) if self.has_dropped_na: result += np.take(null_gaps, result) return result @property def codes(self) -> list[npt.NDArray[np.signedinteger]]: return [ping.codes for ping in self.groupings] @property def levels(self) -> list[Index]: if len(self.groupings) > 1: return list(self.result_index.levels) else: return [self.result_index] @property def names(self) -> list[Hashable]: return [ping.name for ping in self.groupings] @final def size(self) -> Series: ids = self.ids ngroups = self.ngroups out: np.ndarray | list if ngroups: out = np.bincount(ids[ids != -1], minlength=ngroups) else: out = [] return Series(out, index=self.result_index, dtype='int64', copy=False) @cache_readonly def groups(self) -> dict[Hashable, Index]: if len(self.groupings) == 1: return self.groupings[0].groups (result_index, ids) = self.result_index_and_ids values = result_index._values categories = Categorical(ids, categories=range(len(result_index))) result = {values[group]: self.axis.take(axis_ilocs) for (group, axis_ilocs) in categories._reverse_indexer().items()} return result @final @cache_readonly def is_monotonic(self) -> bool: return Index(self.ids).is_monotonic_increasing @final @cache_readonly def has_dropped_na(self) -> bool: return bool((self.ids < 0).any()) @cache_readonly def codes_info(self) -> npt.NDArray[np.intp]: return self.ids @final @cache_readonly def ngroups(self) -> int: return len(self.result_index) @property def result_index(self) -> Index: return self.result_index_and_ids[0] @property def ids(self) -> npt.NDArray[np.intp]: return self.result_index_and_ids[1] @cache_readonly def result_index_and_ids(self) -> tuple[Index, npt.NDArray[np.intp]]: levels = [Index._with_infer(ping.uniques) for ping in self.groupings] obs = [ping._observed or not ping._passed_categorical for ping in self.groupings] for (k, (ping, level)) in enumerate(zip(self.groupings, levels)): if ping._passed_categorical: levels[k] = level.set_categories(ping._orig_cats) if len(self.groupings) == 1: result_index = levels[0] result_index.name = self.names[0] ids = ensure_platform_int(self.codes[0]) elif all(obs): (result_index, ids) = self._ob_index_and_ids(levels, self.codes, self.names) elif not any(obs): (result_index, ids) = self._unob_index_and_ids(levels, self.codes, self.names) else: names = self.names codes = [ping.codes for ping in self.groupings] ob_indices = [idx for (idx, ob) in enumerate(obs) if ob] unob_indices = [idx for (idx, ob) in enumerate(obs) if not ob] (ob_index, ob_ids) = self._ob_index_and_ids(levels=[levels[idx] for idx in ob_indices], codes=[codes[idx] for idx in ob_indices], names=[names[idx] for idx in ob_indices]) (unob_index, unob_ids) = self._unob_index_and_ids(levels=[levels[idx] for idx in unob_indices], codes=[codes[idx] for idx in unob_indices], names=[names[idx] for idx in unob_indices]) result_index_codes = np.concatenate([np.tile(unob_index.codes, len(ob_index)), np.repeat(ob_index.codes, len(unob_index), axis=1)], axis=0) (_, index) = np.unique(unob_indices + ob_indices, return_index=True) result_index = MultiIndex(levels=list(unob_index.levels) + list(ob_index.levels), codes=result_index_codes, names=list(unob_index.names) + list(ob_index.names)).reorder_levels(index) ids = len(unob_index) * ob_ids + unob_ids if 
self._sort: sorter = result_index.argsort() result_index = result_index.take(sorter) (_, index) = np.unique(sorter, return_index=True) ids = ensure_platform_int(ids) ids = index.take(ids) else: (ids, uniques) = compress_group_index(ids, sort=False) ids = ensure_platform_int(ids) taker = np.concatenate([uniques, np.delete(np.arange(len(result_index)), uniques)]) result_index = result_index.take(taker) return (result_index, ids) @property def observed_grouper(self) -> BaseGrouper: if all((ping._observed for ping in self.groupings)): return self return self._observed_grouper @cache_readonly def _observed_grouper(self) -> BaseGrouper: groupings = [ping.observed_grouping for ping in self.groupings] grouper = BaseGrouper(self.axis, groupings, sort=self._sort, dropna=self.dropna) return grouper def _ob_index_and_ids(self, levels: list[Index], codes: list[npt.NDArray[np.intp]], names: list[Hashable]) -> tuple[MultiIndex, npt.NDArray[np.intp]]: shape = tuple((len(level) for level in levels)) group_index = get_group_index(codes, shape, sort=True, xnull=True) (ob_ids, obs_group_ids) = compress_group_index(group_index, sort=self._sort) ob_ids = ensure_platform_int(ob_ids) ob_index_codes = decons_obs_group_ids(ob_ids, obs_group_ids, shape, codes, xnull=True) ob_index = MultiIndex(levels=levels, codes=ob_index_codes, names=names, verify_integrity=False) ob_ids = ensure_platform_int(ob_ids) return (ob_index, ob_ids) def _unob_index_and_ids(self, levels: list[Index], codes: list[npt.NDArray[np.intp]], names: list[Hashable]) -> tuple[MultiIndex, npt.NDArray[np.intp]]: shape = tuple((len(level) for level in levels)) unob_ids = get_group_index(codes, shape, sort=True, xnull=True) unob_index = MultiIndex.from_product(levels, names=names) unob_ids = ensure_platform_int(unob_ids) return (unob_index, unob_ids) @final def get_group_levels(self) -> Generator[Index, None, None]: result_index = self.result_index if len(self.groupings) == 1: yield result_index else: for level in range(result_index.nlevels - 1, -1, -1): yield result_index.get_level_values(level) @final def _cython_operation(self, kind: str, values, how: str, axis: AxisInt, min_count: int=-1, **kwargs) -> ArrayLike: assert kind in ['transform', 'aggregate'] cy_op = WrappedCythonOp(kind=kind, how=how, has_dropped_na=self.has_dropped_na) return cy_op.cython_operation(values=values, axis=axis, min_count=min_count, comp_ids=self.ids, ngroups=self.ngroups, **kwargs) @final def agg_series(self, obj: Series, func: Callable, preserve_dtype: bool=False) -> ArrayLike: if not isinstance(obj._values, np.ndarray): preserve_dtype = True result = self._aggregate_series_pure_python(obj, func) npvalues = lib.maybe_convert_objects(result, try_float=False) if preserve_dtype: out = maybe_cast_pointwise_result(npvalues, obj.dtype, numeric_only=True) else: out = npvalues return out @final def _aggregate_series_pure_python(self, obj: Series, func: Callable) -> npt.NDArray[np.object_]: result = np.empty(self.ngroups, dtype='O') initialized = False splitter = self._get_splitter(obj) for (i, group) in enumerate(splitter): res = func(group) res = extract_result(res) if not initialized: check_result_array(res, group.dtype) initialized = True result[i] = res return result @final def apply_groupwise(self, f: Callable, data: DataFrame | Series) -> tuple[list, bool]: mutated = False splitter = self._get_splitter(data) group_keys = self.result_index result_values = [] zipped = zip(group_keys, splitter) for (key, group) in zipped: object.__setattr__(group, 'name', key) group_axes = 
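# Illustrative sketch of the observed/unobserved split handled by result_index_and_ids
# above: with observed=False, a categorical grouper contributes all of its categories,
# so the result index is the product of unobserved levels with the observed keys.
# The frame here is hypothetical.
import pandas as pd

df = pd.DataFrame(
    {
        "cat": pd.Categorical(["x", "x"], categories=["x", "y"]),
        "num": [1, 2],
        "val": [10.0, 20.0],
    }
)
df.groupby(["cat", "num"], observed=False)["val"].mean()
# (x, 1) -> 10.0, (x, 2) -> 20.0, (y, 1) -> NaN, (y, 2) -> NaN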
group.axes res = f(group) if not mutated and (not _is_indexed_like(res, group_axes)): mutated = True result_values.append(res) if len(group_keys) == 0 and getattr(f, '__name__', None) in ['skew', 'sum', 'prod']: f(data.iloc[:0]) return (result_values, mutated) @final @cache_readonly def _sorted_ids(self) -> npt.NDArray[np.intp]: result = self.ids.take(self.result_ilocs) if getattr(self, 'dropna', True): result = result[result >= 0] return result class BinGrouper(BaseGrouper): bins: npt.NDArray[np.int64] binlabels: Index def __init__(self, bins, binlabels, indexer=None) -> None: self.bins = ensure_int64(bins) self.binlabels = ensure_index(binlabels) self.indexer = indexer assert len(self.binlabels) == len(self.bins) @cache_readonly def groups(self): result = {key: value for (key, value) in zip(self.binlabels, self.bins) if key is not NaT} return result @property def nkeys(self) -> int: return 1 @cache_readonly def codes_info(self) -> npt.NDArray[np.intp]: ids = self.ids if self.indexer is not None: sorter = np.lexsort((ids, self.indexer)) ids = ids[sorter] return ids def get_iterator(self, data: NDFrame): slicer = lambda start, edge: data.iloc[start:edge] start = 0 for (edge, label) in zip(self.bins, self.binlabels): if label is not NaT: yield (label, slicer(start, edge)) start = edge if start < len(data): yield (self.binlabels[-1], slicer(start, None)) @cache_readonly def indices(self): indices = collections.defaultdict(list) i = 0 for (label, bin) in zip(self.binlabels, self.bins): if i < bin: if label is not NaT: indices[label] = list(range(i, bin)) i = bin return indices @cache_readonly def codes(self) -> list[npt.NDArray[np.intp]]: return [self.ids] @cache_readonly def result_index_and_ids(self): result_index = self.binlabels if len(self.binlabels) != 0 and isna(self.binlabels[0]): result_index = result_index[1:] ngroups = len(result_index) rep = np.diff(np.r_[0, self.bins]) rep = ensure_platform_int(rep) if ngroups == len(self.bins): ids = np.repeat(np.arange(ngroups), rep) else: ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep) ids = ensure_platform_int(ids) return (result_index, ids) @property def levels(self) -> list[Index]: return [self.binlabels] @property def names(self) -> list[Hashable]: return [self.binlabels.name] @property def groupings(self) -> list[grouper.Grouping]: lev = self.binlabels codes = self.ids labels = lev.take(codes) ping = grouper.Grouping(labels, labels, in_axis=False, level=None, uniques=lev._values) return [ping] @property def observed_grouper(self) -> BinGrouper: return self def _is_indexed_like(obj, axes) -> bool: if isinstance(obj, Series): if len(axes) > 1: return False return obj.index.equals(axes[0]) elif isinstance(obj, DataFrame): return obj.index.equals(axes[0]) return False class DataSplitter(Generic[NDFrameT]): def __init__(self, data: NDFrameT, ngroups: int, *, sort_idx: npt.NDArray[np.intp], sorted_ids: npt.NDArray[np.intp]) -> None: self.data = data self.ngroups = ngroups self._slabels = sorted_ids self._sort_idx = sort_idx def __iter__(self) -> Iterator: if self.ngroups == 0: return (starts, ends) = lib.generate_slices(self._slabels, self.ngroups) sdata = self._sorted_data for (start, end) in zip(starts, ends): yield self._chop(sdata, slice(start, end)) @cache_readonly def _sorted_data(self) -> NDFrameT: return self.data.take(self._sort_idx, axis=0) def _chop(self, sdata, slice_obj: slice) -> NDFrame: raise AbstractMethodError(self) class SeriesSplitter(DataSplitter): def _chop(self, sdata: Series, slice_obj: slice) -> Series: mgr = 
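# Illustrative sketch: BinGrouper above (bins plus binlabels) is the grouper behind
# time-bin operations such as resample; the series below is a hypothetical example of
# the public entry point it serves.
import pandas as pd

ser = pd.Series(range(6), index=pd.date_range("2024-01-01", periods=6, freq="D"))
ser.resample("2D").sum()
# 2024-01-01 -> 0 + 1, 2024-01-03 -> 2 + 3, 2024-01-05 -> 4 + 5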
sdata._mgr.get_slice(slice_obj) ser = sdata._constructor_from_mgr(mgr, axes=mgr.axes) ser._name = sdata.name return ser.__finalize__(sdata, method='groupby') class FrameSplitter(DataSplitter): def _chop(self, sdata: DataFrame, slice_obj: slice) -> DataFrame: mgr = sdata._mgr.get_slice(slice_obj, axis=1) df = sdata._constructor_from_mgr(mgr, axes=mgr.axes) return df.__finalize__(sdata, method='groupby') # File: pandas-main/pandas/core/indexers/__init__.py from pandas.core.indexers.utils import check_array_indexer, check_key_length, check_setitem_lengths, disallow_ndim_indexing, is_empty_indexer, is_list_like_indexer, is_scalar_indexer, is_valid_positional_slice, length_of_indexer, maybe_convert_indices, unpack_1tuple, unpack_tuple_and_ellipses, validate_indices __all__ = ['is_valid_positional_slice', 'is_list_like_indexer', 'is_scalar_indexer', 'is_empty_indexer', 'check_setitem_lengths', 'validate_indices', 'maybe_convert_indices', 'length_of_indexer', 'disallow_ndim_indexing', 'unpack_1tuple', 'check_key_length', 'check_array_indexer', 'unpack_tuple_and_ellipses'] # File: pandas-main/pandas/core/indexers/objects.py """""" from __future__ import annotations from datetime import timedelta import numpy as np from pandas._libs.tslibs import BaseOffset from pandas._libs.window.indexers import calculate_variable_window_bounds from pandas.util._decorators import Appender from pandas.core.dtypes.common import ensure_platform_int from pandas.core.indexes.datetimes import DatetimeIndex from pandas.tseries.offsets import Nano get_window_bounds_doc = '\nComputes the bounds of a window.\n\nParameters\n----------\nnum_values : int, default 0\n number of values that will be aggregated over\nwindow_size : int, default 0\n the number of rows in a window\nmin_periods : int, default None\n min_periods passed from the top level rolling API\ncenter : bool, default None\n center passed from the top level rolling API\nclosed : str, default None\n closed passed from the top level rolling API\nstep : int, default None\n step passed from the top level rolling API\n .. 
versionadded:: 1.5\nwin_type : str, default None\n win_type passed from the top level rolling API\n\nReturns\n-------\nA tuple of ndarray[int64]s, indicating the boundaries of each\nwindow\n' class BaseIndexer: def __init__(self, index_array: np.ndarray | None=None, window_size: int=0, **kwargs) -> None: self.index_array = index_array self.window_size = window_size for (key, value) in kwargs.items(): setattr(self, key, value) @Appender(get_window_bounds_doc) def get_window_bounds(self, num_values: int=0, min_periods: int | None=None, center: bool | None=None, closed: str | None=None, step: int | None=None) -> tuple[np.ndarray, np.ndarray]: raise NotImplementedError class FixedWindowIndexer(BaseIndexer): @Appender(get_window_bounds_doc) def get_window_bounds(self, num_values: int=0, min_periods: int | None=None, center: bool | None=None, closed: str | None=None, step: int | None=None) -> tuple[np.ndarray, np.ndarray]: if center or self.window_size == 0: offset = (self.window_size - 1) // 2 else: offset = 0 end = np.arange(1 + offset, num_values + 1 + offset, step, dtype='int64') start = end - self.window_size if closed in ['left', 'both']: start -= 1 if closed in ['left', 'neither']: end -= 1 end = np.clip(end, 0, num_values) start = np.clip(start, 0, num_values) return (start, end) class VariableWindowIndexer(BaseIndexer): @Appender(get_window_bounds_doc) def get_window_bounds(self, num_values: int=0, min_periods: int | None=None, center: bool | None=None, closed: str | None=None, step: int | None=None) -> tuple[np.ndarray, np.ndarray]: return calculate_variable_window_bounds(num_values, self.window_size, min_periods, center, closed, self.index_array) class VariableOffsetWindowIndexer(BaseIndexer): def __init__(self, index_array: np.ndarray | None=None, window_size: int=0, index: DatetimeIndex | None=None, offset: BaseOffset | None=None, **kwargs) -> None: super().__init__(index_array, window_size, **kwargs) if not isinstance(index, DatetimeIndex): raise ValueError('index must be a DatetimeIndex.') self.index = index if not isinstance(offset, BaseOffset): raise ValueError('offset must be a DateOffset-like object.') self.offset = offset @Appender(get_window_bounds_doc) def get_window_bounds(self, num_values: int=0, min_periods: int | None=None, center: bool | None=None, closed: str | None=None, step: int | None=None) -> tuple[np.ndarray, np.ndarray]: if step is not None: raise NotImplementedError('step not implemented for variable offset window') if num_values <= 0: return (np.empty(0, dtype='int64'), np.empty(0, dtype='int64')) if closed is None: closed = 'right' if self.index is not None else 'both' right_closed = closed in ['right', 'both'] left_closed = closed in ['left', 'both'] if self.index[num_values - 1] < self.index[0]: index_growth_sign = -1 else: index_growth_sign = 1 offset_diff = index_growth_sign * self.offset start = np.empty(num_values, dtype='int64') start.fill(-1) end = np.empty(num_values, dtype='int64') end.fill(-1) start[0] = 0 if right_closed: end[0] = 1 else: end[0] = 0 zero = timedelta(0) for i in range(1, num_values): end_bound = self.index[i] start_bound = end_bound - offset_diff if left_closed: start_bound -= Nano(1) start[i] = i for j in range(start[i - 1], i): start_diff = (self.index[j] - start_bound) * index_growth_sign if start_diff > zero: start[i] = j break end_diff = (self.index[end[i - 1]] - end_bound) * index_growth_sign if end_diff == zero and (not right_closed): end[i] = end[i - 1] + 1 elif end_diff <= zero: end[i] = i + 1 else: end[i] = end[i - 1] if 
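# Illustrative sketch of how the VariableOffsetWindowIndexer defined above is consumed
# through the public pandas.api.indexers namespace; the frame and the business-day
# offset are hypothetical choices.
import pandas as pd
from pandas.api.indexers import VariableOffsetWindowIndexer

df = pd.DataFrame(
    {"x": range(10)}, index=pd.date_range("2024-01-01", periods=10, freq="D")
)
# Window length varies per row because a business-day offset skips weekends.
indexer = VariableOffsetWindowIndexer(index=df.index, offset=pd.offsets.BDay(1))
df.rolling(indexer).sum()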
not right_closed: end[i] -= 1 return (start, end) class ExpandingIndexer(BaseIndexer): @Appender(get_window_bounds_doc) def get_window_bounds(self, num_values: int=0, min_periods: int | None=None, center: bool | None=None, closed: str | None=None, step: int | None=None) -> tuple[np.ndarray, np.ndarray]: return (np.zeros(num_values, dtype=np.int64), np.arange(1, num_values + 1, dtype=np.int64)) class FixedForwardWindowIndexer(BaseIndexer): @Appender(get_window_bounds_doc) def get_window_bounds(self, num_values: int=0, min_periods: int | None=None, center: bool | None=None, closed: str | None=None, step: int | None=None) -> tuple[np.ndarray, np.ndarray]: if center: raise ValueError("Forward-looking windows can't have center=True") if closed is not None: raise ValueError("Forward-looking windows don't support setting the closed argument") if step is None: step = 1 start = np.arange(0, num_values, step, dtype='int64') end = start + self.window_size if self.window_size: end = np.clip(end, 0, num_values) return (start, end) class GroupbyIndexer(BaseIndexer): def __init__(self, index_array: np.ndarray | None=None, window_size: int | BaseIndexer=0, groupby_indices: dict | None=None, window_indexer: type[BaseIndexer]=BaseIndexer, indexer_kwargs: dict | None=None, **kwargs) -> None: self.groupby_indices = groupby_indices or {} self.window_indexer = window_indexer self.indexer_kwargs = indexer_kwargs.copy() if indexer_kwargs else {} super().__init__(index_array=index_array, window_size=self.indexer_kwargs.pop('window_size', window_size), **kwargs) @Appender(get_window_bounds_doc) def get_window_bounds(self, num_values: int=0, min_periods: int | None=None, center: bool | None=None, closed: str | None=None, step: int | None=None) -> tuple[np.ndarray, np.ndarray]: start_arrays = [] end_arrays = [] window_indices_start = 0 for indices in self.groupby_indices.values(): index_array: np.ndarray | None if self.index_array is not None: index_array = self.index_array.take(ensure_platform_int(indices)) else: index_array = self.index_array indexer = self.window_indexer(index_array=index_array, window_size=self.window_size, **self.indexer_kwargs) (start, end) = indexer.get_window_bounds(len(indices), min_periods, center, closed, step) start = start.astype(np.int64) end = end.astype(np.int64) assert len(start) == len(end), 'these should be equal in length from get_window_bounds' window_indices = np.arange(window_indices_start, window_indices_start + len(indices)) window_indices_start += len(indices) window_indices = np.append(window_indices, [window_indices[-1] + 1]).astype(np.int64, copy=False) start_arrays.append(window_indices.take(ensure_platform_int(start))) end_arrays.append(window_indices.take(ensure_platform_int(end))) if len(start_arrays) == 0: return (np.array([], dtype=np.int64), np.array([], dtype=np.int64)) start = np.concatenate(start_arrays) end = np.concatenate(end_arrays) return (start, end) class ExponentialMovingWindowIndexer(BaseIndexer): @Appender(get_window_bounds_doc) def get_window_bounds(self, num_values: int=0, min_periods: int | None=None, center: bool | None=None, closed: str | None=None, step: int | None=None) -> tuple[np.ndarray, np.ndarray]: return (np.array([0], dtype=np.int64), np.array([num_values], dtype=np.int64)) # File: pandas-main/pandas/core/indexers/utils.py """""" from __future__ import annotations from typing import TYPE_CHECKING, Any import numpy as np from pandas._libs import lib from pandas.core.dtypes.common import is_array_like, is_bool_dtype, is_integer, 
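# Illustrative sketch of the indexer classes above as used via pandas.api.indexers:
# a forward-looking fixed window, and a minimal custom BaseIndexer whose
# get_window_bounds returns equal-length int64 (start, end) arrays, matching the
# signature documented above. Data values are hypothetical.
import numpy as np
import pandas as pd
from pandas.api.indexers import BaseIndexer, FixedForwardWindowIndexer

df = pd.DataFrame({"x": [1.0, 2.0, 3.0, 4.0]})

# Forward-looking window of 2 rows.
df.rolling(FixedForwardWindowIndexer(window_size=2), min_periods=1).sum()
# rows: 1+2, 2+3, 3+4, 4

class TrailingIndexer(BaseIndexer):
    # Equivalent to an ordinary trailing window of self.window_size rows.
    def get_window_bounds(self, num_values=0, min_periods=None, center=None,
                          closed=None, step=None):
        end = np.arange(1, num_values + 1, dtype=np.int64)
        start = np.maximum(end - self.window_size, 0).astype(np.int64)
        return start, end

df.rolling(TrailingIndexer(window_size=3), min_periods=1).sum()
# rows: 1, 1+2, 1+2+3, 2+3+4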
is_integer_dtype, is_list_like from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ABCIndex, ABCSeries if TYPE_CHECKING: from pandas._typing import AnyArrayLike from pandas.core.frame import DataFrame from pandas.core.indexes.base import Index def is_valid_positional_slice(slc: slice) -> bool: return lib.is_int_or_none(slc.start) and lib.is_int_or_none(slc.stop) and lib.is_int_or_none(slc.step) def is_list_like_indexer(key) -> bool: return is_list_like(key) and (not (isinstance(key, tuple) and type(key) is not tuple)) def is_scalar_indexer(indexer, ndim: int) -> bool: if ndim == 1 and is_integer(indexer): return True if isinstance(indexer, tuple) and len(indexer) == ndim: return all((is_integer(x) for x in indexer)) return False def is_empty_indexer(indexer) -> bool: if is_list_like(indexer) and (not len(indexer)): return True if not isinstance(indexer, tuple): indexer = (indexer,) return any((isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)) def check_setitem_lengths(indexer, value, values) -> bool: no_op = False if isinstance(indexer, (np.ndarray, list)): if is_list_like(value): if len(indexer) != len(value) and values.ndim == 1: if isinstance(indexer, list): indexer = np.array(indexer) if not (isinstance(indexer, np.ndarray) and indexer.dtype == np.bool_ and (indexer.sum() == len(value))): raise ValueError('cannot set using a list-like indexer with a different length than the value') if not len(indexer): no_op = True elif isinstance(indexer, slice): if is_list_like(value): if len(value) != length_of_indexer(indexer, values) and values.ndim == 1: raise ValueError('cannot set using a slice indexer with a different length than the value') if not len(value): no_op = True return no_op def validate_indices(indices: np.ndarray, n: int) -> None: if len(indices): min_idx = indices.min() if min_idx < -1: msg = f"'indices' contains values less than allowed ({min_idx} < -1)" raise ValueError(msg) max_idx = indices.max() if max_idx >= n: raise IndexError('indices are out-of-bounds') def maybe_convert_indices(indices, n: int, verify: bool=True) -> np.ndarray: if isinstance(indices, list): indices = np.array(indices) if len(indices) == 0: return np.empty(0, dtype=np.intp) mask = indices < 0 if mask.any(): indices = indices.copy() indices[mask] += n if verify: mask = (indices >= n) | (indices < 0) if mask.any(): raise IndexError('indices are out-of-bounds') return indices def length_of_indexer(indexer, target=None) -> int: if target is not None and isinstance(indexer, slice): target_len = len(target) start = indexer.start stop = indexer.stop step = indexer.step if start is None: start = 0 elif start < 0: start += target_len if stop is None or stop > target_len: stop = target_len elif stop < 0: stop += target_len if step is None: step = 1 elif step < 0: (start, stop) = (stop + 1, start + 1) step = -step return (stop - start + step - 1) // step elif isinstance(indexer, (ABCSeries, ABCIndex, np.ndarray, list)): if isinstance(indexer, list): indexer = np.array(indexer) if indexer.dtype == bool: return indexer.sum() return len(indexer) elif isinstance(indexer, range): return (indexer.stop - indexer.start) // indexer.step elif not is_list_like_indexer(indexer): return 1 raise AssertionError('cannot find the length of the indexer') def disallow_ndim_indexing(result) -> None: if np.ndim(result) > 1: raise ValueError('Multi-dimensional indexing (e.g. `obj[:, None]`) is no longer supported. 
Convert to a numpy array before indexing instead.') def unpack_1tuple(tup): if len(tup) == 1 and isinstance(tup[0], slice): if isinstance(tup, list): raise ValueError('Indexing with a single-item list containing a slice is not allowed. Pass a tuple instead.') return tup[0] return tup def check_key_length(columns: Index, key, value: DataFrame) -> None: if columns.is_unique: if len(value.columns) != len(key): raise ValueError('Columns must be same length as key') elif len(columns.get_indexer_non_unique(key)[0]) != len(value.columns): raise ValueError('Columns must be same length as key') def unpack_tuple_and_ellipses(item: tuple): if len(item) > 1: if item[0] is Ellipsis: item = item[1:] elif item[-1] is Ellipsis: item = item[:-1] if len(item) > 1: raise IndexError('too many indices for array.') item = item[0] return item def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any: from pandas.core.construction import array as pd_array if is_list_like(indexer): if isinstance(indexer, tuple): return indexer else: return indexer if not is_array_like(indexer): indexer = pd_array(indexer) if len(indexer) == 0: indexer = np.array([], dtype=np.intp) dtype = indexer.dtype if is_bool_dtype(dtype): if isinstance(dtype, ExtensionDtype): indexer = indexer.to_numpy(dtype=bool, na_value=False) else: indexer = np.asarray(indexer, dtype=bool) if len(indexer) != len(array): raise IndexError(f'Boolean index has wrong length: {len(indexer)} instead of {len(array)}') elif is_integer_dtype(dtype): try: indexer = np.asarray(indexer, dtype=np.intp) except ValueError as err: raise ValueError('Cannot index with an integer indexer containing NA values') from err else: raise IndexError('arrays used as indices must be of integer or boolean type') return indexer # File: pandas-main/pandas/core/indexes/accessors.py """""" from __future__ import annotations from typing import TYPE_CHECKING, NoReturn, cast import warnings import numpy as np from pandas._libs import lib from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_integer_dtype, is_list_like from pandas.core.dtypes.dtypes import ArrowDtype, CategoricalDtype, DatetimeTZDtype, PeriodDtype from pandas.core.dtypes.generic import ABCSeries from pandas.core.accessor import PandasDelegate, delegate_names from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray from pandas.core.arrays.arrow.array import ArrowExtensionArray from pandas.core.base import NoNewAttributesMixin, PandasObject from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.timedeltas import TimedeltaIndex if TYPE_CHECKING: from pandas import DataFrame, Series class Properties(PandasDelegate, PandasObject, NoNewAttributesMixin): _hidden_attrs = PandasObject._hidden_attrs | {'orig', 'name'} def __init__(self, data: Series, orig) -> None: if not isinstance(data, ABCSeries): raise TypeError(f'cannot convert an object of type {type(data)} to a datetimelike index') self._parent = data self.orig = orig self.name = getattr(data, 'name', None) self._freeze() def _get_values(self): data = self._parent if lib.is_np_dtype(data.dtype, 'M'): return DatetimeIndex(data, copy=False, name=self.name) elif isinstance(data.dtype, DatetimeTZDtype): return DatetimeIndex(data, copy=False, name=self.name) elif lib.is_np_dtype(data.dtype, 'm'): return TimedeltaIndex(data, copy=False, name=self.name) elif isinstance(data.dtype, PeriodDtype): return PeriodArray(data, copy=False) raise TypeError(f'cannot convert an object of type {type(data)} to 
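# Illustrative sketch of check_array_indexer defined above, via its public export in
# pandas.api.indexers: nullable boolean masks are validated and converted, with NA
# positions becoming False (the to_numpy(dtype=bool, na_value=False) path).
import numpy as np
import pandas as pd
from pandas.api.indexers import check_array_indexer

arr = np.array([10, 20, 30])
mask = pd.array([True, None, False], dtype="boolean")
check_array_indexer(arr, mask)   # -> array([ True, False, False])
# A mask of the wrong length raises IndexError before any indexing happens:
# check_array_indexer(arr, np.array([True, False]))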
a datetimelike index') def _delegate_property_get(self, name: str): from pandas import Series values = self._get_values() result = getattr(values, name) if isinstance(result, np.ndarray): if is_integer_dtype(result): result = result.astype('int64') elif not is_list_like(result): return result result = np.asarray(result) if self.orig is not None: index = self.orig.index else: index = self._parent.index return Series(result, index=index, name=self.name).__finalize__(self._parent) def _delegate_property_set(self, name: str, value, *args, **kwargs) -> NoReturn: raise ValueError('modifications to a property of a datetimelike object are not supported. Change values on the original.') def _delegate_method(self, name: str, *args, **kwargs): from pandas import Series values = self._get_values() method = getattr(values, name) result = method(*args, **kwargs) if not is_list_like(result): return result return Series(result, index=self._parent.index, name=self.name).__finalize__(self._parent) @delegate_names(delegate=ArrowExtensionArray, accessors=TimedeltaArray._datetimelike_ops, typ='property', accessor_mapping=lambda x: f'_dt_{x}', raise_on_missing=False) @delegate_names(delegate=ArrowExtensionArray, accessors=TimedeltaArray._datetimelike_methods, typ='method', accessor_mapping=lambda x: f'_dt_{x}', raise_on_missing=False) @delegate_names(delegate=ArrowExtensionArray, accessors=DatetimeArray._datetimelike_ops, typ='property', accessor_mapping=lambda x: f'_dt_{x}', raise_on_missing=False) @delegate_names(delegate=ArrowExtensionArray, accessors=DatetimeArray._datetimelike_methods, typ='method', accessor_mapping=lambda x: f'_dt_{x}', raise_on_missing=False) class ArrowTemporalProperties(PandasDelegate, PandasObject, NoNewAttributesMixin): def __init__(self, data: Series, orig) -> None: if not isinstance(data, ABCSeries): raise TypeError(f'cannot convert an object of type {type(data)} to a datetimelike index') self._parent = data self._orig = orig self._freeze() def _delegate_property_get(self, name: str): if not hasattr(self._parent.array, f'_dt_{name}'): raise NotImplementedError(f'dt.{name} is not supported for {self._parent.dtype}') result = getattr(self._parent.array, f'_dt_{name}') if not is_list_like(result): return result if self._orig is not None: index = self._orig.index else: index = self._parent.index result = type(self._parent)(result, index=index, name=self._parent.name).__finalize__(self._parent) return result def _delegate_method(self, name: str, *args, **kwargs): if not hasattr(self._parent.array, f'_dt_{name}'): raise NotImplementedError(f'dt.{name} is not supported for {self._parent.dtype}') result = getattr(self._parent.array, f'_dt_{name}')(*args, **kwargs) if self._orig is not None: index = self._orig.index else: index = self._parent.index result = type(self._parent)(result, index=index, name=self._parent.name).__finalize__(self._parent) return result def to_pytimedelta(self): warnings.warn(f'The behavior of {type(self).__name__}.to_pytimedelta is deprecated, in a future version this will return a Series containing python datetime.timedelta objects instead of an ndarray. 
To retain the old behavior, call `np.array` on the result', FutureWarning, stacklevel=find_stack_level()) return cast(ArrowExtensionArray, self._parent.array)._dt_to_pytimedelta() def to_pydatetime(self) -> Series: return cast(ArrowExtensionArray, self._parent.array)._dt_to_pydatetime() def isocalendar(self) -> DataFrame: from pandas import DataFrame result = cast(ArrowExtensionArray, self._parent.array)._dt_isocalendar()._pa_array.combine_chunks() iso_calendar_df = DataFrame({col: type(self._parent.array)(result.field(i)) for (i, col) in enumerate(['year', 'week', 'day'])}) return iso_calendar_df @property def components(self) -> DataFrame: from pandas import DataFrame components_df = DataFrame({col: getattr(self._parent.array, f'_dt_{col}') for col in ['days', 'hours', 'minutes', 'seconds', 'milliseconds', 'microseconds', 'nanoseconds']}) return components_df @delegate_names(delegate=DatetimeArray, accessors=DatetimeArray._datetimelike_ops + ['unit'], typ='property') @delegate_names(delegate=DatetimeArray, accessors=DatetimeArray._datetimelike_methods + ['as_unit'], typ='method') class DatetimeProperties(Properties): def to_pydatetime(self) -> Series: from pandas import Series return Series(self._get_values().to_pydatetime(), dtype=object) @property def freq(self): return self._get_values().inferred_freq def isocalendar(self) -> DataFrame: return self._get_values().isocalendar().set_index(self._parent.index) @delegate_names(delegate=TimedeltaArray, accessors=TimedeltaArray._datetimelike_ops, typ='property') @delegate_names(delegate=TimedeltaArray, accessors=TimedeltaArray._datetimelike_methods, typ='method') class TimedeltaProperties(Properties): def to_pytimedelta(self) -> np.ndarray: warnings.warn(f'The behavior of {type(self).__name__}.to_pytimedelta is deprecated, in a future version this will return a Series containing python datetime.timedelta objects instead of an ndarray. 
To retain the old behavior, call `np.array` on the result', FutureWarning, stacklevel=find_stack_level()) return self._get_values().to_pytimedelta() @property def components(self) -> DataFrame: return self._get_values().components.set_index(self._parent.index).__finalize__(self._parent) @property def freq(self): return self._get_values().inferred_freq @delegate_names(delegate=PeriodArray, accessors=PeriodArray._datetimelike_ops, typ='property') @delegate_names(delegate=PeriodArray, accessors=PeriodArray._datetimelike_methods, typ='method') class PeriodProperties(Properties): class CombinedDatetimelikeProperties(DatetimeProperties, TimedeltaProperties, PeriodProperties): def __new__(cls, data: Series): if not isinstance(data, ABCSeries): raise TypeError(f'cannot convert an object of type {type(data)} to a datetimelike index') orig = data if isinstance(data.dtype, CategoricalDtype) else None if orig is not None: data = data._constructor(orig.array, name=orig.name, copy=False, dtype=orig._values.categories.dtype, index=orig.index) if isinstance(data.dtype, ArrowDtype) and data.dtype.kind in 'Mm': return ArrowTemporalProperties(data, orig) if lib.is_np_dtype(data.dtype, 'M'): return DatetimeProperties(data, orig) elif isinstance(data.dtype, DatetimeTZDtype): return DatetimeProperties(data, orig) elif lib.is_np_dtype(data.dtype, 'm'): return TimedeltaProperties(data, orig) elif isinstance(data.dtype, PeriodDtype): return PeriodProperties(data, orig) raise AttributeError('Can only use .dt accessor with datetimelike values') # File: pandas-main/pandas/core/indexes/api.py from __future__ import annotations from typing import TYPE_CHECKING, cast import numpy as np from pandas._libs import NaT, lib from pandas.errors import InvalidIndexError from pandas.core.dtypes.cast import find_common_type from pandas.core.algorithms import safe_sort from pandas.core.indexes.base import Index, _new_Index, ensure_index, ensure_index_from_sequences, get_unanimous_names, maybe_sequence_to_range from pandas.core.indexes.category import CategoricalIndex from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.interval import IntervalIndex from pandas.core.indexes.multi import MultiIndex from pandas.core.indexes.period import PeriodIndex from pandas.core.indexes.range import RangeIndex from pandas.core.indexes.timedeltas import TimedeltaIndex if TYPE_CHECKING: from pandas._typing import Axis __all__ = ['Index', 'MultiIndex', 'CategoricalIndex', 'IntervalIndex', 'RangeIndex', 'InvalidIndexError', 'TimedeltaIndex', 'PeriodIndex', 'DatetimeIndex', '_new_Index', 'NaT', 'ensure_index', 'ensure_index_from_sequences', 'get_objs_combined_axis', 'union_indexes', 'get_unanimous_names', 'all_indexes_same', 'default_index', 'safe_sort_index', 'maybe_sequence_to_range'] def get_objs_combined_axis(objs, intersect: bool=False, axis: Axis=0, sort: bool=True) -> Index: obs_idxes = [obj._get_axis(axis) for obj in objs] return _get_combined_index(obs_idxes, intersect=intersect, sort=sort) def _get_distinct_objs(objs: list[Index]) -> list[Index]: ids: set[int] = set() res = [] for obj in objs: if id(obj) not in ids: ids.add(id(obj)) res.append(obj) return res def _get_combined_index(indexes: list[Index], intersect: bool=False, sort: bool=False) -> Index: indexes = _get_distinct_objs(indexes) if len(indexes) == 0: index: Index = default_index(0) elif len(indexes) == 1: index = indexes[0] elif intersect: index = indexes[0] for other in indexes[1:]: index = index.intersection(other) else: index = 
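# Illustrative sketch of the Series.dt accessor that CombinedDatetimelikeProperties
# above dispatches for: datetime64 data goes to DatetimeProperties, timedelta64 data
# to TimedeltaProperties. Example data is hypothetical.
import pandas as pd

s = pd.Series(pd.date_range("2024-01-01", periods=3, freq="D"))
s.dt.year            # delegated property
s.dt.isocalendar()   # DataFrame with year/week/day columns

td = pd.Series(pd.to_timedelta(["1 days 02:00:00", "3 days"]))
td.dt.components     # DataFrame of days/hours/minutes/... columns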
union_indexes(indexes, sort=False) index = ensure_index(index) if sort: index = safe_sort_index(index) return index def safe_sort_index(index: Index) -> Index: if index.is_monotonic_increasing: return index try: array_sorted = safe_sort(index) except TypeError: pass else: if isinstance(array_sorted, Index): return array_sorted array_sorted = cast(np.ndarray, array_sorted) if isinstance(index, MultiIndex): index = MultiIndex.from_tuples(array_sorted, names=index.names) else: index = Index(array_sorted, name=index.name, dtype=index.dtype) return index def union_indexes(indexes, sort: bool | None=True) -> Index: if len(indexes) == 0: raise AssertionError('Must have at least 1 Index to union') if len(indexes) == 1: result = indexes[0] if isinstance(result, list): if not sort: result = Index(result) else: result = Index(sorted(result)) return result (indexes, kind) = _sanitize_and_check(indexes) if kind == 'special': result = indexes[0] num_dtis = 0 num_dti_tzs = 0 for idx in indexes: if isinstance(idx, DatetimeIndex): num_dtis += 1 if idx.tz is not None: num_dti_tzs += 1 if num_dti_tzs not in [0, num_dtis]: raise TypeError('Cannot join tz-naive with tz-aware DatetimeIndex') if num_dtis == len(indexes): sort = True result = indexes[0] elif num_dtis > 1: sort = False indexes = [x.astype(object, copy=False) for x in indexes] result = indexes[0] for other in indexes[1:]: result = result.union(other, sort=None if sort else False) return result elif kind == 'array': if not all_indexes_same(indexes): dtype = find_common_type([idx.dtype for idx in indexes]) inds = [ind.astype(dtype, copy=False) for ind in indexes] index = inds[0].unique() other = inds[1].append(inds[2:]) diff = other[index.get_indexer_for(other) == -1] if len(diff): index = index.append(diff.unique()) if sort: index = index.sort_values() else: index = indexes[0] name = get_unanimous_names(*indexes)[0] if name != index.name: index = index.rename(name) return index elif kind == 'list': dtypes = [idx.dtype for idx in indexes if isinstance(idx, Index)] if dtypes: dtype = find_common_type(dtypes) else: dtype = None all_lists = (idx.tolist() if isinstance(idx, Index) else idx for idx in indexes) return Index(lib.fast_unique_multiple_list_gen(all_lists, sort=bool(sort)), dtype=dtype) else: raise ValueError(f"kind={kind!r} must be 'special', 'array' or 'list'.") def _sanitize_and_check(indexes): kinds = {type(index) for index in indexes} if list in kinds: if len(kinds) > 1: indexes = [Index(list(x)) if not isinstance(x, Index) else x for x in indexes] kinds -= {list} else: return (indexes, 'list') if len(kinds) > 1 or Index not in kinds: return (indexes, 'special') else: return (indexes, 'array') def all_indexes_same(indexes) -> bool: itr = iter(indexes) first = next(itr) return all((first.equals(index) for index in itr)) def default_index(n: int) -> RangeIndex: rng = range(n) return RangeIndex._simple_new(rng, name=None) # File: pandas-main/pandas/core/indexes/base.py from __future__ import annotations from collections import abc from datetime import datetime import functools from itertools import zip_longest import operator from typing import TYPE_CHECKING, Any, ClassVar, Literal, NoReturn, cast, final, overload import warnings import numpy as np from pandas._config import get_option from pandas._libs import NaT, algos as libalgos, index as libindex, lib, writers from pandas._libs.internals import BlockValuesRefs import pandas._libs.join as libjoin from pandas._libs.lib import is_datetime_array, no_default from pandas._libs.tslibs import 
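# Illustrative sketch of the axis-combination helpers above (get_objs_combined_axis,
# union_indexes) as observed through concat, which combines the column Indexes of its
# inputs; the frames here are hypothetical.
import pandas as pd

a = pd.DataFrame({"x": [1], "y": [2]})
b = pd.DataFrame({"y": [3], "z": [4]})
pd.concat([a, b]).columns                 # union -> Index(['x', 'y', 'z'], ...)
pd.concat([a, b], join="inner").columns   # intersection -> Index(['y'], ...)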
IncompatibleFrequency, OutOfBoundsDatetime, Timestamp, tz_compare from pandas._typing import AnyAll, ArrayLike, Axes, Axis, AxisInt, DropKeep, Dtype, DtypeObj, F, IgnoreRaise, IndexLabel, IndexT, JoinHow, Level, NaPosition, ReindexMethod, Self, Shape, SliceType, npt from pandas.compat.numpy import function as nv from pandas.errors import DuplicateLabelError, InvalidIndexError from pandas.util._decorators import Appender, cache_readonly, doc from pandas.util._exceptions import find_stack_level, rewrite_exception from pandas.core.dtypes.astype import astype_array, astype_is_view from pandas.core.dtypes.cast import LossySetitemError, can_hold_element, common_dtype_categorical_compat, find_result_type, infer_dtype_from, maybe_cast_pointwise_result, np_can_hold_element from pandas.core.dtypes.common import ensure_int64, ensure_object, ensure_platform_int, is_any_real_numeric_dtype, is_bool_dtype, is_ea_or_datetimelike_dtype, is_float, is_hashable, is_integer, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_signed_integer_dtype, is_string_dtype, needs_i8_conversion, pandas_dtype, validate_all_hashable from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ArrowDtype, CategoricalDtype, DatetimeTZDtype, ExtensionDtype, IntervalDtype, PeriodDtype, SparseDtype from pandas.core.dtypes.generic import ABCCategoricalIndex, ABCDataFrame, ABCDatetimeIndex, ABCIntervalIndex, ABCMultiIndex, ABCPeriodIndex, ABCRangeIndex, ABCSeries, ABCTimedeltaIndex from pandas.core.dtypes.inference import is_dict_like from pandas.core.dtypes.missing import array_equivalent, is_valid_na_for_dtype, isna from pandas.core import arraylike, nanops, ops from pandas.core.accessor import Accessor import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import setitem_datetimelike_compat, validate_putmask from pandas.core.arrays import ArrowExtensionArray, BaseMaskedArray, Categorical, DatetimeArray, ExtensionArray, TimedeltaArray from pandas.core.arrays.string_ import StringArray, StringDtype from pandas.core.base import IndexOpsMixin, PandasObject import pandas.core.common as com from pandas.core.construction import ensure_wrapped_if_datetimelike, extract_array, sanitize_array from pandas.core.indexers import disallow_ndim_indexing, is_valid_positional_slice from pandas.core.indexes.frozen import FrozenList from pandas.core.missing import clean_reindex_fill_method from pandas.core.ops import get_op_result_name from pandas.core.sorting import ensure_key_mapped, get_group_index_sorter, nargsort from pandas.core.strings.accessor import StringMethods from pandas.io.formats.printing import PrettyDict, default_pprint, format_object_summary, pprint_thing if TYPE_CHECKING: from collections.abc import Callable, Hashable, Iterable, Sequence from pandas import CategoricalIndex, DataFrame, MultiIndex, Series from pandas.core.arrays import IntervalArray, PeriodArray __all__ = ['Index'] _unsortable_types = frozenset(('mixed', 'mixed-integer')) _index_doc_kwargs: dict[str, str] = {'klass': 'Index', 'inplace': '', 'target_klass': 'Index', 'raises_section': '', 'unique': 'Index', 'duplicated': 'np.ndarray'} _index_shared_docs: dict[str, str] = {} str_t = str _dtype_obj = np.dtype('object') _masked_engines = {'Complex128': libindex.MaskedComplex128Engine, 'Complex64': libindex.MaskedComplex64Engine, 'Float64': libindex.MaskedFloat64Engine, 'Float32': libindex.MaskedFloat32Engine, 'UInt64': libindex.MaskedUInt64Engine, 'UInt32': libindex.MaskedUInt32Engine, 'UInt16': 
libindex.MaskedUInt16Engine, 'UInt8': libindex.MaskedUInt8Engine, 'Int64': libindex.MaskedInt64Engine, 'Int32': libindex.MaskedInt32Engine, 'Int16': libindex.MaskedInt16Engine, 'Int8': libindex.MaskedInt8Engine, 'boolean': libindex.MaskedBoolEngine, 'double[pyarrow]': libindex.MaskedFloat64Engine, 'float64[pyarrow]': libindex.MaskedFloat64Engine, 'float32[pyarrow]': libindex.MaskedFloat32Engine, 'float[pyarrow]': libindex.MaskedFloat32Engine, 'uint64[pyarrow]': libindex.MaskedUInt64Engine, 'uint32[pyarrow]': libindex.MaskedUInt32Engine, 'uint16[pyarrow]': libindex.MaskedUInt16Engine, 'uint8[pyarrow]': libindex.MaskedUInt8Engine, 'int64[pyarrow]': libindex.MaskedInt64Engine, 'int32[pyarrow]': libindex.MaskedInt32Engine, 'int16[pyarrow]': libindex.MaskedInt16Engine, 'int8[pyarrow]': libindex.MaskedInt8Engine, 'bool[pyarrow]': libindex.MaskedBoolEngine} def _maybe_return_indexers(meth: F) -> F: @functools.wraps(meth) def join(self, other: Index, *, how: JoinHow='left', level=None, return_indexers: bool=False, sort: bool=False): (join_index, lidx, ridx) = meth(self, other, how=how, level=level, sort=sort) if not return_indexers: return join_index if lidx is not None: lidx = ensure_platform_int(lidx) if ridx is not None: ridx = ensure_platform_int(ridx) return (join_index, lidx, ridx) return cast(F, join) def _new_Index(cls, d): if issubclass(cls, ABCPeriodIndex): from pandas.core.indexes.period import _new_PeriodIndex return _new_PeriodIndex(cls, **d) if issubclass(cls, ABCMultiIndex): if 'labels' in d and 'codes' not in d: d['codes'] = d.pop('labels') d['verify_integrity'] = False elif 'dtype' not in d and 'data' in d: d['dtype'] = d['data'].dtype return cls.__new__(cls, **d) class Index(IndexOpsMixin, PandasObject): __pandas_priority__ = 2000 @final def _left_indexer_unique(self, other: Self) -> npt.NDArray[np.intp]: sv = self._get_join_target() ov = other._get_join_target() return libjoin.left_join_indexer_unique(sv, ov) @final def _left_indexer(self, other: Self) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: sv = self._get_join_target() ov = other._get_join_target() (joined_ndarray, lidx, ridx) = libjoin.left_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return (joined, lidx, ridx) @final def _inner_indexer(self, other: Self) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: sv = self._get_join_target() ov = other._get_join_target() (joined_ndarray, lidx, ridx) = libjoin.inner_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return (joined, lidx, ridx) @final def _outer_indexer(self, other: Self) -> tuple[ArrayLike, npt.NDArray[np.intp], npt.NDArray[np.intp]]: sv = self._get_join_target() ov = other._get_join_target() (joined_ndarray, lidx, ridx) = libjoin.outer_join_indexer(sv, ov) joined = self._from_join_target(joined_ndarray) return (joined, lidx, ridx) _typ: str = 'index' _data: ExtensionArray | np.ndarray _data_cls: type[ExtensionArray] | tuple[type[np.ndarray], type[ExtensionArray]] = (np.ndarray, ExtensionArray) _id: object | None = None _name: Hashable = None _no_setting_name: bool = False _comparables: list[str] = ['name'] _attributes: list[str] = ['name'] @cache_readonly def _can_hold_strings(self) -> bool: return not is_numeric_dtype(self.dtype) _engine_types: dict[np.dtype | ExtensionDtype, type[libindex.IndexEngine]] = {np.dtype(np.int8): libindex.Int8Engine, np.dtype(np.int16): libindex.Int16Engine, np.dtype(np.int32): libindex.Int32Engine, np.dtype(np.int64): libindex.Int64Engine, 
np.dtype(np.uint8): libindex.UInt8Engine, np.dtype(np.uint16): libindex.UInt16Engine, np.dtype(np.uint32): libindex.UInt32Engine, np.dtype(np.uint64): libindex.UInt64Engine, np.dtype(np.float32): libindex.Float32Engine, np.dtype(np.float64): libindex.Float64Engine, np.dtype(np.complex64): libindex.Complex64Engine, np.dtype(np.complex128): libindex.Complex128Engine} @property def _engine_type(self) -> type[libindex.IndexEngine | libindex.ExtensionEngine]: return self._engine_types.get(self.dtype, libindex.ObjectEngine) _supports_partial_string_indexing = False _accessors = {'str'} str = Accessor('str', StringMethods) _references = None def __new__(cls, data=None, dtype=None, copy: bool=False, name=None, tupleize_cols: bool=True) -> Self: from pandas.core.indexes.range import RangeIndex name = maybe_extract_name(name, data, cls) if dtype is not None: dtype = pandas_dtype(dtype) data_dtype = getattr(data, 'dtype', None) refs = None if not copy and isinstance(data, (ABCSeries, Index)): refs = data._references if isinstance(data, (range, RangeIndex)): result = RangeIndex(start=data, copy=copy, name=name) if dtype is not None: return result.astype(dtype, copy=False) return result elif is_ea_or_datetimelike_dtype(dtype): if isinstance(data, (set, frozenset)): data = list(data) elif is_ea_or_datetimelike_dtype(data_dtype): pass elif isinstance(data, (np.ndarray, ABCMultiIndex)): if isinstance(data, ABCMultiIndex): data = data._values if data.dtype.kind not in 'iufcbmM': data = com.asarray_tuplesafe(data, dtype=_dtype_obj) elif isinstance(data, (ABCSeries, Index)): pass elif is_scalar(data): raise cls._raise_scalar_data_error(data) elif hasattr(data, '__array__'): return cls(np.asarray(data), dtype=dtype, copy=copy, name=name) elif not is_list_like(data) and (not isinstance(data, memoryview)): raise cls._raise_scalar_data_error(data) else: if tupleize_cols: if is_iterator(data): data = list(data) if data and all((isinstance(e, tuple) for e in data)): from pandas.core.indexes.multi import MultiIndex return MultiIndex.from_tuples(data, names=name) if not isinstance(data, (list, tuple)): data = list(data) if len(data) == 0: data = np.array(data, dtype=object) if len(data) and isinstance(data[0], tuple): data = com.asarray_tuplesafe(data, dtype=_dtype_obj) try: arr = sanitize_array(data, None, dtype=dtype, copy=copy) except ValueError as err: if 'index must be specified when data is not list-like' in str(err): raise cls._raise_scalar_data_error(data) from err if 'Data must be 1-dimensional' in str(err): raise ValueError('Index data must be 1-dimensional') from err raise arr = ensure_wrapped_if_datetimelike(arr) klass = cls._dtype_to_subclass(arr.dtype) arr = klass._ensure_array(arr, arr.dtype, copy=False) return klass._simple_new(arr, name, refs=refs) @classmethod def _ensure_array(cls, data, dtype, copy: bool): if data.ndim > 1: raise ValueError('Index data must be 1-dimensional') elif dtype == np.float16: raise NotImplementedError('float16 indexes are not supported') if copy: data = data.copy() return data @final @classmethod def _dtype_to_subclass(cls, dtype: DtypeObj): if isinstance(dtype, ExtensionDtype): return dtype.index_class if dtype.kind == 'M': from pandas import DatetimeIndex return DatetimeIndex elif dtype.kind == 'm': from pandas import TimedeltaIndex return TimedeltaIndex elif dtype.kind == 'O': return Index elif issubclass(dtype.type, str) or is_numeric_dtype(dtype): return Index raise NotImplementedError(dtype) @classmethod def _simple_new(cls, values: ArrayLike, name: Hashable | 
None=None, refs=None) -> Self: assert isinstance(values, cls._data_cls), type(values) result = object.__new__(cls) result._data = values result._name = name result._cache = {} result._reset_identity() if refs is not None: result._references = refs else: result._references = BlockValuesRefs() result._references.add_index_reference(result) return result @classmethod def _with_infer(cls, *args, **kwargs): result = cls(*args, **kwargs) if result.dtype == _dtype_obj and (not result._is_multi): values = lib.maybe_convert_objects(result._values) if values.dtype.kind in 'iufb': return Index(values, name=result.name) return result @cache_readonly def _constructor(self) -> type[Self]: return type(self) @final def _maybe_check_unique(self) -> None: if not self.is_unique: msg = 'Index has duplicates.' duplicates = self._format_duplicate_message() msg += f'\n{duplicates}' raise DuplicateLabelError(msg) @final def _format_duplicate_message(self) -> DataFrame: from pandas import Series duplicates = self[self.duplicated(keep='first')].unique() assert len(duplicates) out = Series(np.arange(len(self)), copy=False).groupby(self, observed=False).agg(list)[duplicates] if self._is_multi: out.index = type(self).from_tuples(out.index) if self.nlevels == 1: out = out.rename_axis('label') return out.to_frame(name='positions') def _shallow_copy(self, values, name: Hashable=no_default) -> Self: name = self._name if name is no_default else name return self._simple_new(values, name=name, refs=self._references) def _view(self) -> Self: result = self._simple_new(self._values, name=self._name, refs=self._references) result._cache = self._cache return result @final def _rename(self, name: Hashable) -> Self: result = self._view() result._name = name return result @final def is_(self, other) -> bool: if self is other: return True elif not hasattr(other, '_id'): return False elif self._id is None or other._id is None: return False else: return self._id is other._id @final def _reset_identity(self) -> None: self._id = object() @final def _cleanup(self) -> None: if '_engine' in self._cache: self._engine.clear_mapping() @cache_readonly def _engine(self) -> libindex.IndexEngine | libindex.ExtensionEngine | libindex.MaskedIndexEngine: target_values = self._get_engine_target() if isinstance(self._values, ArrowExtensionArray) and self.dtype.kind in 'Mm': import pyarrow as pa pa_type = self._values._pa_array.type if pa.types.is_timestamp(pa_type): target_values = self._values._to_datetimearray() return libindex.DatetimeEngine(target_values._ndarray) elif pa.types.is_duration(pa_type): target_values = self._values._to_timedeltaarray() return libindex.TimedeltaEngine(target_values._ndarray) if isinstance(target_values, ExtensionArray): if isinstance(target_values, (BaseMaskedArray, ArrowExtensionArray)): try: return _masked_engines[target_values.dtype.name](target_values) except KeyError: pass elif self._engine_type is libindex.ObjectEngine: return libindex.ExtensionEngine(target_values) target_values = cast(np.ndarray, target_values) if target_values.dtype == bool: return libindex.BoolEngine(target_values) elif target_values.dtype == np.complex64: return libindex.Complex64Engine(target_values) elif target_values.dtype == np.complex128: return libindex.Complex128Engine(target_values) elif needs_i8_conversion(self.dtype): target_values = self._data._ndarray elif is_string_dtype(self.dtype) and (not is_object_dtype(self.dtype)): return libindex.StringEngine(target_values) return self._engine_type(target_values) @final @cache_readonly def 
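# Illustrative sketch of the engine selection above: nullable and pyarrow-backed
# dtypes are routed to the masked engines, so label lookups behave the same as for
# plain numpy-backed indexes.
import pandas as pd

idx = pd.Index([1, 2, 3], dtype="Int64")
idx.get_loc(2)   # -> 1
idx.dtype        # -> Int64Dtype()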
_dir_additions_for_owner(self) -> set[str_t]: return {c for c in self.unique(level=0)[:get_option('display.max_dir_items')] if isinstance(c, str) and c.isidentifier()} def __len__(self) -> int: return len(self._data) def __array__(self, dtype=None, copy=None) -> np.ndarray: return np.asarray(self._data, dtype=dtype) def __array_ufunc__(self, ufunc: np.ufunc, method: str_t, *inputs, **kwargs): if any((isinstance(other, (ABCSeries, ABCDataFrame)) for other in inputs)): return NotImplemented result = arraylike.maybe_dispatch_ufunc_to_dunder_op(self, ufunc, method, *inputs, **kwargs) if result is not NotImplemented: return result if 'out' in kwargs: return arraylike.dispatch_ufunc_with_out(self, ufunc, method, *inputs, **kwargs) if method == 'reduce': result = arraylike.dispatch_reduction_ufunc(self, ufunc, method, *inputs, **kwargs) if result is not NotImplemented: return result new_inputs = [x if x is not self else x._values for x in inputs] result = getattr(ufunc, method)(*new_inputs, **kwargs) if ufunc.nout == 2: return tuple((self.__array_wrap__(x) for x in result)) elif method == 'reduce': result = lib.item_from_zerodim(result) return result elif is_scalar(result): return result if result.dtype == np.float16: result = result.astype(np.float32) return self.__array_wrap__(result) @final def __array_wrap__(self, result, context=None, return_scalar=False): result = lib.item_from_zerodim(result) if not isinstance(result, Index) and is_bool_dtype(result.dtype) or np.ndim(result) > 1: return result return Index(result, name=self.name) @cache_readonly def dtype(self) -> DtypeObj: return self._data.dtype @final def ravel(self, order: str_t='C') -> Self: return self[:] def view(self, cls=None): if cls is not None: dtype = cls if isinstance(cls, str): dtype = pandas_dtype(cls) if needs_i8_conversion(dtype): idx_cls = self._dtype_to_subclass(dtype) arr = self.array.view(dtype) if isinstance(arr, ExtensionArray): return idx_cls._simple_new(arr, name=self.name, refs=self._references) return arr result = self._data.view(cls) else: result = self._view() if isinstance(result, Index): result._id = self._id return result def astype(self, dtype: Dtype, copy: bool=True): if dtype is not None: dtype = pandas_dtype(dtype) if self.dtype == dtype: return self.copy() if copy else self values = self._data if isinstance(values, ExtensionArray): with rewrite_exception(type(values).__name__, type(self).__name__): new_values = values.astype(dtype, copy=copy) elif isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() new_values = cls._from_sequence(self, dtype=dtype, copy=copy) else: new_values = astype_array(values, dtype=dtype, copy=copy) result = Index(new_values, name=self.name, dtype=new_values.dtype, copy=False) if not copy and self._references is not None and astype_is_view(self.dtype, dtype): result._references = self._references result._references.add_index_reference(result) return result _index_shared_docs['take'] = "\n Return a new %(klass)s of the values selected by the indices.\n\n For internal compatibility with numpy arrays.\n\n Parameters\n ----------\n indices : array-like\n Indices to be taken.\n axis : int, optional\n The axis over which to select values, always 0.\n allow_fill : bool, default True\n How to handle negative values in `indices`.\n\n * False: negative values in `indices` indicate positional indices\n from the right (the default). This is similar to\n :func:`numpy.take`.\n\n * True: negative values in `indices` indicate\n missing values. 
These values are set to `fill_value`. Any other\n negative values raise a ``ValueError``.\n\n fill_value : scalar, default None\n If allow_fill=True and fill_value is not None, indices specified by\n -1 are regarded as NA. If Index doesn't hold NA, raise ValueError.\n **kwargs\n Required for compatibility with numpy.\n\n Returns\n -------\n Index\n An index formed of elements at the given indices. Will be the same\n type as self, except for RangeIndex.\n\n See Also\n --------\n numpy.ndarray.take: Return an array formed from the\n elements of a at the given indices.\n\n Examples\n --------\n >>> idx = pd.Index(['a', 'b', 'c'])\n >>> idx.take([2, 2, 1, 2])\n Index(['c', 'c', 'b', 'c'], dtype='object')\n " @Appender(_index_shared_docs['take'] % _index_doc_kwargs) def take(self, indices, axis: Axis=0, allow_fill: bool=True, fill_value=None, **kwargs) -> Self: if kwargs: nv.validate_take((), kwargs) if is_scalar(indices): raise TypeError('Expected indices to be array-like') indices = ensure_platform_int(indices) allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices) if indices.ndim == 1 and lib.is_range_indexer(indices, len(self)): return self.copy() values = self._values if isinstance(values, np.ndarray): taken = algos.take(values, indices, allow_fill=allow_fill, fill_value=self._na_value) else: taken = values.take(indices, allow_fill=allow_fill, fill_value=self._na_value) return self._constructor._simple_new(taken, name=self.name) @final def _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool: if allow_fill and fill_value is not None: if self._can_hold_na: if (indices < -1).any(): raise ValueError('When allow_fill=True and fill_value is not None, all indices must be >= -1') else: cls_name = type(self).__name__ raise ValueError(f'Unable to fill values because {cls_name} cannot contain NA') else: allow_fill = False return allow_fill _index_shared_docs['repeat'] = "\n Repeat elements of a %(klass)s.\n\n Returns a new %(klass)s where each element of the current %(klass)s\n is repeated consecutively a given number of times.\n\n Parameters\n ----------\n repeats : int or array of ints\n The number of repetitions for each element. This should be a\n non-negative integer. Repeating 0 times will return an empty\n %(klass)s.\n axis : None\n Must be ``None``. 
Has no effect but is accepted for compatibility\n with numpy.\n\n Returns\n -------\n %(klass)s\n Newly created %(klass)s with repeated elements.\n\n See Also\n --------\n Series.repeat : Equivalent function for Series.\n numpy.repeat : Similar method for :class:`numpy.ndarray`.\n\n Examples\n --------\n >>> idx = pd.Index(['a', 'b', 'c'])\n >>> idx\n Index(['a', 'b', 'c'], dtype='object')\n >>> idx.repeat(2)\n Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object')\n >>> idx.repeat([1, 2, 3])\n Index(['a', 'b', 'b', 'c', 'c', 'c'], dtype='object')\n " @Appender(_index_shared_docs['repeat'] % _index_doc_kwargs) def repeat(self, repeats, axis: None=None) -> Self: repeats = ensure_platform_int(repeats) nv.validate_repeat((), {'axis': axis}) res_values = self._values.repeat(repeats) return self._constructor._simple_new(res_values, name=self.name) def copy(self, name: Hashable | None=None, deep: bool=False) -> Self: name = self._validate_names(name=name, deep=deep)[0] if deep: new_data = self._data.copy() new_index = type(self)._simple_new(new_data, name=name) else: new_index = self._rename(name=name) return new_index @final def __copy__(self, **kwargs) -> Self: return self.copy(**kwargs) @final def __deepcopy__(self, memo=None) -> Self: return self.copy(deep=True) @final def __repr__(self) -> str_t: klass_name = type(self).__name__ data = self._format_data() attrs = self._format_attrs() attrs_str = [f'{k}={v}' for (k, v) in attrs] prepr = ', '.join(attrs_str) return f'{klass_name}({data}{prepr})' @property def _formatter_func(self): return default_pprint @final def _format_data(self, name=None) -> str_t: is_justify = True if self.inferred_type == 'string': is_justify = False elif isinstance(self.dtype, CategoricalDtype): self = cast('CategoricalIndex', self) if is_object_dtype(self.categories.dtype): is_justify = False elif isinstance(self, ABCRangeIndex): return '' return format_object_summary(self, self._formatter_func, is_justify=is_justify, name=name, line_break_each_value=self._is_multi) def _format_attrs(self) -> list[tuple[str_t, str_t | int | bool | None]]: attrs: list[tuple[str_t, str_t | int | bool | None]] = [] if not self._is_multi: attrs.append(('dtype', f"'{self.dtype}'")) if self.name is not None: attrs.append(('name', default_pprint(self.name))) elif self._is_multi and any((x is not None for x in self.names)): attrs.append(('names', default_pprint(self.names))) max_seq_items = get_option('display.max_seq_items') or len(self) if len(self) > max_seq_items: attrs.append(('length', len(self))) return attrs @final def _get_level_names(self) -> range | Sequence[Hashable]: if self._is_multi: return maybe_sequence_to_range([level if name is None else name for (level, name) in enumerate(self.names)]) else: return range(1) if self.name is None else [self.name] @final def _mpl_repr(self) -> np.ndarray: if isinstance(self.dtype, np.dtype) and self.dtype.kind != 'M': return cast(np.ndarray, self.values) return self.astype(object, copy=False)._values _default_na_rep = 'NaN' @final def _format_flat(self, *, include_name: bool, formatter: Callable | None=None) -> list[str_t]: header = [] if include_name: header.append(pprint_thing(self.name, escape_chars=('\t', '\r', '\n')) if self.name is not None else '') if formatter is not None: return header + list(self.map(formatter)) return self._format_with_header(header=header, na_rep=self._default_na_rep) def _format_with_header(self, *, header: list[str_t], na_rep: str_t) -> list[str_t]: from pandas.io.formats.format import format_array values = 
self._values if is_object_dtype(values.dtype) or is_string_dtype(values.dtype) or isinstance(self.dtype, (IntervalDtype, CategoricalDtype)): justify = 'all' else: justify = 'left' formatted = format_array(values, None, justify=justify) result = trim_front(formatted) return header + result def _get_values_for_csv(self, *, na_rep: str_t='', decimal: str_t='.', float_format=None, date_format=None, quoting=None) -> npt.NDArray[np.object_]: return get_values_for_csv(self._values, na_rep=na_rep, decimal=decimal, float_format=float_format, date_format=date_format, quoting=quoting) def _summary(self, name=None) -> str_t: if len(self) > 0: head = self[0] if hasattr(head, 'format') and (not isinstance(head, str)): head = head.format() elif needs_i8_conversion(self.dtype): head = self._formatter_func(head).replace("'", '') tail = self[-1] if hasattr(tail, 'format') and (not isinstance(tail, str)): tail = tail.format() elif needs_i8_conversion(self.dtype): tail = self._formatter_func(tail).replace("'", '') index_summary = f', {head} to {tail}' else: index_summary = '' if name is None: name = type(self).__name__ return f'{name}: {len(self)} entries{index_summary}' def to_flat_index(self) -> Self: return self @final def to_series(self, index=None, name: Hashable | None=None) -> Series: from pandas import Series if index is None: index = self._view() if name is None: name = self.name return Series(self._values.copy(), index=index, name=name) def to_frame(self, index: bool=True, name: Hashable=lib.no_default) -> DataFrame: from pandas import DataFrame if name is lib.no_default: result_name = self._get_level_names() else: result_name = Index([name]) result = DataFrame(self, copy=False) result.columns = result_name if index: result.index = self return result @property def name(self) -> Hashable: return self._name @name.setter def name(self, value: Hashable) -> None: if self._no_setting_name: raise RuntimeError("Cannot set name on a level of a MultiIndex. 
Use 'MultiIndex.set_names' instead.") maybe_extract_name(value, None, type(self)) self._name = value @final def _validate_names(self, name=None, names=None, deep: bool=False) -> list[Hashable]: from copy import deepcopy if names is not None and name is not None: raise TypeError('Can only provide one of `names` and `name`') if names is None and name is None: new_names = deepcopy(self.names) if deep else self.names elif names is not None: if not is_list_like(names): raise TypeError('Must pass list-like as `names`.') new_names = names elif not is_list_like(name): new_names = [name] else: new_names = name if len(new_names) != len(self.names): raise ValueError(f'Length of new names must be {len(self.names)}, got {len(new_names)}') validate_all_hashable(*new_names, error_name=f'{type(self).__name__}.name') return new_names def _get_default_index_names(self, names: Hashable | Sequence[Hashable] | None=None, default=None) -> list[Hashable]: from pandas.core.indexes.multi import MultiIndex if names is not None: if isinstance(names, (int, str)): names = [names] if not isinstance(names, list) and names is not None: raise ValueError('Index names must be str or 1-dimensional list') if not names: if isinstance(self, MultiIndex): names = com.fill_missing_names(self.names) else: names = [default] if self.name is None else [self.name] return names def _get_names(self) -> FrozenList: return FrozenList((self.name,)) def _set_names(self, values, *, level=None) -> None: if not is_list_like(values): raise ValueError('Names must be a list-like') if len(values) != 1: raise ValueError(f'Length of new names must be 1, got {len(values)}') validate_all_hashable(*values, error_name=f'{type(self).__name__}.name') self._name = values[0] names = property(fset=_set_names, fget=_get_names) @overload def set_names(self, names, *, level=..., inplace: Literal[False]=...) -> Self: ... @overload def set_names(self, names, *, level=..., inplace: Literal[True]) -> None: ... @overload def set_names(self, names, *, level=..., inplace: bool=...) -> Self | None: ... def set_names(self, names, *, level=None, inplace: bool=False) -> Self | None: if level is not None and (not isinstance(self, ABCMultiIndex)): raise ValueError('Level must be None for non-MultiIndex') if level is not None and (not is_list_like(level)) and is_list_like(names): raise TypeError('Names must be a string when a single level is provided.') if not is_list_like(names) and level is None and (self.nlevels > 1): raise TypeError('Must pass list-like as `names`.') if is_dict_like(names) and (not isinstance(self, ABCMultiIndex)): raise TypeError('Can only pass dict-like as `names` for MultiIndex.') if is_dict_like(names) and level is not None: raise TypeError('Can not pass level for dictlike `names`.') if isinstance(self, ABCMultiIndex) and is_dict_like(names) and (level is None): (level, names_adjusted) = ([], []) for (i, name) in enumerate(self.names): if name in names.keys(): level.append(i) names_adjusted.append(names[name]) names = names_adjusted if not is_list_like(names): names = [names] if level is not None and (not is_list_like(level)): level = [level] if inplace: idx = self else: idx = self._view() idx._set_names(names, level=level) if not inplace: return idx return None @overload def rename(self, name, *, inplace: Literal[False]=...) -> Self: ... @overload def rename(self, name, *, inplace: Literal[True]) -> None: ... 
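# NOTE: illustrative usage sketch, not part of the pandas source; it only shows
# how the set_names()/rename() pair defined here behaves from the caller's side,
# assuming a standard pandas install. set_names() returns a new Index with the
# updated name(s) unless inplace=True, in which case it mutates and returns None.
# >>> import pandas as pd
# >>> idx = pd.Index([1, 2, 3], name="x")
# >>> idx.set_names("y")
# Index([1, 2, 3], dtype='int64', name='y')
# >>> idx.rename("z", inplace=True)  # mutates idx, returns None
# >>> idx.name
# 'z'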
def rename(self, name, *, inplace: bool=False) -> Self | None: return self.set_names([name], inplace=inplace) @property def nlevels(self) -> int: return 1 def _sort_levels_monotonic(self) -> Self: return self @final def _validate_index_level(self, level) -> None: if isinstance(level, int): if level < 0 and level != -1: raise IndexError(f'Too many levels: Index has only 1 level, {level} is not a valid level number') if level > 0: raise IndexError(f'Too many levels: Index has only 1 level, not {level + 1}') elif level != self.name: raise KeyError(f'Requested level ({level}) does not match index name ({self.name})') def _get_level_number(self, level) -> int: self._validate_index_level(level) return 0 def sortlevel(self, level=None, ascending: bool | list[bool]=True, sort_remaining=None, na_position: NaPosition='first') -> tuple[Self, np.ndarray]: if not isinstance(ascending, (list, bool)): raise TypeError('ascending must be a single bool value or a list of bool values of length 1') if isinstance(ascending, list): if len(ascending) != 1: raise TypeError('ascending must be a list of bool values of length 1') ascending = ascending[0] if not isinstance(ascending, bool): raise TypeError('ascending must be a bool value') return self.sort_values(return_indexer=True, ascending=ascending, na_position=na_position) def _get_level_values(self, level) -> Index: self._validate_index_level(level) return self get_level_values = _get_level_values @final def droplevel(self, level: IndexLabel=0): if not isinstance(level, (tuple, list)): level = [level] levnums = sorted((self._get_level_number(lev) for lev in level), reverse=True) return self._drop_level_numbers(levnums) @final def _drop_level_numbers(self, levnums: list[int]): if not levnums and (not isinstance(self, ABCMultiIndex)): return self if len(levnums) >= self.nlevels: raise ValueError(f'Cannot remove {len(levnums)} levels from an index with {self.nlevels} levels: at least one level must be left.') self = cast('MultiIndex', self) new_levels = list(self.levels) new_codes = list(self.codes) new_names = list(self.names) for i in levnums: new_levels.pop(i) new_codes.pop(i) new_names.pop(i) if len(new_levels) == 1: lev = new_levels[0] if len(lev) == 0: if len(new_codes[0]) == 0: result = lev[:0] else: res_values = algos.take(lev._values, new_codes[0], allow_fill=True) result = lev._constructor._simple_new(res_values, name=new_names[0]) else: mask = new_codes[0] == -1 result = new_levels[0].take(new_codes[0]) if mask.any(): result = result.putmask(mask, np.nan) result._name = new_names[0] return result else: from pandas.core.indexes.multi import MultiIndex return MultiIndex(levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False) @cache_readonly @final def _can_hold_na(self) -> bool: if isinstance(self.dtype, ExtensionDtype): return self.dtype._can_hold_na if self.dtype.kind in 'iub': return False return True @property def is_monotonic_increasing(self) -> bool: return self._engine.is_monotonic_increasing @property def is_monotonic_decreasing(self) -> bool: return self._engine.is_monotonic_decreasing @final @property def _is_strictly_monotonic_increasing(self) -> bool: return self.is_unique and self.is_monotonic_increasing @final @property def _is_strictly_monotonic_decreasing(self) -> bool: return self.is_unique and self.is_monotonic_decreasing @cache_readonly def is_unique(self) -> bool: return self._engine.is_unique @final @property def has_duplicates(self) -> bool: return not self.is_unique @cache_readonly def inferred_type(self) -> 
str_t: return lib.infer_dtype(self._values, skipna=False) @cache_readonly @final def _is_all_dates(self) -> bool: if needs_i8_conversion(self.dtype): return True elif self.dtype != _dtype_obj: return False elif self._is_multi: return False return is_datetime_array(ensure_object(self._values)) @final @cache_readonly def _is_multi(self) -> bool: return isinstance(self, ABCMultiIndex) def __reduce__(self): d = {'data': self._data, 'name': self.name} return (_new_Index, (type(self), d), None) @cache_readonly def _na_value(self): dtype = self.dtype if isinstance(dtype, np.dtype): if dtype.kind in 'mM': return NaT return np.nan return dtype.na_value @cache_readonly def _isnan(self) -> npt.NDArray[np.bool_]: if self._can_hold_na: return isna(self) else: values = np.empty(len(self), dtype=np.bool_) values.fill(False) return values @cache_readonly def hasnans(self) -> bool: if self._can_hold_na: return bool(self._isnan.any()) else: return False @final def isna(self) -> npt.NDArray[np.bool_]: return self._isnan isnull = isna @final def notna(self) -> npt.NDArray[np.bool_]: return ~self.isna() notnull = notna def fillna(self, value): if not is_scalar(value): raise TypeError(f"'value' must be a scalar, passed: {type(value).__name__}") if self.hasnans: result = self.putmask(self._isnan, value) return Index._with_infer(result, name=self.name) return self._view() def dropna(self, how: AnyAll='any') -> Self: if how not in ('any', 'all'): raise ValueError(f'invalid how option: {how}') if self.hasnans: res_values = self._values[~self._isnan] return type(self)._simple_new(res_values, name=self.name) return self._view() def unique(self, level: Hashable | None=None) -> Self: if level is not None: self._validate_index_level(level) if self.is_unique: return self._view() result = super().unique() return self._shallow_copy(result) def drop_duplicates(self, *, keep: DropKeep='first') -> Self: if self.is_unique: return self._view() return super().drop_duplicates(keep=keep) def duplicated(self, keep: DropKeep='first') -> npt.NDArray[np.bool_]: if self.is_unique: return np.zeros(len(self), dtype=bool) return self._duplicated(keep=keep) def __iadd__(self, other): return self + other @final def __bool__(self) -> NoReturn: raise ValueError(f'The truth value of a {type(self).__name__} is ambiguous. 
Use a.empty, a.bool(), a.item(), a.any() or a.all().') def _get_reconciled_name_object(self, other): name = get_op_result_name(self, other) if self.name is not name: return self.rename(name) return self @final def _validate_sort_keyword(self, sort) -> None: if sort not in [None, False, True]: raise ValueError(f"The 'sort' keyword only takes the values of None, True, or False; {sort} was passed.") @final def _dti_setop_align_tzs(self, other: Index, setop: str_t) -> tuple[Index, Index]: if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex) and (self.tz is not None) and (other.tz is not None): left = self.tz_convert('UTC') right = other.tz_convert('UTC') return (left, right) return (self, other) @final def union(self, other, sort: bool | None=None): self._validate_sort_keyword(sort) self._assert_can_do_setop(other) (other, result_name) = self._convert_can_do_setop(other) if self.dtype != other.dtype: if isinstance(self, ABCMultiIndex) and (not is_object_dtype(_unpack_nested_dtype(other))) and (len(other) > 0): raise NotImplementedError('Can only union MultiIndex with MultiIndex or Index of tuples, try mi.to_flat_index().union(other) instead.') (self, other) = self._dti_setop_align_tzs(other, 'union') dtype = self._find_common_type_compat(other) left = self.astype(dtype, copy=False) right = other.astype(dtype, copy=False) return left.union(right, sort=sort) elif not len(other) or self.equals(other): result = self._get_reconciled_name_object(other) if sort is True: return result.sort_values() return result elif not len(self): result = other._get_reconciled_name_object(self) if sort is True: return result.sort_values() return result result = self._union(other, sort=sort) return self._wrap_setop_result(other, result) def _union(self, other: Index, sort: bool | None): lvals = self._values rvals = other._values if sort in (None, True) and (self.is_unique or other.is_unique) and self._can_use_libjoin and other._can_use_libjoin: try: return self._outer_indexer(other)[0] except (TypeError, IncompatibleFrequency): value_list = list(lvals) value_set = set(lvals) value_list.extend((x for x in rvals if x not in value_set)) return np.array(value_list, dtype=object) elif not other.is_unique: result_dups = algos.union_with_duplicates(self, other) return _maybe_try_sort(result_dups, sort) if self._index_as_unique: indexer = self.get_indexer(other) missing = (indexer == -1).nonzero()[0] else: missing = algos.unique1d(self.get_indexer_non_unique(other)[1]) result: Index | MultiIndex | ArrayLike if self._is_multi: result = self.append(other.take(missing)) elif len(missing) > 0: other_diff = rvals.take(missing) result = concat_compat((lvals, other_diff)) else: result = lvals if not self.is_monotonic_increasing or not other.is_monotonic_increasing: result = _maybe_try_sort(result, sort) return result @final def _wrap_setop_result(self, other: Index, result) -> Index: name = get_op_result_name(self, other) if isinstance(result, Index): if result.name != name: result = result.rename(name) else: result = self._shallow_copy(result, name=name) return result @final def intersection(self, other, sort: bool=False): self._validate_sort_keyword(sort) self._assert_can_do_setop(other) (other, result_name) = self._convert_can_do_setop(other) if self.dtype != other.dtype: (self, other) = self._dti_setop_align_tzs(other, 'intersection') if self.equals(other): if not self.is_unique: result = self.unique()._get_reconciled_name_object(other) else: result = self._get_reconciled_name_object(other) if sort is 
True: result = result.sort_values() return result if len(self) == 0 or len(other) == 0: if self._is_multi or other._is_multi: return self[:0].rename(result_name) dtype = self._find_common_type_compat(other) if self.dtype == dtype: if len(self) == 0: return self[:0].rename(result_name) else: return other[:0].rename(result_name) return Index([], dtype=dtype, name=result_name) elif not self._should_compare(other): if isinstance(self, ABCMultiIndex): return self[:0].rename(result_name) return Index([], name=result_name) elif self.dtype != other.dtype: dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.intersection(other, sort=sort) result = self._intersection(other, sort=sort) return self._wrap_intersection_result(other, result) def _intersection(self, other: Index, sort: bool=False): if self._can_use_libjoin and other._can_use_libjoin: try: (res_indexer, indexer, _) = self._inner_indexer(other) except TypeError: pass else: if is_numeric_dtype(self.dtype): res = algos.unique1d(res_indexer) else: result = self.take(indexer) res = result.drop_duplicates() return ensure_wrapped_if_datetimelike(res) res_values = self._intersection_via_get_indexer(other, sort=sort) res_values = _maybe_try_sort(res_values, sort) return res_values def _wrap_intersection_result(self, other, result): return self._wrap_setop_result(other, result) @final def _intersection_via_get_indexer(self, other: Index | MultiIndex, sort) -> ArrayLike | MultiIndex: left_unique = self.unique() right_unique = other.unique() indexer = left_unique.get_indexer_for(right_unique) mask = indexer != -1 taker = indexer.take(mask.nonzero()[0]) if sort is False: taker = np.sort(taker) result: MultiIndex | ExtensionArray | np.ndarray if isinstance(left_unique, ABCMultiIndex): result = left_unique.take(taker) else: result = left_unique.take(taker)._values return result @final def difference(self, other, sort: bool | None=None): self._validate_sort_keyword(sort) self._assert_can_do_setop(other) (other, result_name) = self._convert_can_do_setop(other) if self.equals(other): return self[:0].rename(result_name) if len(other) == 0: result = self.unique().rename(result_name) if sort is True: return result.sort_values() return result if not self._should_compare(other): result = self.unique().rename(result_name) if sort is True: return result.sort_values() return result result = self._difference(other, sort=sort) return self._wrap_difference_result(other, result) def _difference(self, other, sort): this = self if isinstance(self, ABCCategoricalIndex) and self.hasnans and other.hasnans: this = this.dropna() other = other.unique() the_diff = this[other.get_indexer_for(this) == -1] the_diff = the_diff if this.is_unique else the_diff.unique() the_diff = _maybe_try_sort(the_diff, sort) return the_diff def _wrap_difference_result(self, other, result): return self._wrap_setop_result(other, result) def symmetric_difference(self, other, result_name: abc.Hashable | None=None, sort: bool | None=None): self._validate_sort_keyword(sort) self._assert_can_do_setop(other) (other, result_name_update) = self._convert_can_do_setop(other) if result_name is None: result_name = result_name_update if self.dtype != other.dtype: (self, other) = self._dti_setop_align_tzs(other, 'symmetric_difference') if not self._should_compare(other): return self.union(other, sort=sort).rename(result_name) elif self.dtype != other.dtype: dtype = self._find_common_type_compat(other) this = self.astype(dtype, 
copy=False) that = other.astype(dtype, copy=False) return this.symmetric_difference(that, sort=sort).rename(result_name) this = self.unique() other = other.unique() indexer = this.get_indexer_for(other) common_indexer = indexer.take((indexer != -1).nonzero()[0]) left_indexer = np.setdiff1d(np.arange(this.size), common_indexer, assume_unique=True) left_diff = this.take(left_indexer) right_indexer = (indexer == -1).nonzero()[0] right_diff = other.take(right_indexer) res_values = left_diff.append(right_diff) result = _maybe_try_sort(res_values, sort) if not self._is_multi: return Index(result, name=result_name, dtype=res_values.dtype) else: left_diff = cast('MultiIndex', left_diff) if len(result) == 0: return left_diff.remove_unused_levels().set_names(result_name) return result.set_names(result_name) @final def _assert_can_do_setop(self, other) -> bool: if not is_list_like(other): raise TypeError('Input must be Index or array-like') return True def _convert_can_do_setop(self, other) -> tuple[Index, Hashable]: if not isinstance(other, Index): other = Index(other, name=self.name) result_name = self.name else: result_name = get_op_result_name(self, other) return (other, result_name) def get_loc(self, key): casted_key = self._maybe_cast_indexer(key) try: return self._engine.get_loc(casted_key) except KeyError as err: if isinstance(casted_key, slice) or (isinstance(casted_key, abc.Iterable) and any((isinstance(x, slice) for x in casted_key))): raise InvalidIndexError(key) from err raise KeyError(key) from err except TypeError: self._check_indexing_error(key) raise @final def get_indexer(self, target, method: ReindexMethod | None=None, limit: int | None=None, tolerance=None) -> npt.NDArray[np.intp]: method = clean_reindex_fill_method(method) orig_target = target target = self._maybe_cast_listlike_indexer(target) self._check_indexing_method(method, limit, tolerance) if not self._index_as_unique: raise InvalidIndexError(self._requires_unique_msg) if len(target) == 0: return np.array([], dtype=np.intp) if not self._should_compare(target) and (not self._should_partial_index(target)): return self._get_indexer_non_comparable(target, method=method, unique=True) if isinstance(self.dtype, CategoricalDtype): assert self.dtype == target.dtype indexer = self._engine.get_indexer(target.codes) if self.hasnans and target.hasnans: target_nans = isna(orig_target) loc = self.get_loc(np.nan) mask = target.isna() indexer[target_nans] = loc indexer[mask & ~target_nans] = -1 return indexer if isinstance(target.dtype, CategoricalDtype): categories_indexer = self.get_indexer(target.categories) indexer = algos.take_nd(categories_indexer, target.codes, fill_value=-1) if (not self._is_multi and self.hasnans) and target.hasnans: loc = self.get_loc(np.nan) mask = target.isna() indexer[mask] = loc return ensure_platform_int(indexer) (pself, ptarget) = self._maybe_downcast_for_indexing(target) if pself is not self or ptarget is not target: return pself.get_indexer(ptarget, method=method, limit=limit, tolerance=tolerance) if self.dtype == target.dtype and self.equals(target): return np.arange(len(target), dtype=np.intp) if self.dtype != target.dtype and (not self._should_partial_index(target)): dtype = self._find_common_type_compat(target) this = self.astype(dtype, copy=False) target = target.astype(dtype, copy=False) return this._get_indexer(target, method=method, limit=limit, tolerance=tolerance) return self._get_indexer(target, method, limit, tolerance) def _get_indexer(self, target: Index, method: str_t | None=None, limit: 
int | None=None, tolerance=None) -> npt.NDArray[np.intp]: if tolerance is not None: tolerance = self._convert_tolerance(tolerance, target) if method in ['pad', 'backfill']: indexer = self._get_fill_indexer(target, method, limit, tolerance) elif method == 'nearest': indexer = self._get_nearest_indexer(target, limit, tolerance) else: if target._is_multi and self._is_multi: engine = self._engine tgt_values = engine._extract_level_codes(target) else: tgt_values = target._get_engine_target() indexer = self._engine.get_indexer(tgt_values) return ensure_platform_int(indexer) @final def _should_partial_index(self, target: Index) -> bool: if isinstance(self.dtype, IntervalDtype): if isinstance(target.dtype, IntervalDtype): return False return self.left._should_compare(target) return False @final def _check_indexing_method(self, method: str_t | None, limit: int | None=None, tolerance=None) -> None: if method not in [None, 'bfill', 'backfill', 'pad', 'ffill', 'nearest']: raise ValueError('Invalid fill method') if self._is_multi: if method == 'nearest': raise NotImplementedError("method='nearest' not implemented yet for MultiIndex; see GitHub issue 9365") if method in ('pad', 'backfill'): if tolerance is not None: raise NotImplementedError('tolerance not implemented yet for MultiIndex') if isinstance(self.dtype, (IntervalDtype, CategoricalDtype)): if method is not None: raise NotImplementedError(f'method {method} not yet implemented for {type(self).__name__}') if method is None: if tolerance is not None: raise ValueError('tolerance argument only valid if doing pad, backfill or nearest reindexing') if limit is not None: raise ValueError('limit argument only valid if doing pad, backfill or nearest reindexing') def _convert_tolerance(self, tolerance, target: np.ndarray | Index) -> np.ndarray: tolerance = np.asarray(tolerance) if target.size != tolerance.size and tolerance.size > 1: raise ValueError('list-like tolerance size must match target index size') elif is_numeric_dtype(self) and (not np.issubdtype(tolerance.dtype, np.number)): if tolerance.ndim > 0: raise ValueError(f'tolerance argument for {type(self).__name__} with dtype {self.dtype} must contain numeric elements if it is list type') raise ValueError(f'tolerance argument for {type(self).__name__} with dtype {self.dtype} must be numeric if it is a scalar: {tolerance!r}') return tolerance @final def _get_fill_indexer(self, target: Index, method: str_t, limit: int | None=None, tolerance=None) -> npt.NDArray[np.intp]: if self._is_multi: if not (self.is_monotonic_increasing or self.is_monotonic_decreasing): raise ValueError('index must be monotonic increasing or decreasing') encoded = self.append(target)._engine.values self_encoded = Index(encoded[:len(self)]) target_encoded = Index(encoded[len(self):]) return self_encoded._get_fill_indexer(target_encoded, method, limit, tolerance) if self.is_monotonic_increasing and target.is_monotonic_increasing: target_values = target._get_engine_target() own_values = self._get_engine_target() if not isinstance(target_values, np.ndarray) or not isinstance(own_values, np.ndarray): raise NotImplementedError if method == 'pad': indexer = libalgos.pad(own_values, target_values, limit=limit) else: indexer = libalgos.backfill(own_values, target_values, limit=limit) else: indexer = self._get_fill_indexer_searchsorted(target, method, limit) if tolerance is not None and len(self): indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer @final def _get_fill_indexer_searchsorted(self, target: 
Index, method: str_t, limit: int | None=None) -> npt.NDArray[np.intp]: if limit is not None: raise ValueError(f'limit argument for {method!r} method only well-defined if index and target are monotonic') side: Literal['left', 'right'] = 'left' if method == 'pad' else 'right' indexer = self.get_indexer(target) nonexact = indexer == -1 indexer[nonexact] = self._searchsorted_monotonic(target[nonexact], side) if side == 'left': indexer[nonexact] -= 1 else: indexer[indexer == len(self)] = -1 return indexer @final def _get_nearest_indexer(self, target: Index, limit: int | None, tolerance) -> npt.NDArray[np.intp]: if not len(self): return self._get_fill_indexer(target, 'pad') left_indexer = self.get_indexer(target, 'pad', limit=limit) right_indexer = self.get_indexer(target, 'backfill', limit=limit) left_distances = self._difference_compat(target, left_indexer) right_distances = self._difference_compat(target, right_indexer) op = operator.lt if self.is_monotonic_increasing else operator.le indexer = np.where(op(left_distances, right_distances) | (right_indexer == -1), left_indexer, right_indexer) if tolerance is not None: indexer = self._filter_indexer_tolerance(target, indexer, tolerance) return indexer @final def _filter_indexer_tolerance(self, target: Index, indexer: npt.NDArray[np.intp], tolerance) -> npt.NDArray[np.intp]: distance = self._difference_compat(target, indexer) return np.where(distance <= tolerance, indexer, -1) @final def _difference_compat(self, target: Index, indexer: npt.NDArray[np.intp]) -> ArrayLike: if isinstance(self.dtype, PeriodDtype): own_values = cast('PeriodArray', self._data)._ndarray target_values = cast('PeriodArray', target._data)._ndarray diff = own_values[indexer] - target_values else: diff = self._values[indexer] - target._values return abs(diff) @final def _validate_positional_slice(self, key: slice) -> None: self._validate_indexer('positional', key.start, 'iloc') self._validate_indexer('positional', key.stop, 'iloc') self._validate_indexer('positional', key.step, 'iloc') def _convert_slice_indexer(self, key: slice, kind: Literal['loc', 'getitem']): (start, stop, step) = (key.start, key.stop, key.step) is_index_slice = is_valid_positional_slice(key) if kind == 'getitem': if is_index_slice: return key elif self.dtype.kind in 'iu': self._validate_indexer('slice', key.start, 'getitem') self._validate_indexer('slice', key.stop, 'getitem') self._validate_indexer('slice', key.step, 'getitem') return key is_positional = is_index_slice and self._should_fallback_to_positional if is_positional: try: if start is not None: self.get_loc(start) if stop is not None: self.get_loc(stop) is_positional = False except KeyError: pass if com.is_null_slice(key): indexer = key elif is_positional: if kind == 'loc': raise TypeError('Slicing a positional slice with .loc is not allowed. Use .loc with labels or .iloc with positions instead.') indexer = key else: indexer = self.slice_indexer(start, stop, step) return indexer @final def _raise_invalid_indexer(self, form: Literal['slice', 'positional'], key, reraise: lib.NoDefault | None | Exception=lib.no_default) -> None: msg = f'cannot do {form} indexing on {type(self).__name__} with these indexers [{key}] of type {type(key).__name__}' if reraise is not lib.no_default: raise TypeError(msg) from reraise raise TypeError(msg) @final def _validate_can_reindex(self, indexer: np.ndarray) -> None: if not self._index_as_unique and len(indexer): raise ValueError('cannot reindex on an axis with duplicate labels') def reindex(self, target, method: 
ReindexMethod | None=None, level=None, limit: int | None=None, tolerance: float | None=None) -> tuple[Index, npt.NDArray[np.intp] | None]: preserve_names = not hasattr(target, 'name') target = ensure_has_len(target) if not isinstance(target, Index) and len(target) == 0: if level is not None and self._is_multi: idx = self.levels[level] else: idx = self target = idx[:0] else: target = ensure_index(target) if level is not None and (isinstance(self, ABCMultiIndex) or isinstance(target, ABCMultiIndex)): if method is not None: raise TypeError('Fill method not supported if level passed') (target, indexer, _) = self._join_level(target, level, how='right', keep_order=not self._is_multi) elif self.equals(target): indexer = None elif self._index_as_unique: indexer = self.get_indexer(target, method=method, limit=limit, tolerance=tolerance) elif self._is_multi: raise ValueError('cannot handle a non-unique multi-index!') elif not self.is_unique: raise ValueError('cannot reindex on an axis with duplicate labels') else: (indexer, _) = self.get_indexer_non_unique(target) target = self._wrap_reindex_result(target, indexer, preserve_names) return (target, indexer) def _wrap_reindex_result(self, target, indexer, preserve_names: bool): target = self._maybe_preserve_names(target, preserve_names) return target def _maybe_preserve_names(self, target: IndexT, preserve_names: bool) -> IndexT: if preserve_names and target.nlevels == 1 and (target.name != self.name): target = target.copy(deep=False) target.name = self.name return target @final def _reindex_non_unique(self, target: Index) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp] | None]: target = ensure_index(target) if len(target) == 0: return (self[:0], np.array([], dtype=np.intp), None) (indexer, missing) = self.get_indexer_non_unique(target) check = indexer != -1 new_labels: Index | np.ndarray = self.take(indexer[check]) new_indexer = None if len(missing): length = np.arange(len(indexer), dtype=np.intp) missing = ensure_platform_int(missing) missing_labels = target.take(missing) missing_indexer = length[~check] cur_labels = self.take(indexer[check]).values cur_indexer = length[check] new_labels = np.empty((len(indexer),), dtype=object) new_labels[cur_indexer] = cur_labels new_labels[missing_indexer] = missing_labels if not len(self): new_indexer = np.arange(0, dtype=np.intp) elif target.is_unique: new_indexer = np.arange(len(indexer), dtype=np.intp) new_indexer[cur_indexer] = np.arange(len(cur_labels)) new_indexer[missing_indexer] = -1 else: indexer[~check] = -1 new_indexer = np.arange(len(self.take(indexer)), dtype=np.intp) new_indexer[~check] = -1 if not isinstance(self, ABCMultiIndex): new_index = Index(new_labels, name=self.name) else: new_index = type(self).from_tuples(new_labels, names=self.names) return (new_index, indexer, new_indexer) @overload def join(self, other: Index, *, how: JoinHow=..., level: Level=..., return_indexers: Literal[True], sort: bool=...) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... @overload def join(self, other: Index, *, how: JoinHow=..., level: Level=..., return_indexers: Literal[False]=..., sort: bool=...) -> Index: ... @overload def join(self, other: Index, *, how: JoinHow=..., level: Level=..., return_indexers: bool=..., sort: bool=...) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: ... 
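# NOTE: illustrative usage sketch, not part of the pandas source; the join()
# implementation below dispatches to monotonic, non-unique, level and multi
# joins, but at the caller level the contract is simply "joined index plus
# optional left/right indexers". A sketch assuming a standard pandas install
# (exact dtypes/reprs depend on the inputs):
# >>> import pandas as pd
# >>> left = pd.Index([1, 2, 3])
# >>> right = pd.Index([2, 3, 4])
# >>> left.join(right, how="inner")
# Index([2, 3], dtype='int64')
# >>> joined, lidx, ridx = left.join(right, how="outer", return_indexers=True)
# >>> joined
# Index([1, 2, 3, 4], dtype='int64')
# >>> lidx, ridx  # -1 marks positions with no match on that side
# (array([ 0,  1,  2, -1]), array([-1,  0,  1,  2]))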
@final @_maybe_return_indexers def join(self, other: Index, *, how: JoinHow='left', level: Level | None=None, return_indexers: bool=False, sort: bool=False) -> Index | tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: other = ensure_index(other) sort = sort or how == 'outer' if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex): if (self.tz is None) ^ (other.tz is None): raise TypeError('Cannot join tz-naive with tz-aware DatetimeIndex') if not self._is_multi and (not other._is_multi): (pself, pother) = self._maybe_downcast_for_indexing(other) if pself is not self or pother is not other: return pself.join(pother, how=how, level=level, return_indexers=True, sort=sort) if level is None and (self._is_multi or other._is_multi): if self.names == other.names: pass else: return self._join_multi(other, how=how) if level is not None and (self._is_multi or other._is_multi): return self._join_level(other, level, how=how) if len(self) == 0 or len(other) == 0: try: return self._join_empty(other, how, sort) except TypeError: pass if self.dtype != other.dtype: dtype = self._find_common_type_compat(other) this = self.astype(dtype, copy=False) other = other.astype(dtype, copy=False) return this.join(other, how=how, return_indexers=True) elif isinstance(self, ABCCategoricalIndex) and isinstance(other, ABCCategoricalIndex) and (not self.ordered) and (not self.categories.equals(other.categories)): other = Index(other._values.reorder_categories(self.categories)) _validate_join_method(how) if self.is_monotonic_increasing and other.is_monotonic_increasing and self._can_use_libjoin and other._can_use_libjoin and (self.is_unique or other.is_unique): try: return self._join_monotonic(other, how=how) except TypeError: pass elif not self.is_unique or not other.is_unique: return self._join_non_unique(other, how=how, sort=sort) return self._join_via_get_indexer(other, how, sort) def _join_empty(self, other: Index, how: JoinHow, sort: bool) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: assert len(self) == 0 or len(other) == 0 _validate_join_method(how) lidx: np.ndarray | None ridx: np.ndarray | None if len(other): how = cast(JoinHow, {'left': 'right', 'right': 'left'}.get(how, how)) (join_index, ridx, lidx) = other._join_empty(self, how, sort) elif how in ['left', 'outer']: if sort and (not self.is_monotonic_increasing): lidx = self.argsort() join_index = self.take(lidx) else: lidx = None join_index = self._view() ridx = np.broadcast_to(np.intp(-1), len(join_index)) else: join_index = other._view() lidx = np.array([], dtype=np.intp) ridx = None return (join_index, lidx, ridx) @final def _join_via_get_indexer(self, other: Index, how: JoinHow, sort: bool) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: lindexer: npt.NDArray[np.intp] | None rindexer: npt.NDArray[np.intp] | None if how == 'left': if sort: (join_index, lindexer) = self.sort_values(return_indexer=True) rindexer = other.get_indexer_for(join_index) return (join_index, lindexer, rindexer) else: join_index = self elif how == 'right': if sort: (join_index, rindexer) = other.sort_values(return_indexer=True) lindexer = self.get_indexer_for(join_index) return (join_index, lindexer, rindexer) else: join_index = other elif how == 'inner': join_index = self.intersection(other, sort=sort) elif how == 'outer': try: join_index = self.union(other, sort=sort) except TypeError: join_index = self.union(other) try: join_index = _maybe_try_sort(join_index, sort) except TypeError: 
pass names = other.names if how == 'right' else self.names if join_index.names != names: join_index = join_index.set_names(names) if join_index is self: lindexer = None else: lindexer = self.get_indexer_for(join_index) if join_index is other: rindexer = None else: rindexer = other.get_indexer_for(join_index) return (join_index, lindexer, rindexer) @final def _join_multi(self, other: Index, how: JoinHow): from pandas.core.indexes.multi import MultiIndex from pandas.core.reshape.merge import restore_dropped_levels_multijoin self_names_list = list(self.names) other_names_list = list(other.names) self_names_order = self_names_list.index other_names_order = other_names_list.index self_names = set(self_names_list) other_names = set(other_names_list) overlap = self_names & other_names if not overlap: raise ValueError('cannot join with no overlapping index names') if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): ldrop_names = sorted(self_names - overlap, key=self_names_order) rdrop_names = sorted(other_names - overlap, key=other_names_order) if not len(ldrop_names + rdrop_names): self_jnlevels = self other_jnlevels = other.reorder_levels(self.names) else: self_jnlevels = self.droplevel(ldrop_names) other_jnlevels = other.droplevel(rdrop_names) (join_idx, lidx, ridx) = self_jnlevels.join(other_jnlevels, how=how, return_indexers=True) dropped_names = ldrop_names + rdrop_names (levels, codes, names) = restore_dropped_levels_multijoin(self, other, dropped_names, join_idx, lidx, ridx) multi_join_idx = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False) multi_join_idx = multi_join_idx.remove_unused_levels() if how == 'right': level_order = other_names_list + ldrop_names else: level_order = self_names_list + rdrop_names multi_join_idx = multi_join_idx.reorder_levels(level_order) return (multi_join_idx, lidx, ridx) jl = next(iter(overlap)) flip_order = False if isinstance(self, MultiIndex): (self, other) = (other, self) flip_order = True flip: dict[JoinHow, JoinHow] = {'right': 'left', 'left': 'right'} how = flip.get(how, how) level = other.names.index(jl) result = self._join_level(other, level, how=how) if flip_order: return (result[0], result[2], result[1]) return result @final def _join_non_unique(self, other: Index, how: JoinHow='left', sort: bool=False) -> tuple[Index, npt.NDArray[np.intp], npt.NDArray[np.intp]]: from pandas.core.reshape.merge import get_join_indexers_non_unique assert self.dtype == other.dtype (left_idx, right_idx) = get_join_indexers_non_unique(self._values, other._values, how=how, sort=sort) if how == 'right': join_index = other.take(right_idx) else: join_index = self.take(left_idx) if how == 'outer': mask = left_idx == -1 if mask.any(): right = other.take(right_idx) join_index = join_index.putmask(mask, right) if isinstance(join_index, ABCMultiIndex) and how == 'outer': join_index = join_index._sort_levels_monotonic() return (join_index, left_idx, right_idx) @final def _join_level(self, other: Index, level, how: JoinHow='left', keep_order: bool=True) -> tuple[MultiIndex, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: from pandas.core.indexes.multi import MultiIndex def _get_leaf_sorter(labels: list[np.ndarray]) -> npt.NDArray[np.intp]: if labels[0].size == 0: return np.empty(0, dtype=np.intp) if len(labels) == 1: return get_group_index_sorter(ensure_platform_int(labels[0])) tic = labels[0][:-1] != labels[0][1:] for lab in labels[1:-1]: tic |= lab[:-1] != lab[1:] starts = np.hstack(([True], tic, [True])).nonzero()[0] lab = 
ensure_int64(labels[-1]) return lib.get_level_sorter(lab, ensure_platform_int(starts)) if isinstance(self, MultiIndex) and isinstance(other, MultiIndex): raise TypeError('Join on level between two MultiIndex objects is ambiguous') (left, right) = (self, other) flip_order = not isinstance(self, MultiIndex) if flip_order: (left, right) = (right, left) flip: dict[JoinHow, JoinHow] = {'right': 'left', 'left': 'right'} how = flip.get(how, how) assert isinstance(left, MultiIndex) level = left._get_level_number(level) old_level = left.levels[level] if not right.is_unique: raise NotImplementedError('Index._join_level on non-unique index is not implemented') (new_level, left_lev_indexer, right_lev_indexer) = old_level.join(right, how=how, return_indexers=True) if left_lev_indexer is None: if keep_order or len(left) == 0: left_indexer = None join_index = left else: left_indexer = _get_leaf_sorter(left.codes[:level + 1]) join_index = left[left_indexer] else: left_lev_indexer = ensure_platform_int(left_lev_indexer) rev_indexer = lib.get_reverse_indexer(left_lev_indexer, len(old_level)) old_codes = left.codes[level] taker = old_codes[old_codes != -1] new_lev_codes = rev_indexer.take(taker) new_codes = list(left.codes) new_codes[level] = new_lev_codes new_levels = list(left.levels) new_levels[level] = new_level if keep_order: left_indexer = np.arange(len(left), dtype=np.intp) left_indexer = cast(np.ndarray, left_indexer) mask = new_lev_codes != -1 if not mask.all(): new_codes = [lab[mask] for lab in new_codes] left_indexer = left_indexer[mask] elif level == 0: max_new_lev = 0 if len(new_lev_codes) == 0 else new_lev_codes.max() ngroups = 1 + max_new_lev (left_indexer, counts) = libalgos.groupsort_indexer(new_lev_codes, ngroups) left_indexer = left_indexer[counts[0]:] new_codes = [lab[left_indexer] for lab in new_codes] else: mask = new_lev_codes != -1 mask_all = mask.all() if not mask_all: new_codes = [lab[mask] for lab in new_codes] left_indexer = _get_leaf_sorter(new_codes[:level + 1]) new_codes = [lab[left_indexer] for lab in new_codes] if not mask_all: left_indexer = mask.nonzero()[0][left_indexer] join_index = MultiIndex(levels=new_levels, codes=new_codes, names=left.names, verify_integrity=False) if right_lev_indexer is not None: right_indexer = right_lev_indexer.take(join_index.codes[level]) else: right_indexer = join_index.codes[level] if flip_order: (left_indexer, right_indexer) = (right_indexer, left_indexer) left_indexer = None if left_indexer is None else ensure_platform_int(left_indexer) right_indexer = None if right_indexer is None else ensure_platform_int(right_indexer) return (join_index, left_indexer, right_indexer) def _join_monotonic(self, other: Index, how: JoinHow='left') -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: assert other.dtype == self.dtype assert self._can_use_libjoin and other._can_use_libjoin if self.equals(other): ret_index = other if how == 'right' else self return (ret_index, None, None) ridx: npt.NDArray[np.intp] | None lidx: npt.NDArray[np.intp] | None if how == 'left': if other.is_unique: join_index = self lidx = None ridx = self._left_indexer_unique(other) else: (join_array, lidx, ridx) = self._left_indexer(other) (join_index, lidx, ridx) = self._wrap_join_result(join_array, other, lidx, ridx, how) elif how == 'right': if self.is_unique: join_index = other lidx = other._left_indexer_unique(self) ridx = None else: (join_array, ridx, lidx) = other._left_indexer(self) (join_index, lidx, ridx) = self._wrap_join_result(join_array, other, 
lidx, ridx, how) elif how == 'inner': (join_array, lidx, ridx) = self._inner_indexer(other) (join_index, lidx, ridx) = self._wrap_join_result(join_array, other, lidx, ridx, how) elif how == 'outer': (join_array, lidx, ridx) = self._outer_indexer(other) (join_index, lidx, ridx) = self._wrap_join_result(join_array, other, lidx, ridx, how) lidx = None if lidx is None else ensure_platform_int(lidx) ridx = None if ridx is None else ensure_platform_int(ridx) return (join_index, lidx, ridx) def _wrap_join_result(self, joined: ArrayLike, other: Self, lidx: npt.NDArray[np.intp] | None, ridx: npt.NDArray[np.intp] | None, how: JoinHow) -> tuple[Self, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: assert other.dtype == self.dtype if lidx is not None and lib.is_range_indexer(lidx, len(self)): lidx = None if ridx is not None and lib.is_range_indexer(ridx, len(other)): ridx = None if lidx is None: join_index = self elif ridx is None: join_index = other else: join_index = self._constructor._with_infer(joined, dtype=self.dtype) names = other.names if how == 'right' else self.names if join_index.names != names: join_index = join_index.set_names(names) return (join_index, lidx, ridx) @final @cache_readonly def _can_use_libjoin(self) -> bool: if not self.is_monotonic_increasing: return False if type(self) is Index: return isinstance(self.dtype, np.dtype) or isinstance(self._values, (ArrowExtensionArray, BaseMaskedArray)) or (isinstance(self.dtype, StringDtype) and self.dtype.storage == 'python') return not isinstance(self, (ABCIntervalIndex, ABCMultiIndex)) @property def values(self) -> ArrayLike: data = self._data if isinstance(data, np.ndarray): data = data.view() data.flags.writeable = False return data @cache_readonly @doc(IndexOpsMixin.array) def array(self) -> ExtensionArray: array = self._data if isinstance(array, np.ndarray): from pandas.core.arrays.numpy_ import NumpyExtensionArray array = NumpyExtensionArray(array) return array @property def _values(self) -> ExtensionArray | np.ndarray: return self._data def _get_engine_target(self) -> ArrayLike: vals = self._values if isinstance(vals, StringArray): return vals._ndarray if isinstance(vals, ArrowExtensionArray) and self.dtype.kind in 'Mm': import pyarrow as pa pa_type = vals._pa_array.type if pa.types.is_timestamp(pa_type): vals = vals._to_datetimearray() return vals._ndarray.view('i8') elif pa.types.is_duration(pa_type): vals = vals._to_timedeltaarray() return vals._ndarray.view('i8') if type(self) is Index and isinstance(self._values, ExtensionArray) and (not isinstance(self._values, BaseMaskedArray)) and (not (isinstance(self._values, ArrowExtensionArray) and is_numeric_dtype(self.dtype) and (self.dtype.kind != 'O'))): return self._values.astype(object) return vals @final def _get_join_target(self) -> np.ndarray: if isinstance(self._values, BaseMaskedArray): return self._values._data elif isinstance(self._values, ArrowExtensionArray): return self._values.to_numpy() target = self._get_engine_target() if not isinstance(target, np.ndarray): raise ValueError('_can_use_libjoin should return False.') return target def _from_join_target(self, result: np.ndarray) -> ArrayLike: if isinstance(self.values, BaseMaskedArray): return type(self.values)(result, np.zeros(result.shape, dtype=np.bool_)) elif isinstance(self.values, (ArrowExtensionArray, StringArray)): return type(self.values)._from_sequence(result, dtype=self.dtype) return result @doc(IndexOpsMixin._memory_usage) def memory_usage(self, deep: bool=False) -> int: result = 
self._memory_usage(deep=deep) if '_engine' in self._cache: result += self._engine.sizeof(deep=deep) return result @final def where(self, cond, other=None) -> Index: if isinstance(self, ABCMultiIndex): raise NotImplementedError('.where is not supported for MultiIndex operations') cond = np.asarray(cond, dtype=bool) return self.putmask(~cond, other) @final @classmethod def _raise_scalar_data_error(cls, data): raise TypeError(f'{cls.__name__}(...) must be called with a collection of some kind, {(repr(data) if not isinstance(data, np.generic) else str(data))} was passed') def _validate_fill_value(self, value): dtype = self.dtype if isinstance(dtype, np.dtype) and dtype.kind not in 'mM': try: return np_can_hold_element(dtype, value) except LossySetitemError as err: raise TypeError from err elif not can_hold_element(self._values, value): raise TypeError return value @cache_readonly def _is_memory_usage_qualified(self) -> bool: return is_object_dtype(self.dtype) def __contains__(self, key: Any) -> bool: hash(key) try: return key in self._engine except (OverflowError, TypeError, ValueError): return False __hash__: ClassVar[None] @final def __setitem__(self, key, value) -> None: raise TypeError('Index does not support mutable operations') def __getitem__(self, key): getitem = self._data.__getitem__ if is_integer(key) or is_float(key): key = com.cast_scalar_indexer(key) return getitem(key) if isinstance(key, slice): return self._getitem_slice(key) if com.is_bool_indexer(key): if isinstance(getattr(key, 'dtype', None), ExtensionDtype): key = key.to_numpy(dtype=bool, na_value=False) else: key = np.asarray(key, dtype=bool) if not isinstance(self.dtype, ExtensionDtype): if len(key) == 0 and len(key) != len(self): raise ValueError('The length of the boolean indexer cannot be 0 when the Index has length greater than 0.') result = getitem(key) if result.ndim > 1: disallow_ndim_indexing(result) return self._constructor._simple_new(result, name=self._name) def _getitem_slice(self, slobj: slice) -> Self: res = self._data[slobj] result = type(self)._simple_new(res, name=self._name, refs=self._references) if '_engine' in self._cache: reverse = slobj.step is not None and slobj.step < 0 result._engine._update_from_sliced(self._engine, reverse=reverse) return result @final def _can_hold_identifiers_and_holds_name(self, name) -> bool: if is_object_dtype(self.dtype) or is_string_dtype(self.dtype) or isinstance(self.dtype, CategoricalDtype): return name in self return False def append(self, other: Index | Sequence[Index]) -> Index: to_concat = [self] if isinstance(other, (list, tuple)): to_concat += list(other) else: to_concat.append(other) for obj in to_concat: if not isinstance(obj, Index): raise TypeError('all inputs must be Index') names = {obj.name for obj in to_concat} name = None if len(names) > 1 else self.name return self._concat(to_concat, name) def _concat(self, to_concat: list[Index], name: Hashable) -> Index: to_concat_vals = [x._values for x in to_concat] result = concat_compat(to_concat_vals) return Index._with_infer(result, name=name) def putmask(self, mask, value) -> Index: (mask, noop) = validate_putmask(self._values, mask) if noop: return self.copy() if self.dtype != object and is_valid_na_for_dtype(value, self.dtype): value = self._na_value try: converted = self._validate_fill_value(value) except (LossySetitemError, ValueError, TypeError) as err: if is_object_dtype(self.dtype): raise err dtype = self._find_common_type_compat(value) return self.astype(dtype).putmask(mask, value) values = 
self._values.copy() if isinstance(values, np.ndarray): converted = setitem_datetimelike_compat(values, mask.sum(), converted) np.putmask(values, mask, converted) else: values._putmask(mask, value) return self._shallow_copy(values) def equals(self, other: Any) -> bool: if self.is_(other): return True if not isinstance(other, Index): return False if len(self) != len(other): return False if isinstance(self.dtype, StringDtype) and self.dtype.na_value is np.nan and (other.dtype != self.dtype): return other.equals(self.astype(object)) if is_object_dtype(self.dtype) and (not is_object_dtype(other.dtype)): return other.equals(self) if isinstance(other, ABCMultiIndex): return other.equals(self) if isinstance(self._values, ExtensionArray): if not isinstance(other, type(self)): return False earr = cast(ExtensionArray, self._data) return earr.equals(other._data) if isinstance(other.dtype, ExtensionDtype): return other.equals(self) return array_equivalent(self._values, other._values) @final def identical(self, other) -> bool: return self.equals(other) and all((getattr(self, c, None) == getattr(other, c, None) for c in self._comparables)) and (type(self) == type(other)) and (self.dtype == other.dtype) @final def asof(self, label): self._searchsorted_monotonic(label) try: loc = self.get_loc(label) except (KeyError, TypeError) as err: indexer = self.get_indexer([label], method='pad') if indexer.ndim > 1 or indexer.size > 1: raise TypeError('asof requires scalar valued input') from err loc = indexer.item() if loc == -1: return self._na_value else: if isinstance(loc, slice): loc = loc.indices(len(self))[-1] return self[loc] def asof_locs(self, where: Index, mask: npt.NDArray[np.bool_]) -> npt.NDArray[np.intp]: locs = self._values[mask].searchsorted(where._values, side='right') locs = np.where(locs > 0, locs - 1, 0) result = np.arange(len(self), dtype=np.intp)[mask].take(locs) first_value = self._values[mask.argmax()] result[(locs == 0) & (where._values < first_value)] = -1 return result @overload def sort_values(self, *, return_indexer: Literal[False]=..., ascending: bool=..., na_position: NaPosition=..., key: Callable | None=...) -> Self: ... @overload def sort_values(self, *, return_indexer: Literal[True], ascending: bool=..., na_position: NaPosition=..., key: Callable | None=...) -> tuple[Self, np.ndarray]: ... @overload def sort_values(self, *, return_indexer: bool=..., ascending: bool=..., na_position: NaPosition=..., key: Callable | None=...) -> Self | tuple[Self, np.ndarray]: ... 
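# NOTE: illustrative usage sketch, not part of the pandas source; it shows the
# two call patterns of the sort_values() overloads declared above, assuming a
# standard pandas install:
# >>> import pandas as pd
# >>> idx = pd.Index([20, 10, 30])
# >>> idx.sort_values()
# Index([10, 20, 30], dtype='int64')
# >>> idx.sort_values(return_indexer=True, ascending=False)
# (Index([30, 20, 10], dtype='int64'), array([2, 0, 1]))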
def sort_values(self, *, return_indexer: bool=False, ascending: bool=True, na_position: NaPosition='last', key: Callable | None=None) -> Self | tuple[Self, np.ndarray]: if key is None and (ascending and self.is_monotonic_increasing or (not ascending and self.is_monotonic_decreasing)): if return_indexer: indexer = np.arange(len(self), dtype=np.intp) return (self.copy(), indexer) else: return self.copy() if not isinstance(self, ABCMultiIndex): _as = nargsort(items=self, ascending=ascending, na_position=na_position, key=key) else: idx = cast(Index, ensure_key_mapped(self, key)) _as = idx.argsort(na_position=na_position) if not ascending: _as = _as[::-1] sorted_index = self.take(_as) if return_indexer: return (sorted_index, _as) else: return sorted_index def shift(self, periods: int=1, freq=None) -> Self: raise NotImplementedError(f'This method is only implemented for DatetimeIndex, PeriodIndex and TimedeltaIndex; Got type {type(self).__name__}') def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]: return self._data.argsort(*args, **kwargs) def _check_indexing_error(self, key) -> None: if not is_scalar(key): raise InvalidIndexError(key) @cache_readonly def _should_fallback_to_positional(self) -> bool: return self.inferred_type not in {'integer', 'mixed-integer', 'floating', 'complex'} _index_shared_docs['get_indexer_non_unique'] = "\n Compute indexer and mask for new index given the current index.\n\n The indexer should be then used as an input to ndarray.take to align the\n current data to the new index.\n\n Parameters\n ----------\n target : %(target_klass)s\n An iterable containing the values to be used for computing indexer.\n\n Returns\n -------\n indexer : np.ndarray[np.intp]\n Integers from 0 to n - 1 indicating that the index at these\n positions matches the corresponding target values. Missing values\n in the target are marked by -1.\n missing : np.ndarray[np.intp]\n An indexer into the target of the values not found.\n These correspond to the -1 in the indexer array.\n\n See Also\n --------\n Index.get_indexer : Computes indexer and mask for new index given\n the current index.\n Index.get_indexer_for : Returns an indexer even when non-unique.\n\n Examples\n --------\n >>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])\n >>> index.get_indexer_non_unique(['b', 'b'])\n (array([1, 3, 4, 1, 3, 4]), array([], dtype=int64))\n\n In the example below there are no matched values.\n\n >>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])\n >>> index.get_indexer_non_unique(['q', 'r', 't'])\n (array([-1, -1, -1]), array([0, 1, 2]))\n\n For this reason, the returned ``indexer`` contains only integers equal to -1.\n It demonstrates that there's no match between the index and the ``target``\n values at these positions. The mask [0, 1, 2] in the return value shows that\n the first, second, and third elements are missing.\n\n Notice that the return value is a tuple contains two items. In the example\n below the first item is an array of locations in ``index``. 
The second\n item is a mask shows that the first and third elements are missing.\n\n >>> index = pd.Index(['c', 'b', 'a', 'b', 'b'])\n >>> index.get_indexer_non_unique(['f', 'b', 's'])\n (array([-1, 1, 3, 4, -1]), array([0, 2]))\n " @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs) def get_indexer_non_unique(self, target) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: target = ensure_index(target) target = self._maybe_cast_listlike_indexer(target) if not self._should_compare(target) and (not self._should_partial_index(target)): return self._get_indexer_non_comparable(target, method=None, unique=False) (pself, ptarget) = self._maybe_downcast_for_indexing(target) if pself is not self or ptarget is not target: return pself.get_indexer_non_unique(ptarget) if self.dtype != target.dtype: dtype = self._find_common_type_compat(target) this = self.astype(dtype, copy=False) that = target.astype(dtype, copy=False) return this.get_indexer_non_unique(that) if self._is_multi and target._is_multi: engine = self._engine tgt_values = engine._extract_level_codes(target) else: tgt_values = target._get_engine_target() (indexer, missing) = self._engine.get_indexer_non_unique(tgt_values) return (ensure_platform_int(indexer), ensure_platform_int(missing)) @final def get_indexer_for(self, target) -> npt.NDArray[np.intp]: if self._index_as_unique: return self.get_indexer(target) (indexer, _) = self.get_indexer_non_unique(target) return indexer def _get_indexer_strict(self, key, axis_name: str_t) -> tuple[Index, np.ndarray]: keyarr = key if not isinstance(keyarr, Index): keyarr = com.asarray_tuplesafe(keyarr) if self._index_as_unique: indexer = self.get_indexer_for(keyarr) keyarr = self.reindex(keyarr)[0] else: (keyarr, indexer, new_indexer) = self._reindex_non_unique(keyarr) self._raise_if_missing(keyarr, indexer, axis_name) keyarr = self.take(indexer) if isinstance(key, Index): keyarr.name = key.name if lib.is_np_dtype(keyarr.dtype, 'mM') or isinstance(keyarr.dtype, DatetimeTZDtype): if isinstance(key, list) or (isinstance(key, type(self)) and key.freq is None): keyarr = keyarr._with_freq(None) return (keyarr, indexer) def _raise_if_missing(self, key, indexer, axis_name: str_t) -> None: if len(key) == 0: return missing_mask = indexer < 0 nmissing = missing_mask.sum() if nmissing: if nmissing == len(indexer): raise KeyError(f'None of [{key}] are in the [{axis_name}]') not_found = list(ensure_index(key)[missing_mask.nonzero()[0]].unique()) raise KeyError(f'{not_found} not in index') @overload def _get_indexer_non_comparable(self, target: Index, method, unique: Literal[True]=...) -> npt.NDArray[np.intp]: ... @overload def _get_indexer_non_comparable(self, target: Index, method, unique: Literal[False]) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... @overload def _get_indexer_non_comparable(self, target: Index, method, unique: bool=True) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ... 
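# Illustrative usage sketch (not part of the pandas source above): the two lookup paths
# defined here. get_indexer_non_unique returns both an indexer and the target positions that
# were not found; get_indexer_for dispatches to the unique or non-unique variant depending on
# _index_as_unique.
import pandas as pd

idx = pd.Index(['c', 'b', 'a', 'b', 'b'])            # non-unique index
indexer, missing = idx.get_indexer_non_unique(['b', 'x'])
assert list(indexer) == [1, 3, 4, -1]                # 'b' matches positions 1, 3 and 4
assert list(missing) == [1]                          # target position of the unmatched 'x'

assert list(idx.get_indexer_for(['a'])) == [2]                            # non-unique path
assert list(pd.Index(['a', 'b']).get_indexer_for(['b', 'z'])) == [1, -1]  # unique path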
@final def _get_indexer_non_comparable(self, target: Index, method, unique: bool=True) -> npt.NDArray[np.intp] | tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: if method is not None: other_dtype = _unpack_nested_dtype(target) raise TypeError(f'Cannot compare dtypes {self.dtype} and {other_dtype}') no_matches = -1 * np.ones(target.shape, dtype=np.intp) if unique: return no_matches else: missing = np.arange(len(target), dtype=np.intp) return (no_matches, missing) @property def _index_as_unique(self) -> bool: return self.is_unique _requires_unique_msg = 'Reindexing only valid with uniquely valued Index objects' @final def _maybe_downcast_for_indexing(self, other: Index) -> tuple[Index, Index]: if isinstance(self, ABCDatetimeIndex) and isinstance(other, ABCDatetimeIndex): if self.tz is not None and other.tz is not None and (not tz_compare(self.tz, other.tz)): return (self.tz_convert('UTC'), other.tz_convert('UTC')) elif self.inferred_type == 'date' and isinstance(other, ABCDatetimeIndex): try: return (type(other)(self), other) except OutOfBoundsDatetime: return (self, other) elif self.inferred_type == 'timedelta' and isinstance(other, ABCTimedeltaIndex): return (type(other)(self), other) elif self.dtype.kind == 'u' and other.dtype.kind == 'i': if other.min() >= 0: return (self, other.astype(self.dtype)) elif self._is_multi and (not other._is_multi): try: other = type(self).from_tuples(other) except (TypeError, ValueError): self = Index(self._values) if not is_object_dtype(self.dtype) and is_object_dtype(other.dtype): (other, self) = other._maybe_downcast_for_indexing(self) return (self, other) @final def _find_common_type_compat(self, target) -> DtypeObj: (target_dtype, _) = infer_dtype_from(target) if self.dtype == 'uint64' or target_dtype == 'uint64': if is_signed_integer_dtype(self.dtype) or is_signed_integer_dtype(target_dtype): return _dtype_obj dtype = find_result_type(self.dtype, target) dtype = common_dtype_categorical_compat([self, target], dtype) return dtype @final def _should_compare(self, other: Index) -> bool: if other.inferred_type == 'boolean' and is_any_real_numeric_dtype(self.dtype) or (self.inferred_type == 'boolean' and is_any_real_numeric_dtype(other.dtype)): return False dtype = _unpack_nested_dtype(other) return self._is_comparable_dtype(dtype) or is_object_dtype(dtype) def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: if self.dtype.kind == 'b': return dtype.kind == 'b' elif is_numeric_dtype(self.dtype): return is_numeric_dtype(dtype) return True @final def groupby(self, values) -> PrettyDict[Hashable, Index]: if isinstance(values, ABCMultiIndex): values = values._values values = Categorical(values) result = values._reverse_indexer() result = {k: self.take(v) for (k, v) in result.items()} return PrettyDict(result) def map(self, mapper, na_action: Literal['ignore'] | None=None): from pandas.core.indexes.multi import MultiIndex new_values = self._map_values(mapper, na_action=na_action) if new_values.size and isinstance(new_values[0], tuple): if isinstance(self, MultiIndex): names = self.names elif self.name: names = [self.name] * len(new_values[0]) else: names = None return MultiIndex.from_tuples(new_values, names=names) dtype = None if not new_values.size: dtype = self.dtype same_dtype = lib.infer_dtype(new_values, skipna=False) == self.inferred_type if same_dtype: new_values = maybe_cast_pointwise_result(new_values, self.dtype, same_dtype=same_dtype) return Index._with_infer(new_values, dtype=dtype, copy=False, name=self.name) @final def _transform_index(self, 
func, *, level=None) -> Index: if isinstance(self, ABCMultiIndex): values = [self.get_level_values(i).map(func) if i == level or level is None else self.get_level_values(i) for i in range(self.nlevels)] return type(self).from_arrays(values) else: items = [func(x) for x in self] return Index(items, name=self.name, tupleize_cols=False) def isin(self, values, level: str_t | int | None=None) -> npt.NDArray[np.bool_]: if level is not None: self._validate_index_level(level) return algos.isin(self._values, values) def _get_string_slice(self, key: str_t): raise NotImplementedError def slice_indexer(self, start: Hashable | None=None, end: Hashable | None=None, step: int | None=None) -> slice: (start_slice, end_slice) = self.slice_locs(start, end, step=step) if not is_scalar(start_slice): raise AssertionError('Start slice bound is non-scalar') if not is_scalar(end_slice): raise AssertionError('End slice bound is non-scalar') return slice(start_slice, end_slice, step) def _maybe_cast_indexer(self, key): return key def _maybe_cast_listlike_indexer(self, target) -> Index: return ensure_index(target) @final def _validate_indexer(self, form: Literal['positional', 'slice'], key, kind: Literal['getitem', 'iloc']) -> None: if not lib.is_int_or_none(key): self._raise_invalid_indexer(form, key) def _maybe_cast_slice_bound(self, label, side: str_t): if is_numeric_dtype(self.dtype): return self._maybe_cast_indexer(label) if (is_float(label) or is_integer(label)) and label not in self: self._raise_invalid_indexer('slice', label) return label def _searchsorted_monotonic(self, label, side: Literal['left', 'right']='left'): if self.is_monotonic_increasing: return self.searchsorted(label, side=side) elif self.is_monotonic_decreasing: pos = self[::-1].searchsorted(label, side='right' if side == 'left' else 'left') return len(self) - pos raise ValueError('index must be monotonic increasing or decreasing') def get_slice_bound(self, label, side: Literal['left', 'right']) -> int: if side not in ('left', 'right'): raise ValueError(f"Invalid value for side kwarg, must be either 'left' or 'right': {side}") original_label = label label = self._maybe_cast_slice_bound(label, side) try: slc = self.get_loc(label) except KeyError as err: try: return self._searchsorted_monotonic(label, side) except ValueError: raise err from None if isinstance(slc, np.ndarray): assert is_bool_dtype(slc.dtype) slc = lib.maybe_booleans_to_slice(slc.view('u1')) if isinstance(slc, np.ndarray): raise KeyError(f'Cannot get {side} slice bound for non-unique label: {original_label!r}') if isinstance(slc, slice): if side == 'left': return slc.start else: return slc.stop elif side == 'right': return slc + 1 else: return slc def slice_locs(self, start: SliceType=None, end: SliceType=None, step: int | None=None) -> tuple[int, int]: inc = step is None or step >= 0 if not inc: (start, end) = (end, start) if isinstance(start, (str, datetime)) and isinstance(end, (str, datetime)): try: ts_start = Timestamp(start) ts_end = Timestamp(end) except (ValueError, TypeError): pass else: if not tz_compare(ts_start.tzinfo, ts_end.tzinfo): raise ValueError('Both dates must have the same UTC offset') start_slice = None if start is not None: start_slice = self.get_slice_bound(start, 'left') if start_slice is None: start_slice = 0 end_slice = None if end is not None: end_slice = self.get_slice_bound(end, 'right') if end_slice is None: end_slice = len(self) if not inc: (end_slice, start_slice) = (start_slice - 1, end_slice - 1) if end_slice == -1: end_slice -= len(self) if 
start_slice == -1: start_slice -= len(self) return (start_slice, end_slice) def delete(self, loc: int | np.integer | list[int] | npt.NDArray[np.integer]) -> Self: values = self._values res_values: ArrayLike if isinstance(values, np.ndarray): res_values = np.delete(values, loc) else: res_values = values.delete(loc) return self._constructor._simple_new(res_values, name=self.name) def insert(self, loc: int, item) -> Index: item = lib.item_from_zerodim(item) if is_valid_na_for_dtype(item, self.dtype) and self.dtype != object: item = self._na_value arr = self._values try: if isinstance(arr, ExtensionArray): res_values = arr.insert(loc, item) return type(self)._simple_new(res_values, name=self.name) else: item = self._validate_fill_value(item) except (TypeError, ValueError, LossySetitemError): dtype = self._find_common_type_compat(item) if dtype == self.dtype: raise return self.astype(dtype).insert(loc, item) if arr.dtype != object or not isinstance(item, (tuple, np.datetime64, np.timedelta64)): casted = arr.dtype.type(item) new_values = np.insert(arr, loc, casted) else: new_values = np.insert(arr, loc, None) loc = loc if loc >= 0 else loc - 1 new_values[loc] = item out = Index(new_values, dtype=new_values.dtype, name=self.name) return out def drop(self, labels: Index | np.ndarray | Iterable[Hashable], errors: IgnoreRaise='raise') -> Index: if not isinstance(labels, Index): arr_dtype = 'object' if self.dtype == 'object' else None labels = com.index_labels_to_array(labels, dtype=arr_dtype) indexer = self.get_indexer_for(labels) mask = indexer == -1 if mask.any(): if errors != 'ignore': raise KeyError(f'{labels[mask].tolist()} not found in axis') indexer = indexer[~mask] return self.delete(indexer) @final def infer_objects(self, copy: bool=True) -> Index: if self._is_multi: raise NotImplementedError('infer_objects is not implemented for MultiIndex. 
Use index.to_frame().infer_objects() instead.') if self.dtype != object: return self.copy() if copy else self values = self._values values = cast('npt.NDArray[np.object_]', values) res_values = lib.maybe_convert_objects(values, convert_non_numeric=True) if copy and res_values is values: return self.copy() result = Index(res_values, name=self.name) if not copy and res_values is values and (self._references is not None): result._references = self._references result._references.add_index_reference(result) return result @final def diff(self, periods: int=1) -> Index: return Index(self.to_series().diff(periods)) def round(self, decimals: int=0) -> Self: return self._constructor(self.to_series().round(decimals)) def _cmp_method(self, other, op): if self.is_(other): if op in {operator.eq, operator.le, operator.ge}: arr = np.ones(len(self), dtype=bool) if self._can_hold_na and (not isinstance(self, ABCMultiIndex)): arr[self.isna()] = False return arr elif op is operator.ne: arr = np.zeros(len(self), dtype=bool) if self._can_hold_na and (not isinstance(self, ABCMultiIndex)): arr[self.isna()] = True return arr if isinstance(other, (np.ndarray, Index, ABCSeries, ExtensionArray)) and len(self) != len(other): raise ValueError('Lengths must match to compare') if not isinstance(other, ABCMultiIndex): other = extract_array(other, extract_numpy=True) else: other = np.asarray(other) if is_object_dtype(self.dtype) and isinstance(other, ExtensionArray): result = op(self._values, other) elif isinstance(self._values, ExtensionArray): result = op(self._values, other) elif is_object_dtype(self.dtype) and (not isinstance(self, ABCMultiIndex)): result = ops.comp_method_OBJECT_ARRAY(op, self._values, other) else: result = ops.comparison_op(self._values, other, op) return result @final def _logical_method(self, other, op): res_name = ops.get_op_result_name(self, other) lvalues = self._values rvalues = extract_array(other, extract_numpy=True, extract_range=True) res_values = ops.logical_op(lvalues, rvalues, op) return self._construct_result(res_values, name=res_name) @final def _construct_result(self, result, name): if isinstance(result, tuple): return (Index(result[0], name=name, dtype=result[0].dtype), Index(result[1], name=name, dtype=result[1].dtype)) return Index(result, name=name, dtype=result.dtype) def _arith_method(self, other, op): if isinstance(other, Index) and is_object_dtype(other.dtype) and (type(other) is not Index): return NotImplemented return super()._arith_method(other, op) @final def _unary_method(self, op): result = op(self._values) return Index(result, name=self.name) def __abs__(self) -> Index: return self._unary_method(operator.abs) def __neg__(self) -> Index: return self._unary_method(operator.neg) def __pos__(self) -> Index: return self._unary_method(operator.pos) def __invert__(self) -> Index: return self._unary_method(operator.inv) def any(self, *args, **kwargs): nv.validate_any(args, kwargs) self._maybe_disable_logical_methods('any') vals = self._values if not isinstance(vals, np.ndarray): return vals._reduce('any') return np.any(vals) def all(self, *args, **kwargs): nv.validate_all(args, kwargs) self._maybe_disable_logical_methods('all') vals = self._values if not isinstance(vals, np.ndarray): return vals._reduce('all') return np.all(vals) @final def _maybe_disable_logical_methods(self, opname: str_t) -> None: if isinstance(self, ABCMultiIndex): raise TypeError(f'cannot perform {opname} with {type(self).__name__}') @Appender(IndexOpsMixin.argmin.__doc__) def argmin(self, axis: AxisInt | 
None=None, skipna: bool=True, *args, **kwargs) -> int: nv.validate_argmin(args, kwargs) nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: if not skipna: raise ValueError('Encountered an NA value with skipna=False') elif self._isnan.all(): raise ValueError('Encountered all NA values') return super().argmin(skipna=skipna) @Appender(IndexOpsMixin.argmax.__doc__) def argmax(self, axis: AxisInt | None=None, skipna: bool=True, *args, **kwargs) -> int: nv.validate_argmax(args, kwargs) nv.validate_minmax_axis(axis) if not self._is_multi and self.hasnans: if not skipna: raise ValueError('Encountered an NA value with skipna=False') elif self._isnan.all(): raise ValueError('Encountered all NA values') return super().argmax(skipna=skipna) def min(self, axis: AxisInt | None=None, skipna: bool=True, *args, **kwargs): nv.validate_min(args, kwargs) nv.validate_minmax_axis(axis) if not len(self): return self._na_value if len(self) and self.is_monotonic_increasing: first = self[0] if not isna(first): return first if not self._is_multi and self.hasnans: mask = self._isnan if not skipna or mask.all(): return self._na_value if not self._is_multi and (not isinstance(self._values, np.ndarray)): return self._values._reduce(name='min', skipna=skipna) return nanops.nanmin(self._values, skipna=skipna) def max(self, axis: AxisInt | None=None, skipna: bool=True, *args, **kwargs): nv.validate_max(args, kwargs) nv.validate_minmax_axis(axis) if not len(self): return self._na_value if len(self) and self.is_monotonic_increasing: last = self[-1] if not isna(last): return last if not self._is_multi and self.hasnans: mask = self._isnan if not skipna or mask.all(): return self._na_value if not self._is_multi and (not isinstance(self._values, np.ndarray)): return self._values._reduce(name='max', skipna=skipna) return nanops.nanmax(self._values, skipna=skipna) @final @property def shape(self) -> Shape: return (len(self),) def maybe_sequence_to_range(sequence) -> Any | range: if isinstance(sequence, (range, ExtensionArray)): return sequence elif len(sequence) == 1 or lib.infer_dtype(sequence, skipna=False) != 'integer': return sequence elif isinstance(sequence, (ABCSeries, Index)) and (not (isinstance(sequence.dtype, np.dtype) and sequence.dtype.kind == 'i')): return sequence if len(sequence) == 0: return range(0) try: np_sequence = np.asarray(sequence, dtype=np.int64) except OverflowError: return sequence diff = np_sequence[1] - np_sequence[0] if diff == 0: return sequence elif len(sequence) == 2 or lib.is_sequence_range(np_sequence, diff): return range(np_sequence[0], np_sequence[-1] + diff, diff) else: return sequence def ensure_index_from_sequences(sequences, names=None) -> Index: from pandas.core.indexes.api import default_index from pandas.core.indexes.multi import MultiIndex if len(sequences) == 0: return default_index(0) elif len(sequences) == 1: if names is not None: names = names[0] return Index(maybe_sequence_to_range(sequences[0]), name=names) else: return MultiIndex.from_arrays(sequences, names=names) def ensure_index(index_like: Axes, copy: bool=False) -> Index: if isinstance(index_like, Index): if copy: index_like = index_like.copy() return index_like if isinstance(index_like, ABCSeries): name = index_like.name return Index(index_like, name=name, copy=copy) if is_iterator(index_like): index_like = list(index_like) if isinstance(index_like, list): if type(index_like) is not list: index_like = list(index_like) if len(index_like) and lib.is_all_arraylike(index_like): from pandas.core.indexes.multi 
import MultiIndex return MultiIndex.from_arrays(index_like) else: return Index(index_like, copy=copy, tupleize_cols=False) else: return Index(index_like, copy=copy) def ensure_has_len(seq): try: len(seq) except TypeError: return list(seq) else: return seq def trim_front(strings: list[str]) -> list[str]: if not strings: return strings while all(strings) and all((x[0] == ' ' for x in strings)): strings = [x[1:] for x in strings] return strings def _validate_join_method(method: str) -> None: if method not in ['left', 'right', 'inner', 'outer']: raise ValueError(f'do not recognize join method {method}') def maybe_extract_name(name, obj, cls) -> Hashable: if name is None and isinstance(obj, (Index, ABCSeries)): name = obj.name if not is_hashable(name): raise TypeError(f'{cls.__name__}.name must be a hashable type') return name def get_unanimous_names(*indexes: Index) -> tuple[Hashable, ...]: name_tups = (tuple(i.names) for i in indexes) name_sets = ({*ns} for ns in zip_longest(*name_tups)) names = tuple((ns.pop() if len(ns) == 1 else None for ns in name_sets)) return names def _unpack_nested_dtype(other: Index) -> DtypeObj: dtype = other.dtype if isinstance(dtype, CategoricalDtype): return dtype.categories.dtype elif isinstance(dtype, ArrowDtype): import pyarrow as pa if pa.types.is_dictionary(dtype.pyarrow_dtype): other = other[:0].astype(ArrowDtype(dtype.pyarrow_dtype.value_type)) return other.dtype def _maybe_try_sort(result: Index | ArrayLike, sort: bool | None): if sort is not False: try: result = algos.safe_sort(result) except TypeError as err: if sort is True: raise warnings.warn(f'{err}, sort order is undefined for incomparable objects.', RuntimeWarning, stacklevel=find_stack_level()) return result def get_values_for_csv(values: ArrayLike, *, date_format, na_rep: str='nan', quoting=None, float_format=None, decimal: str='.') -> npt.NDArray[np.object_]: if isinstance(values, Categorical) and values.categories.dtype.kind in 'Mm': values = algos.take_nd(values.categories._values, ensure_platform_int(values._codes), fill_value=na_rep) values = ensure_wrapped_if_datetimelike(values) if isinstance(values, (DatetimeArray, TimedeltaArray)): if values.ndim == 1: result = values._format_native_types(na_rep=na_rep, date_format=date_format) result = result.astype(object, copy=False) return result results_converted = [] for i in range(len(values)): result = values[i, :]._format_native_types(na_rep=na_rep, date_format=date_format) results_converted.append(result.astype(object, copy=False)) return np.vstack(results_converted) elif isinstance(values.dtype, PeriodDtype): values = cast('PeriodArray', values) res = values._format_native_types(na_rep=na_rep, date_format=date_format) return res elif isinstance(values.dtype, IntervalDtype): values = cast('IntervalArray', values) mask = values.isna() if not quoting: result = np.asarray(values).astype(str) else: result = np.array(values, dtype=object, copy=True) result[mask] = na_rep return result elif values.dtype.kind == 'f' and (not isinstance(values.dtype, SparseDtype)): if float_format is None and decimal == '.': mask = isna(values) if not quoting: values = values.astype(str) else: values = np.array(values, dtype='object') values[mask] = na_rep values = values.astype(object, copy=False) return values from pandas.io.formats.format import FloatArrayFormatter formatter = FloatArrayFormatter(values, na_rep=na_rep, float_format=float_format, decimal=decimal, quoting=quoting, fixed_width=False) res = formatter.get_result_as_array() res = res.astype(object, 
copy=False) return res elif isinstance(values, ExtensionArray): mask = isna(values) new_values = np.asarray(values.astype(object)) new_values[mask] = na_rep return new_values else: mask = isna(values) itemsize = writers.word_len(na_rep) if values.dtype != _dtype_obj and (not quoting) and itemsize: values = values.astype(str) if values.dtype.itemsize / np.dtype('U1').itemsize < itemsize: values = values.astype(f'<U{itemsize}') else: values = np.array(values, dtype='object') values[mask] = na_rep values = values.astype(object, copy=False) return values # File: pandas-main/pandas/core/indexes/category.py class CategoricalIndex(NDArrayBackedExtensionIndex): _typ = 'categoricalindex' _data_cls = Categorical @cache_readonly def _should_fallback_to_positional(self) -> bool: return self.categories._should_fallback_to_positional codes: np.ndarray categories: Index ordered: bool | None _data: Categorical _values: Categorical @property def _engine_type(self) -> type[libindex.IndexEngine]: return {np.int8: libindex.Int8Engine, np.int16: libindex.Int16Engine, np.int32: libindex.Int32Engine, np.int64: libindex.Int64Engine}[self.codes.dtype.type] def __new__(cls, data=None, categories=None, ordered=None, dtype: Dtype | None=None, copy: bool=False, name: Hashable | None=None) -> Self: name = maybe_extract_name(name, data, cls) if is_scalar(data): cls._raise_scalar_data_error(data) data = Categorical(data, categories=categories, ordered=ordered, dtype=dtype, copy=copy) return cls._simple_new(data, name=name) def _is_dtype_compat(self, other: Index) -> Categorical: if isinstance(other.dtype, CategoricalDtype): cat = extract_array(other) cat = cast(Categorical, cat) if not cat._categories_match_up_to_permutation(self._values): raise TypeError('categories must match existing categories when appending') elif other._is_multi: raise TypeError('MultiIndex is not dtype-compatible with CategoricalIndex') else: values = other cat = Categorical(other, dtype=self.dtype) other = CategoricalIndex(cat) if not other.isin(values).all(): raise TypeError('cannot append a non-category item to a CategoricalIndex') cat = other._values if not ((cat == values) | isna(cat) & isna(values)).all(): raise TypeError('categories must match existing categories when appending') return cat def equals(self, other: object) -> bool: if self.is_(other): return True if not isinstance(other, Index): return False try: other = self._is_dtype_compat(other) except (TypeError, ValueError): return False return self._data.equals(other) @property def _formatter_func(self): return self.categories._formatter_func def _format_attrs(self): attrs: list[tuple[str, str | int | bool | None]] attrs = [('categories', f"[{', '.join(self._data._repr_categories())}]"), ('ordered', self.ordered)] extra = super()._format_attrs() return attrs + extra @property def inferred_type(self) -> str: return 'categorical' @doc(Index.__contains__) def __contains__(self, key: Any) -> bool: if is_valid_na_for_dtype(key, self.categories.dtype): return self.hasnans if self.categories._typ == 'rangeindex': container: Index | libindex.IndexEngine | libindex.ExtensionEngine = self.categories else: container = self._engine return contains(self, key, container=container) def reindex(self, target, method=None, level=None, limit: int | None=None, tolerance=None) -> tuple[Index, npt.NDArray[np.intp] | None]: if method is not None: raise NotImplementedError('argument method is not implemented for CategoricalIndex.reindex') if level is not None: raise NotImplementedError('argument level is not implemented for CategoricalIndex.reindex') if limit is not None: raise NotImplementedError('argument limit is not implemented for CategoricalIndex.reindex') return super().reindex(target) def _maybe_cast_indexer(self, key) -> int: try: return self._data._unbox_scalar(key) except KeyError: if is_valid_na_for_dtype(key, self.categories.dtype): return -1
raise def _maybe_cast_listlike_indexer(self, values) -> CategoricalIndex: if isinstance(values, CategoricalIndex): values = values._data if isinstance(values, Categorical): cat = self._data._encode_with_my_categories(values) codes = cat._codes else: codes = self.categories.get_indexer(values) codes = codes.astype(self.codes.dtype, copy=False) cat = self._data._from_backing_data(codes) return type(self)._simple_new(cat) def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: return self.categories._is_comparable_dtype(dtype) def map(self, mapper, na_action: Literal['ignore'] | None=None): mapped = self._values.map(mapper, na_action=na_action) return Index(mapped, name=self.name) def _concat(self, to_concat: list[Index], name: Hashable) -> Index: try: cat = Categorical._concat_same_type([self._is_dtype_compat(c) for c in to_concat]) except TypeError: res = concat_compat([x._values for x in to_concat]) return Index(res, name=name) else: return type(self)._simple_new(cat, name=name) # File: pandas-main/pandas/core/indexes/datetimelike.py """""" from __future__ import annotations from abc import ABC, abstractmethod from typing import TYPE_CHECKING, Any, cast, final import numpy as np from pandas._libs import NaT, Timedelta, lib from pandas._libs.tslibs import BaseOffset, Resolution, Tick, parsing, to_offset from pandas.compat.numpy import function as nv from pandas.errors import InvalidIndexError, NullFrequencyError from pandas.util._decorators import Appender, cache_readonly, doc from pandas.core.dtypes.common import is_integer, is_list_like from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import CategoricalDtype, PeriodDtype from pandas.core.arrays import DatetimeArray, ExtensionArray, PeriodArray, TimedeltaArray from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin import pandas.core.common as com import pandas.core.indexes.base as ibase from pandas.core.indexes.base import Index, _index_shared_docs from pandas.core.indexes.extension import NDArrayBackedExtensionIndex from pandas.core.indexes.range import RangeIndex from pandas.core.tools.timedeltas import to_timedelta if TYPE_CHECKING: from collections.abc import Sequence from datetime import datetime from pandas._typing import Axis, JoinHow, Self, npt from pandas import CategoricalIndex _index_doc_kwargs = dict(ibase._index_doc_kwargs) class DatetimeIndexOpsMixin(NDArrayBackedExtensionIndex, ABC): _can_hold_strings = False _data: DatetimeArray | TimedeltaArray | PeriodArray @doc(DatetimeLikeArrayMixin.mean) def mean(self, *, skipna: bool=True, axis: int | None=0): return self._data.mean(skipna=skipna, axis=axis) @property def freq(self) -> BaseOffset | None: return self._data.freq @freq.setter def freq(self, value) -> None: self._data.freq = value @property def asi8(self) -> npt.NDArray[np.int64]: return self._data.asi8 @property @doc(DatetimeLikeArrayMixin.freqstr) def freqstr(self) -> str: from pandas import PeriodIndex if self._data.freqstr is not None and isinstance(self._data, (PeriodArray, PeriodIndex)): freq = PeriodDtype(self._data.freq)._freqstr return freq else: return self._data.freqstr @cache_readonly @abstractmethod def _resolution_obj(self) -> Resolution: ... 
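# Illustrative usage sketch (not part of the pandas source above): exercises the
# CategoricalIndex methods shown earlier -- __contains__ checks values through the codes
# engine (so an unused category is not "in" the index), map() maps through the categories,
# and reindex() rejects the method= argument with NotImplementedError.
import pandas as pd

ci = pd.CategoricalIndex(['a', 'b', 'a'], categories=['a', 'b', 'c'])
assert 'c' in ci.categories and 'c' not in ci   # 'c' is a category but not a value
assert list(ci.map(str.upper)) == ['A', 'B', 'A']

try:
    ci.reindex(['a', 'b'], method='ffill')
except NotImplementedError:
    pass                                        # method= is explicitly unsupported here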
@cache_readonly @doc(DatetimeLikeArrayMixin.resolution) def resolution(self) -> str: return self._data.resolution @cache_readonly def hasnans(self) -> bool: return self._data._hasna def equals(self, other: Any) -> bool: if self.is_(other): return True if not isinstance(other, Index): return False elif other.dtype.kind in 'iufc': return False elif not isinstance(other, type(self)): should_try = False inferable = self._data._infer_matches if other.dtype == object: should_try = other.inferred_type in inferable elif isinstance(other.dtype, CategoricalDtype): other = cast('CategoricalIndex', other) should_try = other.categories.inferred_type in inferable if should_try: try: other = type(self)(other) except (ValueError, TypeError, OverflowError): return False if self.dtype != other.dtype: return False return np.array_equal(self.asi8, other.asi8) @Appender(Index.__contains__.__doc__) def __contains__(self, key: Any) -> bool: hash(key) try: self.get_loc(key) except (KeyError, TypeError, ValueError, InvalidIndexError): return False return True def _convert_tolerance(self, tolerance, target): tolerance = np.asarray(to_timedelta(tolerance).to_numpy()) return super()._convert_tolerance(tolerance, target) _default_na_rep = 'NaT' def _format_with_header(self, *, header: list[str], na_rep: str, date_format: str | None=None) -> list[str]: return header + list(self._get_values_for_csv(na_rep=na_rep, date_format=date_format)) @property def _formatter_func(self): return self._data._formatter() def _format_attrs(self): attrs = super()._format_attrs() for attrib in self._attributes: if attrib == 'freq': freq = self.freqstr if freq is not None: freq = repr(freq) attrs.append(('freq', freq)) return attrs @Appender(Index._summary.__doc__) def _summary(self, name=None) -> str: result = super()._summary(name=name) if self.freq: result += f'\nFreq: {self.freqstr}' return result @final def _can_partial_date_slice(self, reso: Resolution) -> bool: return reso > self._resolution_obj def _parsed_string_to_bounds(self, reso: Resolution, parsed): raise NotImplementedError def _parse_with_reso(self, label: str) -> tuple[datetime, Resolution]: try: if self.freq is None or hasattr(self.freq, 'rule_code'): freq = self.freq except NotImplementedError: freq = getattr(self, 'freqstr', getattr(self, 'inferred_freq', None)) freqstr: str | None if freq is not None and (not isinstance(freq, str)): freqstr = freq.rule_code else: freqstr = freq if isinstance(label, np.str_): label = str(label) (parsed, reso_str) = parsing.parse_datetime_string_with_reso(label, freqstr) reso = Resolution.from_attrname(reso_str) return (parsed, reso) def _get_string_slice(self, key: str) -> slice | npt.NDArray[np.intp]: (parsed, reso) = self._parse_with_reso(key) try: return self._partial_date_slice(reso, parsed) except KeyError as err: raise KeyError(key) from err @final def _partial_date_slice(self, reso: Resolution, parsed: datetime) -> slice | npt.NDArray[np.intp]: if not self._can_partial_date_slice(reso): raise ValueError (t1, t2) = self._parsed_string_to_bounds(reso, parsed) vals = self._data._ndarray unbox = self._data._unbox if self.is_monotonic_increasing: if len(self) and (t1 < self[0] and t2 < self[0] or (t1 > self[-1] and t2 > self[-1])): raise KeyError left = vals.searchsorted(unbox(t1), side='left') right = vals.searchsorted(unbox(t2), side='right') return slice(left, right) else: lhs_mask = vals >= unbox(t1) rhs_mask = vals <= unbox(t2) return (lhs_mask & rhs_mask).nonzero()[0] def _maybe_cast_slice_bound(self, label, side: str): if 
isinstance(label, str): try: (parsed, reso) = self._parse_with_reso(label) except ValueError as err: self._raise_invalid_indexer('slice', label, err) (lower, upper) = self._parsed_string_to_bounds(reso, parsed) return lower if side == 'left' else upper elif not isinstance(label, self._data._recognized_scalars): self._raise_invalid_indexer('slice', label) return label def shift(self, periods: int=1, freq=None) -> Self: raise NotImplementedError @doc(Index._maybe_cast_listlike_indexer) def _maybe_cast_listlike_indexer(self, keyarr): try: res = self._data._validate_listlike(keyarr, allow_object=True) except (ValueError, TypeError): if not isinstance(keyarr, ExtensionArray): res = com.asarray_tuplesafe(keyarr) else: res = keyarr return Index(res, dtype=res.dtype) class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin, ABC): _data: DatetimeArray | TimedeltaArray _comparables = ['name', 'freq'] _attributes = ['name', 'freq'] _is_monotonic_increasing = Index.is_monotonic_increasing _is_monotonic_decreasing = Index.is_monotonic_decreasing _is_unique = Index.is_unique @property def unit(self) -> str: return self._data.unit def as_unit(self, unit: str) -> Self: arr = self._data.as_unit(unit) return type(self)._simple_new(arr, name=self.name) def _with_freq(self, freq): arr = self._data._with_freq(freq) return type(self)._simple_new(arr, name=self._name) @property def values(self) -> np.ndarray: data = self._data._ndarray data = data.view() data.flags.writeable = False return data @doc(DatetimeIndexOpsMixin.shift) def shift(self, periods: int=1, freq=None) -> Self: if freq is not None and freq != self.freq: if isinstance(freq, str): freq = to_offset(freq) offset = periods * freq return self + offset if periods == 0 or len(self) == 0: return self.copy() if self.freq is None: raise NullFrequencyError('Cannot shift with no freq') start = self[0] + periods * self.freq end = self[-1] + periods * self.freq result = self._data._generate_range(start=start, end=end, periods=None, freq=self.freq, unit=self.unit) return type(self)._simple_new(result, name=self.name) @cache_readonly @doc(DatetimeLikeArrayMixin.inferred_freq) def inferred_freq(self) -> str | None: return self._data.inferred_freq @cache_readonly def _as_range_index(self) -> RangeIndex: freq = cast(Tick, self.freq) tick = Timedelta(freq).as_unit(self.unit)._value rng = range(self[0]._value, self[-1]._value + tick, tick) return RangeIndex(rng) def _can_range_setop(self, other) -> bool: return isinstance(self.freq, Tick) and isinstance(other.freq, Tick) def _wrap_range_setop(self, other, res_i8) -> Self: new_freq = None if not len(res_i8): new_freq = self.freq elif isinstance(res_i8, RangeIndex): new_freq = to_offset(Timedelta(res_i8.step, unit=self.unit).as_unit(self.unit)) res_values = res_i8.values.view(self._data._ndarray.dtype) result = type(self._data)._simple_new(res_values, dtype=self.dtype, freq=new_freq) return cast('Self', self._wrap_setop_result(other, result)) def _range_intersect(self, other, sort) -> Self: left = self._as_range_index right = other._as_range_index res_i8 = left.intersection(right, sort=sort) return self._wrap_range_setop(other, res_i8) def _range_union(self, other, sort) -> Self: left = self._as_range_index right = other._as_range_index res_i8 = left.union(right, sort=sort) return self._wrap_range_setop(other, res_i8) def _intersection(self, other: Index, sort: bool=False) -> Index: other = cast('DatetimeTimedeltaMixin', other) if self._can_range_setop(other): return self._range_intersect(other, sort=sort) if not 
self._can_fast_intersect(other): result = Index._intersection(self, other, sort=sort) result = self._wrap_setop_result(other, result) return result._with_freq(None)._with_freq('infer') else: return self._fast_intersect(other, sort) def _fast_intersect(self, other, sort): if self[0] <= other[0]: (left, right) = (self, other) else: (left, right) = (other, self) end = min(left[-1], right[-1]) start = right[0] if end < start: result = self[:0] else: lslice = slice(*left.slice_locs(start, end)) result = left._values[lslice] return result def _can_fast_intersect(self, other: Self) -> bool: if self.freq is None: return False elif other.freq != self.freq: return False elif not self.is_monotonic_increasing: return False return self.freq.n == 1 def _can_fast_union(self, other: Self) -> bool: freq = self.freq if freq is None or freq != other.freq: return False if not self.is_monotonic_increasing: return False if len(self) == 0 or len(other) == 0: return True if self[0] <= other[0]: (left, right) = (self, other) else: (left, right) = (other, self) right_start = right[0] left_end = left[-1] return right_start == left_end + freq or right_start in left def _fast_union(self, other: Self, sort=None) -> Self: if self[0] <= other[0]: (left, right) = (self, other) elif sort is False: (left, right) = (self, other) left_start = left[0] loc = right.searchsorted(left_start, side='left') right_chunk = right._values[:loc] dates = concat_compat((left._values, right_chunk)) result = type(self)._simple_new(dates, name=self.name) return result else: (left, right) = (other, self) left_end = left[-1] right_end = right[-1] if left_end < right_end: loc = right.searchsorted(left_end, side='right') right_chunk = right._values[loc:] dates = concat_compat([left._values, right_chunk]) assert isinstance(dates, type(self._data)) assert dates._freq == self.freq result = type(self)._simple_new(dates) return result else: return left def _union(self, other, sort): assert isinstance(other, type(self)) assert self.dtype == other.dtype if self._can_range_setop(other): return self._range_union(other, sort=sort) if self._can_fast_union(other): result = self._fast_union(other, sort=sort) return result else: return super()._union(other, sort)._with_freq('infer') def _get_join_freq(self, other): freq = None if self._can_fast_union(other): freq = self.freq return freq def _wrap_join_result(self, joined, other, lidx: npt.NDArray[np.intp] | None, ridx: npt.NDArray[np.intp] | None, how: JoinHow) -> tuple[Self, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: assert other.dtype == self.dtype, (other.dtype, self.dtype) (join_index, lidx, ridx) = super()._wrap_join_result(joined, other, lidx, ridx, how) join_index._data._freq = self._get_join_freq(other) return (join_index, lidx, ridx) def _get_engine_target(self) -> np.ndarray: return self._data._ndarray.view('i8') def _from_join_target(self, result: np.ndarray): result = result.view(self._data._ndarray.dtype) return self._data._from_backing_data(result) def _get_delete_freq(self, loc: int | slice | Sequence[int]): freq = None if self.freq is not None: if is_integer(loc): if loc in (0, -len(self), -1, len(self) - 1): freq = self.freq else: if is_list_like(loc): loc = lib.maybe_indices_to_slice(np.asarray(loc, dtype=np.intp), len(self)) if isinstance(loc, slice) and loc.step in (1, None): if loc.start in (0, None) or loc.stop in (len(self), None): freq = self.freq return freq def _get_insert_freq(self, loc: int, item): value = self._data._validate_scalar(item) item = 
self._data._box_func(value) freq = None if self.freq is not None: if self.size: if item is NaT: pass elif loc in (0, -len(self)) and item + self.freq == self[0]: freq = self.freq elif loc == len(self) and item - self.freq == self[-1]: freq = self.freq elif isinstance(self.freq, Tick): freq = self.freq elif self.freq.is_on_offset(item): freq = self.freq return freq @doc(NDArrayBackedExtensionIndex.delete) def delete(self, loc) -> Self: result = super().delete(loc) result._data._freq = self._get_delete_freq(loc) return result @doc(NDArrayBackedExtensionIndex.insert) def insert(self, loc: int, item): result = super().insert(loc, item) if isinstance(result, type(self)): result._data._freq = self._get_insert_freq(loc, item) return result @Appender(_index_shared_docs['take'] % _index_doc_kwargs) def take(self, indices, axis: Axis=0, allow_fill: bool=True, fill_value=None, **kwargs) -> Self: nv.validate_take((), kwargs) indices = np.asarray(indices, dtype=np.intp) result = NDArrayBackedExtensionIndex.take(self, indices, axis, allow_fill, fill_value, **kwargs) maybe_slice = lib.maybe_indices_to_slice(indices, len(self)) if isinstance(maybe_slice, slice): freq = self._data._get_getitem_freq(maybe_slice) result._data._freq = freq return result # File: pandas-main/pandas/core/indexes/datetimes.py from __future__ import annotations import datetime as dt import operator from typing import TYPE_CHECKING import warnings import numpy as np from pandas._libs import NaT, Period, Timestamp, index as libindex, lib from pandas._libs.tslibs import Resolution, Tick, Timedelta, periods_per_day, timezones, to_offset from pandas._libs.tslibs.offsets import prefix_mapping from pandas.util._decorators import cache_readonly, doc from pandas.core.dtypes.common import is_scalar from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import is_valid_na_for_dtype from pandas.core.arrays.datetimes import DatetimeArray, tz_to_dtype import pandas.core.common as com from pandas.core.indexes.base import Index, maybe_extract_name from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin from pandas.core.indexes.extension import inherit_names from pandas.core.tools.times import to_time if TYPE_CHECKING: from collections.abc import Hashable from pandas._typing import Dtype, DtypeObj, Frequency, IntervalClosedType, Self, TimeAmbiguous, TimeNonexistent, npt from pandas.core.api import DataFrame, PeriodIndex from pandas._libs.tslibs.dtypes import OFFSET_TO_PERIOD_FREQSTR def _new_DatetimeIndex(cls, d): if 'data' in d and (not isinstance(d['data'], DatetimeIndex)): data = d.pop('data') if not isinstance(data, DatetimeArray): tz = d.pop('tz') freq = d.pop('freq') dta = DatetimeArray._simple_new(data, dtype=tz_to_dtype(tz), freq=freq) else: dta = data for key in ['tz', 'freq']: if key in d: assert d[key] == getattr(dta, key) d.pop(key) result = cls._simple_new(dta, **d) else: with warnings.catch_warnings(): warnings.simplefilter('ignore') result = cls.__new__(cls, **d) return result @inherit_names(DatetimeArray._field_ops + [method for method in DatetimeArray._datetimelike_methods if method not in ('tz_localize', 'tz_convert', 'strftime')], DatetimeArray, wrap=True) @inherit_names(['is_normalized'], DatetimeArray, cache=True) @inherit_names(['tz', 'tzinfo', 'dtype', 'to_pydatetime', 'date', 'time', 'timetz', 'std'] + DatetimeArray._bool_ops, DatetimeArray) class DatetimeIndex(DatetimeTimedeltaMixin): _typ = 'datetimeindex' _data_cls = DatetimeArray 
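# Illustrative usage sketch (not part of the pandas source above): the freq bookkeeping done
# by DatetimeTimedeltaMixin.delete/insert/shift above. Edits at the edges of a regular
# DatetimeIndex preserve the freq, edits in the middle drop it, and shift() moves the whole
# range by periods * freq.
import pandas as pd

dti = pd.date_range('2024-01-01', periods=5, freq='D')
assert dti.delete(0).freq == dti.freq               # deleting an endpoint keeps the freq
assert dti.delete(2).freq is None                   # deleting from the middle drops it
assert dti.insert(len(dti), pd.Timestamp('2024-01-06')).freq == dti.freq
assert dti.shift(2)[0] == pd.Timestamp('2024-01-03')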
_supports_partial_string_indexing = True @property def _engine_type(self) -> type[libindex.DatetimeEngine]: return libindex.DatetimeEngine _data: DatetimeArray _values: DatetimeArray tz: dt.tzinfo | None @doc(DatetimeArray.strftime) def strftime(self, date_format) -> Index: arr = self._data.strftime(date_format) return Index(arr, name=self.name, dtype=arr.dtype) @doc(DatetimeArray.tz_convert) def tz_convert(self, tz) -> Self: arr = self._data.tz_convert(tz) return type(self)._simple_new(arr, name=self.name, refs=self._references) @doc(DatetimeArray.tz_localize) def tz_localize(self, tz, ambiguous: TimeAmbiguous='raise', nonexistent: TimeNonexistent='raise') -> Self: arr = self._data.tz_localize(tz, ambiguous, nonexistent) return type(self)._simple_new(arr, name=self.name) @doc(DatetimeArray.to_period) def to_period(self, freq=None) -> PeriodIndex: from pandas.core.indexes.api import PeriodIndex arr = self._data.to_period(freq) return PeriodIndex._simple_new(arr, name=self.name) @doc(DatetimeArray.to_julian_date) def to_julian_date(self) -> Index: arr = self._data.to_julian_date() return Index._simple_new(arr, name=self.name) @doc(DatetimeArray.isocalendar) def isocalendar(self) -> DataFrame: df = self._data.isocalendar() return df.set_index(self) @cache_readonly def _resolution_obj(self) -> Resolution: return self._data._resolution_obj def __new__(cls, data=None, freq: Frequency | lib.NoDefault=lib.no_default, tz=lib.no_default, ambiguous: TimeAmbiguous='raise', dayfirst: bool=False, yearfirst: bool=False, dtype: Dtype | None=None, copy: bool=False, name: Hashable | None=None) -> Self: if is_scalar(data): cls._raise_scalar_data_error(data) name = maybe_extract_name(name, data, cls) if isinstance(data, DatetimeArray) and freq is lib.no_default and (tz is lib.no_default) and (dtype is None): if copy: data = data.copy() return cls._simple_new(data, name=name) dtarr = DatetimeArray._from_sequence_not_strict(data, dtype=dtype, copy=copy, tz=tz, freq=freq, dayfirst=dayfirst, yearfirst=yearfirst, ambiguous=ambiguous) refs = None if not copy and isinstance(data, (Index, ABCSeries)): refs = data._references subarr = cls._simple_new(dtarr, name=name, refs=refs) return subarr @cache_readonly def _is_dates_only(self) -> bool: if isinstance(self.freq, Tick): delta = Timedelta(self.freq) if delta % dt.timedelta(days=1) != dt.timedelta(days=0): return False return self._values._is_dates_only def __reduce__(self): d = {'data': self._data, 'name': self.name} return (_new_DatetimeIndex, (type(self), d), None) def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: if self.tz is not None: return isinstance(dtype, DatetimeTZDtype) return lib.is_np_dtype(dtype, 'M') @cache_readonly def _formatter_func(self): from pandas.io.formats.format import get_format_datetime64 formatter = get_format_datetime64(is_dates_only=self._is_dates_only) return lambda x: f"'{formatter(x)}'" def _can_range_setop(self, other) -> bool: if self.tz is not None and (not timezones.is_utc(self.tz)) and (not timezones.is_fixed_offset(self.tz)): return False if other.tz is not None and (not timezones.is_utc(other.tz)) and (not timezones.is_fixed_offset(other.tz)): return False return super()._can_range_setop(other) def _get_time_micros(self) -> npt.NDArray[np.int64]: values = self._data._local_timestamps() ppd = periods_per_day(self._data._creso) frac = values % ppd if self.unit == 'ns': micros = frac // 1000 elif self.unit == 'us': micros = frac elif self.unit == 'ms': micros = frac * 1000 elif self.unit == 's': micros = frac * 1000000 
else: raise NotImplementedError(self.unit) micros[self._isnan] = -1 return micros def snap(self, freq: Frequency='S') -> DatetimeIndex: freq = to_offset(freq) dta = self._data.copy() for (i, v) in enumerate(self): s = v if not freq.is_on_offset(s): t0 = freq.rollback(s) t1 = freq.rollforward(s) if abs(s - t0) < abs(t1 - s): s = t0 else: s = t1 dta[i] = s return DatetimeIndex._simple_new(dta, name=self.name) def _parsed_string_to_bounds(self, reso: Resolution, parsed: dt.datetime) -> tuple[Timestamp, Timestamp]: freq = OFFSET_TO_PERIOD_FREQSTR.get(reso.attr_abbrev, reso.attr_abbrev) per = Period(parsed, freq=freq) (start, end) = (per.start_time, per.end_time) start = start.as_unit(self.unit) end = end.as_unit(self.unit) start = start.tz_localize(parsed.tzinfo) end = end.tz_localize(parsed.tzinfo) if parsed.tzinfo is not None: if self.tz is None: raise ValueError('The index must be timezone aware when indexing with a date string with a UTC offset') return (start, end) def _parse_with_reso(self, label: str) -> tuple[Timestamp, Resolution]: (parsed, reso) = super()._parse_with_reso(label) parsed = Timestamp(parsed) if self.tz is not None and parsed.tzinfo is None: parsed = parsed.tz_localize(self.tz) return (parsed, reso) def _disallow_mismatched_indexing(self, key) -> None: try: self._data._assert_tzawareness_compat(key) except TypeError as err: raise KeyError(key) from err def get_loc(self, key): self._check_indexing_error(key) orig_key = key if is_valid_na_for_dtype(key, self.dtype): key = NaT if isinstance(key, self._data._recognized_scalars): self._disallow_mismatched_indexing(key) key = Timestamp(key) elif isinstance(key, str): try: (parsed, reso) = self._parse_with_reso(key) except ValueError as err: raise KeyError(key) from err self._disallow_mismatched_indexing(parsed) if self._can_partial_date_slice(reso): try: return self._partial_date_slice(reso, parsed) except KeyError as err: raise KeyError(key) from err key = parsed elif isinstance(key, dt.timedelta): raise TypeError(f'Cannot index {type(self).__name__} with {type(key).__name__}') elif isinstance(key, dt.time): return self.indexer_at_time(key) else: raise KeyError(key) try: return Index.get_loc(self, key) except KeyError as err: raise KeyError(orig_key) from err @doc(DatetimeTimedeltaMixin._maybe_cast_slice_bound) def _maybe_cast_slice_bound(self, label, side: str): if isinstance(label, dt.date) and (not isinstance(label, dt.datetime)): label = Timestamp(label).to_pydatetime() label = super()._maybe_cast_slice_bound(label, side) self._data._assert_tzawareness_compat(label) return Timestamp(label) def slice_indexer(self, start=None, end=None, step=None): if isinstance(start, dt.time) and isinstance(end, dt.time): if step is not None and step != 1: raise ValueError('Must have step size of 1 with time slices') return self.indexer_between_time(start, end) if isinstance(start, dt.time) or isinstance(end, dt.time): raise KeyError('Cannot mix time and non-time slice keys') def check_str_or_none(point) -> bool: return point is not None and (not isinstance(point, str)) if check_str_or_none(start) or check_str_or_none(end) or self.is_monotonic_increasing: return Index.slice_indexer(self, start, end, step) mask = np.array(True) in_index = True if start is not None: start_casted = self._maybe_cast_slice_bound(start, 'left') mask = start_casted <= self in_index &= (start_casted == self).any() if end is not None: end_casted = self._maybe_cast_slice_bound(end, 'right') mask = (self <= end_casted) & mask in_index &= (end_casted == self).any() 
if not in_index: raise KeyError('Value based partial slicing on non-monotonic DatetimeIndexes with non-existing keys is not allowed.') indexer = mask.nonzero()[0][::step] if len(indexer) == len(self): return slice(None) else: return indexer @property def inferred_type(self) -> str: return 'datetime64' def indexer_at_time(self, time, asof: bool=False) -> npt.NDArray[np.intp]: if asof: raise NotImplementedError("'asof' argument is not supported") if isinstance(time, str): from dateutil.parser import parse time = parse(time).time() if time.tzinfo: if self.tz is None: raise ValueError('Index must be timezone aware.') time_micros = self.tz_convert(time.tzinfo)._get_time_micros() else: time_micros = self._get_time_micros() micros = _time_to_micros(time) return (time_micros == micros).nonzero()[0] def indexer_between_time(self, start_time, end_time, include_start: bool=True, include_end: bool=True) -> npt.NDArray[np.intp]: start_time = to_time(start_time) end_time = to_time(end_time) time_micros = self._get_time_micros() start_micros = _time_to_micros(start_time) end_micros = _time_to_micros(end_time) if include_start and include_end: lop = rop = operator.le elif include_start: lop = operator.le rop = operator.lt elif include_end: lop = operator.lt rop = operator.le else: lop = rop = operator.lt if start_time <= end_time: join_op = operator.and_ else: join_op = operator.or_ mask = join_op(lop(start_micros, time_micros), rop(time_micros, end_micros)) return mask.nonzero()[0] def date_range(start=None, end=None, periods=None, freq=None, tz=None, normalize: bool=False, name: Hashable | None=None, inclusive: IntervalClosedType='both', *, unit: str | None=None, **kwargs) -> DatetimeIndex: if freq is None and com.any_none(periods, start, end): freq = 'D' dtarr = DatetimeArray._generate_range(start=start, end=end, periods=periods, freq=freq, tz=tz, normalize=normalize, inclusive=inclusive, unit=unit, **kwargs) return DatetimeIndex._simple_new(dtarr, name=name) def bdate_range(start=None, end=None, periods: int | None=None, freq: Frequency | dt.timedelta='B', tz=None, normalize: bool=True, name: Hashable | None=None, weekmask=None, holidays=None, inclusive: IntervalClosedType='both', **kwargs) -> DatetimeIndex: if freq is None: msg = 'freq must be specified for bdate_range; use date_range instead' raise TypeError(msg) if isinstance(freq, str) and freq.startswith('C'): try: weekmask = weekmask or 'Mon Tue Wed Thu Fri' freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask) except (KeyError, TypeError) as err: msg = f'invalid custom frequency string: {freq}' raise ValueError(msg) from err elif holidays or weekmask: msg = f'a custom frequency string is required when holidays or weekmask are passed, got frequency {freq}' raise ValueError(msg) return date_range(start=start, end=end, periods=periods, freq=freq, tz=tz, normalize=normalize, name=name, inclusive=inclusive, **kwargs) def _time_to_micros(time_obj: dt.time) -> int: seconds = time_obj.hour * 60 * 60 + 60 * time_obj.minute + time_obj.second return 1000000 * seconds + time_obj.microsecond # File: pandas-main/pandas/core/indexes/extension.py """""" from __future__ import annotations from inspect import signature from typing import TYPE_CHECKING, TypeVar from pandas.util._decorators import cache_readonly from pandas.core.dtypes.generic import ABCDataFrame from pandas.core.indexes.base import Index if TYPE_CHECKING: from collections.abc import Callable import numpy as np from pandas._typing import ArrayLike, npt from pandas.core.arrays import 
IntervalArray from pandas.core.arrays._mixins import NDArrayBackedExtensionArray _ExtensionIndexT = TypeVar('_ExtensionIndexT', bound='ExtensionIndex') def _inherit_from_data(name: str, delegate: type, cache: bool=False, wrap: bool=False): attr = getattr(delegate, name) if isinstance(attr, property) or type(attr).__name__ == 'getset_descriptor': if cache: def cached(self): return getattr(self._data, name) cached.__name__ = name cached.__doc__ = attr.__doc__ method = cache_readonly(cached) else: def fget(self): result = getattr(self._data, name) if wrap: if isinstance(result, type(self._data)): return type(self)._simple_new(result, name=self.name) elif isinstance(result, ABCDataFrame): return result.set_index(self) return Index(result, name=self.name, dtype=result.dtype) return result def fset(self, value) -> None: setattr(self._data, name, value) fget.__name__ = name fget.__doc__ = attr.__doc__ method = property(fget, fset) elif not callable(attr): method = attr else: def method(self, *args, **kwargs): if 'inplace' in kwargs: raise ValueError(f'cannot use inplace with {type(self).__name__}') result = attr(self._data, *args, **kwargs) if wrap: if isinstance(result, type(self._data)): return type(self)._simple_new(result, name=self.name) elif isinstance(result, ABCDataFrame): return result.set_index(self) return Index(result, name=self.name, dtype=result.dtype) return result method.__name__ = name method.__doc__ = attr.__doc__ method.__signature__ = signature(attr) return method def inherit_names(names: list[str], delegate: type, cache: bool=False, wrap: bool=False) -> Callable[[type[_ExtensionIndexT]], type[_ExtensionIndexT]]: def wrapper(cls: type[_ExtensionIndexT]) -> type[_ExtensionIndexT]: for name in names: meth = _inherit_from_data(name, delegate, cache=cache, wrap=wrap) setattr(cls, name, meth) return cls return wrapper class ExtensionIndex(Index): _data: IntervalArray | NDArrayBackedExtensionArray def _validate_fill_value(self, value): return self._data._validate_setitem_value(value) @cache_readonly def _isnan(self) -> npt.NDArray[np.bool_]: return self._data.isna() class NDArrayBackedExtensionIndex(ExtensionIndex): _data: NDArrayBackedExtensionArray def _get_engine_target(self) -> np.ndarray: return self._data._ndarray def _from_join_target(self, result: np.ndarray) -> ArrayLike: assert result.dtype == self._data._ndarray.dtype return self._data._from_backing_data(result) # File: pandas-main/pandas/core/indexes/frozen.py """""" from __future__ import annotations from typing import TYPE_CHECKING, NoReturn from pandas.core.base import PandasObject from pandas.io.formats.printing import pprint_thing if TYPE_CHECKING: from pandas._typing import Self class FrozenList(PandasObject, list): def union(self, other) -> FrozenList: if isinstance(other, tuple): other = list(other) return type(self)(super().__add__(other)) def difference(self, other) -> FrozenList: other = set(other) temp = [x for x in self if x not in other] return type(self)(temp) __add__ = __iadd__ = union def __getitem__(self, n): if isinstance(n, slice): return type(self)(super().__getitem__(n)) return super().__getitem__(n) def __radd__(self, other) -> Self: if isinstance(other, tuple): other = list(other) return type(self)(other + list(self)) def __eq__(self, other: object) -> bool: if isinstance(other, (tuple, FrozenList)): other = list(other) return super().__eq__(other) __req__ = __eq__ def __mul__(self, other) -> Self: return type(self)(super().__mul__(other)) __imul__ = __mul__ def __reduce__(self): return 
(type(self), (list(self),)) def __hash__(self) -> int: return hash(tuple(self)) def _disabled(self, *args, **kwargs) -> NoReturn: raise TypeError(f"'{type(self).__name__}' does not support mutable operations.") def __str__(self) -> str: return pprint_thing(self, quote_strings=True, escape_chars=('\t', '\r', '\n')) def __repr__(self) -> str: return f'{type(self).__name__}({self!s})' __setitem__ = __setslice__ = _disabled __delitem__ = __delslice__ = _disabled pop = append = extend = _disabled remove = sort = insert = _disabled # File: pandas-main/pandas/core/indexes/interval.py """""" from __future__ import annotations from operator import le, lt import textwrap from typing import TYPE_CHECKING, Any, Literal import numpy as np from pandas._libs import lib from pandas._libs.interval import Interval, IntervalMixin, IntervalTree from pandas._libs.tslibs import BaseOffset, Period, Timedelta, Timestamp, to_offset from pandas.errors import InvalidIndexError from pandas.util._decorators import Appender, cache_readonly from pandas.util._exceptions import rewrite_exception from pandas.core.dtypes.cast import find_common_type, infer_dtype_from_scalar, maybe_box_datetimelike, maybe_downcast_numeric, maybe_upcast_numeric_to_64bit from pandas.core.dtypes.common import ensure_platform_int, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_number, is_object_dtype, is_scalar, pandas_dtype from pandas.core.dtypes.dtypes import DatetimeTZDtype, IntervalDtype from pandas.core.dtypes.missing import is_valid_na_for_dtype from pandas.core.algorithms import unique from pandas.core.arrays.datetimelike import validate_periods from pandas.core.arrays.interval import IntervalArray, _interval_shared_docs import pandas.core.common as com from pandas.core.indexers import is_valid_positional_slice import pandas.core.indexes.base as ibase from pandas.core.indexes.base import Index, _index_shared_docs, ensure_index, maybe_extract_name from pandas.core.indexes.datetimes import DatetimeIndex, date_range from pandas.core.indexes.extension import ExtensionIndex, inherit_names from pandas.core.indexes.multi import MultiIndex from pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range if TYPE_CHECKING: from collections.abc import Hashable from pandas._typing import Dtype, DtypeObj, IntervalClosedType, Self, npt _index_doc_kwargs = dict(ibase._index_doc_kwargs) _index_doc_kwargs.update({'klass': 'IntervalIndex', 'qualname': 'IntervalIndex', 'target_klass': 'IntervalIndex or list of Intervals', 'name': textwrap.dedent(' name : object, optional\n Name to be stored in the index.\n ')}) def _get_next_label(label): dtype = getattr(label, 'dtype', type(label)) if isinstance(label, (Timestamp, Timedelta)): dtype = 'datetime64[ns]' dtype = pandas_dtype(dtype) if lib.is_np_dtype(dtype, 'mM') or isinstance(dtype, DatetimeTZDtype): return label + np.timedelta64(1, 'ns') elif is_integer_dtype(dtype): return label + 1 elif is_float_dtype(dtype): return np.nextafter(label, np.inf) else: raise TypeError(f'cannot determine next label for type {type(label)!r}') def _get_prev_label(label): dtype = getattr(label, 'dtype', type(label)) if isinstance(label, (Timestamp, Timedelta)): dtype = 'datetime64[ns]' dtype = pandas_dtype(dtype) if lib.is_np_dtype(dtype, 'mM') or isinstance(dtype, DatetimeTZDtype): return label - np.timedelta64(1, 'ns') elif is_integer_dtype(dtype): return label - 1 elif is_float_dtype(dtype): return np.nextafter(label, -np.inf) else: raise TypeError(f'cannot determine next label for type 
{type(label)!r}') def _new_IntervalIndex(cls, d): return cls.from_arrays(**d) @Appender(_interval_shared_docs['class'] % {'klass': 'IntervalIndex', 'summary': 'Immutable index of intervals that are closed on the same side.', 'name': _index_doc_kwargs['name'], 'extra_attributes': 'is_overlapping\nvalues\n', 'extra_methods': '', 'examples': textwrap.dedent(" Examples\n --------\n A new ``IntervalIndex`` is typically constructed using\n :func:`interval_range`:\n\n >>> pd.interval_range(start=0, end=5)\n IntervalIndex([(0, 1], (1, 2], (2, 3], (3, 4], (4, 5]],\n dtype='interval[int64, right]')\n\n It may also be constructed using one of the constructor\n methods: :meth:`IntervalIndex.from_arrays`,\n :meth:`IntervalIndex.from_breaks`, and :meth:`IntervalIndex.from_tuples`.\n\n See further examples in the doc strings of ``interval_range`` and the\n mentioned constructor methods.\n ")}) @inherit_names(['set_closed', 'to_tuples'], IntervalArray, wrap=True) @inherit_names(['__array__', 'overlaps', 'contains', 'closed_left', 'closed_right', 'open_left', 'open_right', 'is_empty'], IntervalArray) @inherit_names(['is_non_overlapping_monotonic', 'closed'], IntervalArray, cache=True) class IntervalIndex(ExtensionIndex): _typ = 'intervalindex' closed: IntervalClosedType is_non_overlapping_monotonic: bool closed_left: bool closed_right: bool open_left: bool open_right: bool _data: IntervalArray _values: IntervalArray _can_hold_strings = False _data_cls = IntervalArray def __new__(cls, data, closed: IntervalClosedType | None=None, dtype: Dtype | None=None, copy: bool=False, name: Hashable | None=None, verify_integrity: bool=True) -> Self: name = maybe_extract_name(name, data, cls) with rewrite_exception('IntervalArray', cls.__name__): array = IntervalArray(data, closed=closed, copy=copy, dtype=dtype, verify_integrity=verify_integrity) return cls._simple_new(array, name) @classmethod @Appender(_interval_shared_docs['from_breaks'] % {'klass': 'IntervalIndex', 'name': textwrap.dedent('\n name : str, optional\n Name of the resulting IntervalIndex.'), 'examples': textwrap.dedent(" Examples\n --------\n >>> pd.IntervalIndex.from_breaks([0, 1, 2, 3])\n IntervalIndex([(0, 1], (1, 2], (2, 3]],\n dtype='interval[int64, right]')\n ")}) def from_breaks(cls, breaks, closed: IntervalClosedType | None='right', name: Hashable | None=None, copy: bool=False, dtype: Dtype | None=None) -> IntervalIndex: with rewrite_exception('IntervalArray', cls.__name__): array = IntervalArray.from_breaks(breaks, closed=closed, copy=copy, dtype=dtype) return cls._simple_new(array, name=name) @classmethod @Appender(_interval_shared_docs['from_arrays'] % {'klass': 'IntervalIndex', 'name': textwrap.dedent('\n name : str, optional\n Name of the resulting IntervalIndex.'), 'examples': textwrap.dedent(" Examples\n --------\n >>> pd.IntervalIndex.from_arrays([0, 1, 2], [1, 2, 3])\n IntervalIndex([(0, 1], (1, 2], (2, 3]],\n dtype='interval[int64, right]')\n ")}) def from_arrays(cls, left, right, closed: IntervalClosedType='right', name: Hashable | None=None, copy: bool=False, dtype: Dtype | None=None) -> IntervalIndex: with rewrite_exception('IntervalArray', cls.__name__): array = IntervalArray.from_arrays(left, right, closed, copy=copy, dtype=dtype) return cls._simple_new(array, name=name) @classmethod @Appender(_interval_shared_docs['from_tuples'] % {'klass': 'IntervalIndex', 'name': textwrap.dedent('\n name : str, optional\n Name of the resulting IntervalIndex.'), 'examples': textwrap.dedent(" Examples\n --------\n >>> 
pd.IntervalIndex.from_tuples([(0, 1), (1, 2)])\n IntervalIndex([(0, 1], (1, 2]],\n dtype='interval[int64, right]')\n ")}) def from_tuples(cls, data, closed: IntervalClosedType='right', name: Hashable | None=None, copy: bool=False, dtype: Dtype | None=None) -> IntervalIndex: with rewrite_exception('IntervalArray', cls.__name__): arr = IntervalArray.from_tuples(data, closed=closed, copy=copy, dtype=dtype) return cls._simple_new(arr, name=name) @cache_readonly def _engine(self) -> IntervalTree: left = self._maybe_convert_i8(self.left) left = maybe_upcast_numeric_to_64bit(left) right = self._maybe_convert_i8(self.right) right = maybe_upcast_numeric_to_64bit(right) return IntervalTree(left, right, closed=self.closed) def __contains__(self, key: Any) -> bool: hash(key) if not isinstance(key, Interval): if is_valid_na_for_dtype(key, self.dtype): return self.hasnans return False try: self.get_loc(key) return True except KeyError: return False def _getitem_slice(self, slobj: slice) -> IntervalIndex: res = self._data[slobj] return type(self)._simple_new(res, name=self._name) @cache_readonly def _multiindex(self) -> MultiIndex: return MultiIndex.from_arrays([self.left, self.right], names=['left', 'right']) def __reduce__(self): d = {'left': self.left, 'right': self.right, 'closed': self.closed, 'name': self.name} return (_new_IntervalIndex, (type(self), d), None) @property def inferred_type(self) -> str: return 'interval' @Appender(Index.memory_usage.__doc__) def memory_usage(self, deep: bool=False) -> int: return self.left.memory_usage(deep=deep) + self.right.memory_usage(deep=deep) @cache_readonly def is_monotonic_decreasing(self) -> bool: return self[::-1].is_monotonic_increasing @cache_readonly def is_unique(self) -> bool: left = self.left right = self.right if self.isna().sum() > 1: return False if left.is_unique or right.is_unique: return True seen_pairs = set() check_idx = np.where(left.duplicated(keep=False))[0] for idx in check_idx: pair = (left[idx], right[idx]) if pair in seen_pairs: return False seen_pairs.add(pair) return True @property def is_overlapping(self) -> bool: return self._engine.is_overlapping def _needs_i8_conversion(self, key) -> bool: key_dtype = getattr(key, 'dtype', None) if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval): return self._needs_i8_conversion(key.left) i8_types = (Timestamp, Timedelta, DatetimeIndex, TimedeltaIndex) return isinstance(key, i8_types) def _maybe_convert_i8(self, key): if is_list_like(key): key = ensure_index(key) key = maybe_upcast_numeric_to_64bit(key) if not self._needs_i8_conversion(key): return key scalar = is_scalar(key) key_dtype = getattr(key, 'dtype', None) if isinstance(key_dtype, IntervalDtype) or isinstance(key, Interval): left = self._maybe_convert_i8(key.left) right = self._maybe_convert_i8(key.right) constructor = Interval if scalar else IntervalIndex.from_arrays return constructor(left, right, closed=self.closed) if scalar: (key_dtype, key_i8) = infer_dtype_from_scalar(key) if isinstance(key, Period): key_i8 = key.ordinal elif isinstance(key_i8, Timestamp): key_i8 = key_i8._value elif isinstance(key_i8, (np.datetime64, np.timedelta64)): key_i8 = key_i8.view('i8') else: (key_dtype, key_i8) = (key.dtype, Index(key.asi8)) if key.hasnans: key_i8 = key_i8.where(~key._isnan) subtype = self.dtype.subtype if subtype != key_dtype: raise ValueError(f'Cannot index an IntervalIndex of subtype {subtype} with values of dtype {key_dtype}') return key_i8 def _searchsorted_monotonic(self, label, side: Literal['left', 
'right']='left'): if not self.is_non_overlapping_monotonic: raise KeyError('can only get slices from an IntervalIndex if bounds are non-overlapping and all monotonic increasing or decreasing') if isinstance(label, (IntervalMixin, IntervalIndex)): raise NotImplementedError('Interval objects are not currently supported') if side == 'left' and self.left.is_monotonic_increasing or (side == 'right' and (not self.left.is_monotonic_increasing)): sub_idx = self.right if self.open_right: label = _get_next_label(label) else: sub_idx = self.left if self.open_left: label = _get_prev_label(label) return sub_idx._searchsorted_monotonic(label, side) def get_loc(self, key) -> int | slice | np.ndarray: self._check_indexing_error(key) if isinstance(key, Interval): if self.closed != key.closed: raise KeyError(key) mask = (self.left == key.left) & (self.right == key.right) elif is_valid_na_for_dtype(key, self.dtype): mask = self.isna() else: op_left = le if self.closed_left else lt op_right = le if self.closed_right else lt try: mask = op_left(self.left, key) & op_right(key, self.right) except TypeError as err: raise KeyError(key) from err matches = mask.sum() if matches == 0: raise KeyError(key) if matches == 1: return mask.argmax() res = lib.maybe_booleans_to_slice(mask.view('u1')) if isinstance(res, slice) and res.stop is None: res = slice(res.start, len(self), res.step) return res def _get_indexer(self, target: Index, method: str | None=None, limit: int | None=None, tolerance: Any | None=None) -> npt.NDArray[np.intp]: if isinstance(target, IntervalIndex): indexer = self._get_indexer_unique_sides(target) elif not is_object_dtype(target.dtype): target = self._maybe_convert_i8(target) indexer = self._engine.get_indexer(target.values) else: return self._get_indexer_pointwise(target)[0] return ensure_platform_int(indexer) @Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs) def get_indexer_non_unique(self, target: Index) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: target = ensure_index(target) if not self._should_compare(target) and (not self._should_partial_index(target)): return self._get_indexer_non_comparable(target, None, unique=False) elif isinstance(target, IntervalIndex): if self.left.is_unique and self.right.is_unique: indexer = self._get_indexer_unique_sides(target) missing = (indexer == -1).nonzero()[0] else: return self._get_indexer_pointwise(target) elif is_object_dtype(target.dtype) or not self._should_partial_index(target): return self._get_indexer_pointwise(target) else: target = self._maybe_convert_i8(target) (indexer, missing) = self._engine.get_indexer_non_unique(target.values) return (ensure_platform_int(indexer), ensure_platform_int(missing)) def _get_indexer_unique_sides(self, target: IntervalIndex) -> npt.NDArray[np.intp]: left_indexer = self.left.get_indexer(target.left) right_indexer = self.right.get_indexer(target.right) indexer = np.where(left_indexer == right_indexer, left_indexer, -1) return indexer def _get_indexer_pointwise(self, target: Index) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: (indexer, missing) = ([], []) for (i, key) in enumerate(target): try: locs = self.get_loc(key) if isinstance(locs, slice): locs = np.arange(locs.start, locs.stop, locs.step, dtype='intp') elif lib.is_integer(locs): locs = np.array(locs, ndmin=1) else: locs = np.where(locs)[0] except KeyError: missing.append(i) locs = np.array([-1]) except InvalidIndexError: missing.append(i) locs = np.array([-1]) indexer.append(locs) indexer = np.concatenate(indexer) 
return (ensure_platform_int(indexer), ensure_platform_int(missing)) @cache_readonly def _index_as_unique(self) -> bool: return not self.is_overlapping and self._engine._na_count < 2 _requires_unique_msg = 'cannot handle overlapping indices; use IntervalIndex.get_indexer_non_unique' def _convert_slice_indexer(self, key: slice, kind: Literal['loc', 'getitem']): if not (key.step is None or key.step == 1): msg = 'label-based slicing with step!=1 is not supported for IntervalIndex' if kind == 'loc': raise ValueError(msg) if kind == 'getitem': if not is_valid_positional_slice(key): raise ValueError(msg) return super()._convert_slice_indexer(key, kind) @cache_readonly def _should_fallback_to_positional(self) -> bool: return self.dtype.subtype.kind in 'mM' def _maybe_cast_slice_bound(self, label, side: str): return getattr(self, side)._maybe_cast_slice_bound(label, side) def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: if not isinstance(dtype, IntervalDtype): return False common_subtype = find_common_type([self.dtype, dtype]) return not is_object_dtype(common_subtype) @cache_readonly def left(self) -> Index: return Index(self._data.left, copy=False) @cache_readonly def right(self) -> Index: return Index(self._data.right, copy=False) @cache_readonly def mid(self) -> Index: return Index(self._data.mid, copy=False) @property def length(self) -> Index: return Index(self._data.length, copy=False) def _intersection(self, other, sort): if self.left.is_unique and self.right.is_unique: taken = self._intersection_unique(other) elif other.left.is_unique and other.right.is_unique and (self.isna().sum() <= 1): taken = other._intersection_unique(self) else: taken = self._intersection_non_unique(other) if sort is None: taken = taken.sort_values() return taken def _intersection_unique(self, other: IntervalIndex) -> IntervalIndex: lindexer = self.left.get_indexer(other.left) rindexer = self.right.get_indexer(other.right) match = (lindexer == rindexer) & (lindexer != -1) indexer = lindexer.take(match.nonzero()[0]) indexer = unique(indexer) return self.take(indexer) def _intersection_non_unique(self, other: IntervalIndex) -> IntervalIndex: mask = np.zeros(len(self), dtype=bool) if self.hasnans and other.hasnans: first_nan_loc = np.arange(len(self))[self.isna()][0] mask[first_nan_loc] = True other_tups = set(zip(other.left, other.right)) for (i, tup) in enumerate(zip(self.left, self.right)): if tup in other_tups: mask[i] = True return self[mask] def _get_engine_target(self) -> np.ndarray: raise NotImplementedError('IntervalIndex does not use libjoin fastpaths or pass values to IndexEngine objects') def _from_join_target(self, result): raise NotImplementedError('IntervalIndex does not use libjoin fastpaths') def _is_valid_endpoint(endpoint) -> bool: return any([is_number(endpoint), isinstance(endpoint, Timestamp), isinstance(endpoint, Timedelta), endpoint is None]) def _is_type_compatible(a, b) -> bool: is_ts_compat = lambda x: isinstance(x, (Timestamp, BaseOffset)) is_td_compat = lambda x: isinstance(x, (Timedelta, BaseOffset)) return is_number(a) and is_number(b) or (is_ts_compat(a) and is_ts_compat(b)) or (is_td_compat(a) and is_td_compat(b)) or com.any_none(a, b) def interval_range(start=None, end=None, periods=None, freq=None, name: Hashable | None=None, closed: IntervalClosedType='right') -> IntervalIndex: start = maybe_box_datetimelike(start) end = maybe_box_datetimelike(end) endpoint = start if start is not None else end if freq is None and com.any_none(periods, start, end): freq = 1 if 
is_number(endpoint) else 'D' if com.count_not_none(start, end, periods, freq) != 3: raise ValueError('Of the four parameters: start, end, periods, and freq, exactly three must be specified') if not _is_valid_endpoint(start): raise ValueError(f'start must be numeric or datetime-like, got {start}') if not _is_valid_endpoint(end): raise ValueError(f'end must be numeric or datetime-like, got {end}') periods = validate_periods(periods) if freq is not None and (not is_number(freq)): try: freq = to_offset(freq) except ValueError as err: raise ValueError(f'freq must be numeric or convertible to DateOffset, got {freq}') from err if not all([_is_type_compatible(start, end), _is_type_compatible(start, freq), _is_type_compatible(end, freq)]): raise TypeError('start, end, freq need to be type compatible') if periods is not None: periods += 1 breaks: np.ndarray | TimedeltaIndex | DatetimeIndex if is_number(endpoint): dtype: np.dtype = np.dtype('int64') if com.all_not_none(start, end, freq): if isinstance(start, (float, np.float16)) or isinstance(end, (float, np.float16)) or isinstance(freq, (float, np.float16)): dtype = np.dtype('float64') elif isinstance(start, (np.integer, np.floating)) and isinstance(end, (np.integer, np.floating)) and (start.dtype == end.dtype): dtype = start.dtype breaks = np.arange(start, end + freq * 0.1, freq) breaks = maybe_downcast_numeric(breaks, dtype) else: if periods is None: periods = int((end - start) // freq) + 1 elif start is None: start = end - (periods - 1) * freq elif end is None: end = start + (periods - 1) * freq breaks = np.linspace(start, end, periods) if all((is_integer(x) for x in com.not_none(start, end, freq))): breaks = maybe_downcast_numeric(breaks, dtype) elif isinstance(endpoint, Timestamp): breaks = date_range(start=start, end=end, periods=periods, freq=freq) else: breaks = timedelta_range(start=start, end=end, periods=periods, freq=freq) return IntervalIndex.from_breaks(breaks, name=name, closed=closed, dtype=IntervalDtype(subtype=breaks.dtype, closed=closed)) # File: pandas-main/pandas/core/indexes/multi.py from __future__ import annotations from collections.abc import Callable, Collection, Generator, Hashable, Iterable, Sequence from functools import wraps from sys import getsizeof from typing import TYPE_CHECKING, Any, Literal, cast import warnings import numpy as np from pandas._config import get_option from pandas._libs import algos as libalgos, index as libindex, lib from pandas._libs.hashtable import duplicated from pandas._typing import AnyAll, AnyArrayLike, Axis, DropKeep, DtypeObj, F, IgnoreRaise, IndexLabel, IndexT, Scalar, Self, Shape, npt from pandas.compat.numpy import function as nv from pandas.errors import InvalidIndexError, PerformanceWarning, UnsortedIndexError from pandas.util._decorators import Appender, cache_readonly, doc from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import coerce_indexer_dtype from pandas.core.dtypes.common import ensure_int64, ensure_platform_int, is_hashable, is_integer, is_iterator, is_list_like, is_object_dtype, is_scalar, pandas_dtype from pandas.core.dtypes.dtypes import CategoricalDtype, ExtensionDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries from pandas.core.dtypes.inference import is_array_like from pandas.core.dtypes.missing import array_equivalent, isna import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import validate_putmask from pandas.core.arrays import Categorical, ExtensionArray from pandas.core.arrays.categorical 
import factorize_from_iterables, recode_for_categories import pandas.core.common as com from pandas.core.construction import sanitize_array import pandas.core.indexes.base as ibase from pandas.core.indexes.base import Index, _index_shared_docs, ensure_index, get_unanimous_names from pandas.core.indexes.frozen import FrozenList from pandas.core.ops.invalid import make_invalid_op from pandas.core.sorting import get_group_index, lexsort_indexer from pandas.io.formats.printing import pprint_thing if TYPE_CHECKING: from pandas import CategoricalIndex, DataFrame, Series _index_doc_kwargs = dict(ibase._index_doc_kwargs) _index_doc_kwargs.update({'klass': 'MultiIndex', 'target_klass': 'MultiIndex or list of tuples'}) class MultiIndexUInt64Engine(libindex.BaseMultiIndexCodesEngine, libindex.UInt64Engine): _base = libindex.UInt64Engine _codes_dtype = 'uint64' class MultiIndexUInt32Engine(libindex.BaseMultiIndexCodesEngine, libindex.UInt32Engine): _base = libindex.UInt32Engine _codes_dtype = 'uint32' class MultiIndexUInt16Engine(libindex.BaseMultiIndexCodesEngine, libindex.UInt16Engine): _base = libindex.UInt16Engine _codes_dtype = 'uint16' class MultiIndexUInt8Engine(libindex.BaseMultiIndexCodesEngine, libindex.UInt8Engine): _base = libindex.UInt8Engine _codes_dtype = 'uint8' class MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.ObjectEngine): _base = libindex.ObjectEngine _codes_dtype = 'object' def names_compat(meth: F) -> F: @wraps(meth) def new_meth(self_or_cls, *args, **kwargs): if 'name' in kwargs and 'names' in kwargs: raise TypeError('Can only provide one of `names` and `name`') if 'name' in kwargs: kwargs['names'] = kwargs.pop('name') return meth(self_or_cls, *args, **kwargs) return cast(F, new_meth) class MultiIndex(Index): _hidden_attrs = Index._hidden_attrs | frozenset() _typ = 'multiindex' _names: list[Hashable | None] = [] _levels = FrozenList() _codes = FrozenList() _comparables = ['names'] sortorder: int | None def __new__(cls, levels=None, codes=None, sortorder=None, names=None, dtype=None, copy: bool=False, name=None, verify_integrity: bool=True) -> Self: if name is not None: names = name if levels is None or codes is None: raise TypeError('Must pass both levels and codes') if len(levels) != len(codes): raise ValueError('Length of levels and codes must be the same.') if len(levels) == 0: raise ValueError('Must pass non-zero number of levels/codes') result = object.__new__(cls) result._cache = {} result._set_levels(levels, copy=copy, validate=False) result._set_codes(codes, copy=copy, validate=False) result._names = [None] * len(levels) if names is not None: result._set_names(names) if sortorder is not None: result.sortorder = int(sortorder) else: result.sortorder = sortorder if verify_integrity: new_codes = result._verify_integrity() result._codes = new_codes result._reset_identity() result._references = None return result def _validate_codes(self, level: Index, code: np.ndarray) -> np.ndarray: null_mask = isna(level) if np.any(null_mask): code = np.where(null_mask[code], -1, code) return code def _verify_integrity(self, codes: list | None=None, levels: list | None=None, levels_to_verify: list[int] | range | None=None) -> FrozenList: codes = codes or self.codes levels = levels or self.levels if levels_to_verify is None: levels_to_verify = range(len(levels)) if len(levels) != len(codes): raise ValueError('Length of levels and codes must match. 
NOTE: this index is in an inconsistent state.') codes_length = len(codes[0]) for i in levels_to_verify: level = levels[i] level_codes = codes[i] if len(level_codes) != codes_length: raise ValueError(f'Unequal code lengths: {[len(code_) for code_ in codes]}') if len(level_codes) and level_codes.max() >= len(level): raise ValueError(f'On level {i}, code max ({level_codes.max()}) >= length of level ({len(level)}). NOTE: this index is in an inconsistent state') if len(level_codes) and level_codes.min() < -1: raise ValueError(f'On level {i}, code value ({level_codes.min()}) < -1') if not level.is_unique: raise ValueError(f'Level values must be unique: {list(level)} on level {i}') if self.sortorder is not None: if self.sortorder > _lexsort_depth(self.codes, self.nlevels): raise ValueError(f'Value for sortorder must be inferior or equal to actual lexsort_depth: sortorder {self.sortorder} with lexsort_depth {_lexsort_depth(self.codes, self.nlevels)}') result_codes = [] for i in range(len(levels)): if i in levels_to_verify: result_codes.append(self._validate_codes(levels[i], codes[i])) else: result_codes.append(codes[i]) new_codes = FrozenList(result_codes) return new_codes @classmethod def from_arrays(cls, arrays, sortorder: int | None=None, names: Sequence[Hashable] | Hashable | lib.NoDefault=lib.no_default) -> MultiIndex: error_msg = 'Input must be a list / sequence of array-likes.' if not is_list_like(arrays): raise TypeError(error_msg) if is_iterator(arrays): arrays = list(arrays) for array in arrays: if not is_list_like(array): raise TypeError(error_msg) for i in range(1, len(arrays)): if len(arrays[i]) != len(arrays[i - 1]): raise ValueError('all arrays must be same length') (codes, levels) = factorize_from_iterables(arrays) if names is lib.no_default: names = [getattr(arr, 'name', None) for arr in arrays] return cls(levels=levels, codes=codes, sortorder=sortorder, names=names, verify_integrity=False) @classmethod @names_compat def from_tuples(cls, tuples: Iterable[tuple[Hashable, ...]], sortorder: int | None=None, names: Sequence[Hashable] | Hashable | None=None) -> MultiIndex: if not is_list_like(tuples): raise TypeError('Input must be a list / sequence of tuple-likes.') if is_iterator(tuples): tuples = list(tuples) tuples = cast(Collection[tuple[Hashable, ...]], tuples) if len(tuples) and all((isinstance(e, tuple) and (not e) for e in tuples)): codes = [np.zeros(len(tuples))] levels = [Index(com.asarray_tuplesafe(tuples, dtype=np.dtype('object')))] return cls(levels=levels, codes=codes, sortorder=sortorder, names=names, verify_integrity=False) arrays: list[Sequence[Hashable]] if len(tuples) == 0: if names is None: raise TypeError('Cannot infer number of levels from empty list') arrays = [[]] * len(names) elif isinstance(tuples, (np.ndarray, Index)): if isinstance(tuples, Index): tuples = np.asarray(tuples._values) arrays = list(lib.tuples_to_object_array(tuples).T) elif isinstance(tuples, list): arrays = list(lib.to_object_array_tuples(tuples).T) else: arrs = zip(*tuples) arrays = cast(list[Sequence[Hashable]], arrs) return cls.from_arrays(arrays, sortorder=sortorder, names=names) @classmethod def from_product(cls, iterables: Sequence[Iterable[Hashable]], sortorder: int | None=None, names: Sequence[Hashable] | Hashable | lib.NoDefault=lib.no_default) -> MultiIndex: if not is_list_like(iterables): raise TypeError('Input must be a list / sequence of iterables.') if is_iterator(iterables): iterables = list(iterables) (codes, levels) = factorize_from_iterables(iterables) if names is 
lib.no_default: names = [getattr(it, 'name', None) for it in iterables] codes = cartesian_product(codes) return cls(levels, codes, sortorder=sortorder, names=names) @classmethod def from_frame(cls, df: DataFrame, sortorder: int | None=None, names: Sequence[Hashable] | Hashable | None=None) -> MultiIndex: if not isinstance(df, ABCDataFrame): raise TypeError('Input must be a DataFrame') (column_names, columns) = zip(*df.items()) names = column_names if names is None else names return cls.from_arrays(columns, sortorder=sortorder, names=names) @cache_readonly def _values(self) -> np.ndarray: values = [] for i in range(self.nlevels): index = self.levels[i] codes = self.codes[i] vals = index if isinstance(vals.dtype, CategoricalDtype): vals = cast('CategoricalIndex', vals) vals = vals._data._internal_get_values() if isinstance(vals.dtype, ExtensionDtype) or lib.is_np_dtype(vals.dtype, 'mM'): vals = vals.astype(object) array_vals = np.asarray(vals) array_vals = algos.take_nd(array_vals, codes, fill_value=index._na_value) values.append(array_vals) arr = lib.fast_zip(values) return arr @property def values(self) -> np.ndarray: return self._values @property def array(self): raise ValueError("MultiIndex has no single backing array. Use 'MultiIndex.to_numpy()' to get a NumPy array of tuples.") @cache_readonly def dtypes(self) -> Series: from pandas import Series names = com.fill_missing_names(self.names) return Series([level.dtype for level in self.levels], index=Index(names)) def __len__(self) -> int: return len(self.codes[0]) @property def size(self) -> int: return len(self) @cache_readonly def levels(self) -> FrozenList: result = [x._rename(name=name) for (x, name) in zip(self._levels, self._names)] for level in result: level._no_setting_name = True return FrozenList(result) def _set_levels(self, levels, *, level=None, copy: bool=False, validate: bool=True, verify_integrity: bool=False) -> None: if validate: if len(levels) == 0: raise ValueError('Must set non-zero number of levels.') if level is None and len(levels) != self.nlevels: raise ValueError('Length of levels must match number of levels.') if level is not None and len(levels) != len(level): raise ValueError('Length of levels must match length of level.') if level is None: new_levels = FrozenList((ensure_index(lev, copy=copy)._view() for lev in levels)) level_numbers: range | list[int] = range(len(new_levels)) else: level_numbers = [self._get_level_number(lev) for lev in level] new_levels_list = list(self._levels) for (lev_num, lev) in zip(level_numbers, levels): new_levels_list[lev_num] = ensure_index(lev, copy=copy)._view() new_levels = FrozenList(new_levels_list) if verify_integrity: new_codes = self._verify_integrity(levels=new_levels, levels_to_verify=level_numbers) self._codes = new_codes names = self.names self._levels = new_levels if any(names): self._set_names(names) self._reset_cache() def set_levels(self, levels, *, level=None, verify_integrity: bool=True) -> MultiIndex: if isinstance(levels, Index): pass elif is_array_like(levels): levels = Index(levels) elif is_list_like(levels): levels = list(levels) (level, levels) = _require_listlike(level, levels, 'Levels') idx = self._view() idx._reset_identity() idx._set_levels(levels, level=level, validate=True, verify_integrity=verify_integrity) return idx @property def nlevels(self) -> int: return len(self._levels) @property def levshape(self) -> Shape: return tuple((len(x) for x in self.levels)) @property def codes(self) -> FrozenList: return self._codes def _set_codes(self, codes, *, 
level=None, copy: bool=False, validate: bool=True, verify_integrity: bool=False) -> None: if validate: if level is None and len(codes) != self.nlevels: raise ValueError('Length of codes must match number of levels') if level is not None and len(codes) != len(level): raise ValueError('Length of codes must match length of levels.') level_numbers: list[int] | range if level is None: new_codes = FrozenList((_coerce_indexer_frozen(level_codes, lev, copy=copy).view() for (lev, level_codes) in zip(self._levels, codes))) level_numbers = range(len(new_codes)) else: level_numbers = [self._get_level_number(lev) for lev in level] new_codes_list = list(self._codes) for (lev_num, level_codes) in zip(level_numbers, codes): lev = self.levels[lev_num] new_codes_list[lev_num] = _coerce_indexer_frozen(level_codes, lev, copy=copy) new_codes = FrozenList(new_codes_list) if verify_integrity: new_codes = self._verify_integrity(codes=new_codes, levels_to_verify=level_numbers) self._codes = new_codes self._reset_cache() def set_codes(self, codes, *, level=None, verify_integrity: bool=True) -> MultiIndex: (level, codes) = _require_listlike(level, codes, 'Codes') idx = self._view() idx._reset_identity() idx._set_codes(codes, level=level, verify_integrity=verify_integrity) return idx @cache_readonly def _engine(self): sizes = np.ceil(np.log2([len(level) + libindex.multiindex_nulls_shift for level in self.levels])) lev_bits = np.cumsum(sizes[::-1])[::-1] offsets = np.concatenate([lev_bits[1:], [0]]) offsets = offsets.astype(np.min_scalar_type(int(offsets[0]))) if lev_bits[0] > 64: return MultiIndexPyIntEngine(self.levels, self.codes, offsets) if lev_bits[0] > 32: return MultiIndexUInt64Engine(self.levels, self.codes, offsets) if lev_bits[0] > 16: return MultiIndexUInt32Engine(self.levels, self.codes, offsets) if lev_bits[0] > 8: return MultiIndexUInt16Engine(self.levels, self.codes, offsets) return MultiIndexUInt8Engine(self.levels, self.codes, offsets) @property def _constructor(self) -> Callable[..., MultiIndex]: return type(self).from_tuples @doc(Index._shallow_copy) def _shallow_copy(self, values: np.ndarray, name=lib.no_default) -> MultiIndex: names = name if name is not lib.no_default else self.names return type(self).from_tuples(values, sortorder=None, names=names) def _view(self) -> MultiIndex: result = type(self)(levels=self.levels, codes=self.codes, sortorder=self.sortorder, names=self.names, verify_integrity=False) result._cache = self._cache.copy() result._reset_cache('levels') return result def copy(self, names=None, deep: bool=False, name=None) -> Self: names = self._validate_names(name=name, names=names, deep=deep) keep_id = not deep (levels, codes) = (None, None) if deep: from copy import deepcopy levels = deepcopy(self.levels) codes = deepcopy(self.codes) levels = levels if levels is not None else self.levels codes = codes if codes is not None else self.codes new_index = type(self)(levels=levels, codes=codes, sortorder=self.sortorder, names=names, verify_integrity=False) new_index._cache = self._cache.copy() new_index._reset_cache('levels') if keep_id: new_index._id = self._id return new_index def __array__(self, dtype=None, copy=None) -> np.ndarray: return self.values def view(self, cls=None) -> Self: result = self.copy() result._id = self._id return result @doc(Index.__contains__) def __contains__(self, key: Any) -> bool: hash(key) try: self.get_loc(key) return True except (LookupError, TypeError, ValueError): return False @cache_readonly def dtype(self) -> np.dtype: return np.dtype('O') 
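# --- Usage sketch (assumes the public MultiIndex API behaves as in the code above;
# example values are illustrative only). The constructors and the _set_levels /
# _set_codes / _engine machinery all operate on the factorized levels/codes
# representation rather than on materialized tuples:
#
#   import pandas as pd
#   mi = pd.MultiIndex.from_arrays([["a", "a", "b"], [1, 2, 1]], names=["k1", "k2"])
#   mi.levels   # one Index of unique values per level, roughly [['a', 'b'], [1, 2]]
#   mi.codes    # integer positions into each level; -1 marks a missing value
#   mi.dtype    # object dtype ('O') for the index as a whole, per the dtype property above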
@cache_readonly def _is_memory_usage_qualified(self) -> bool: def f(level) -> bool: return 'mixed' in level or 'string' in level or 'unicode' in level return any((f(level.inferred_type) for level in self.levels)) @doc(Index.memory_usage) def memory_usage(self, deep: bool=False) -> int: return self._nbytes(deep) @cache_readonly def nbytes(self) -> int: return self._nbytes(False) def _nbytes(self, deep: bool=False) -> int: objsize = 24 level_nbytes = sum((i.memory_usage(deep=deep) for i in self.levels)) label_nbytes = sum((i.nbytes for i in self.codes)) names_nbytes = sum((getsizeof(i, objsize) for i in self.names)) result = level_nbytes + label_nbytes + names_nbytes if '_engine' in self._cache: result += self._engine.sizeof(deep=deep) return result def _formatter_func(self, tup): formatter_funcs = (level._formatter_func for level in self.levels) return tuple((func(val) for (func, val) in zip(formatter_funcs, tup))) def _get_values_for_csv(self, *, na_rep: str='nan', **kwargs) -> npt.NDArray[np.object_]: new_levels = [] new_codes = [] for (level, level_codes) in zip(self.levels, self.codes): level_strs = level._get_values_for_csv(na_rep=na_rep, **kwargs) mask = level_codes == -1 if mask.any(): nan_index = len(level_strs) level_strs = level_strs.astype(str) level_strs = np.append(level_strs, na_rep) assert not level_codes.flags.writeable level_codes = level_codes.copy() level_codes[mask] = nan_index new_levels.append(level_strs) new_codes.append(level_codes) if len(new_levels) == 1: return Index(new_levels[0].take(new_codes[0]))._get_values_for_csv() else: mi = MultiIndex(levels=new_levels, codes=new_codes, names=self.names, sortorder=self.sortorder, verify_integrity=False) return mi._values def _format_multi(self, *, include_names: bool, sparsify: bool | None | lib.NoDefault, formatter: Callable | None=None) -> list: if len(self) == 0: return [] stringified_levels = [] for (lev, level_codes) in zip(self.levels, self.codes): na = _get_na_rep(lev.dtype) if len(lev) > 0: taken = formatted = lev.take(level_codes) formatted = taken._format_flat(include_name=False, formatter=formatter) mask = level_codes == -1 if mask.any(): formatted = np.array(formatted, dtype=object) formatted[mask] = na formatted = formatted.tolist() else: formatted = [pprint_thing(na if isna(x) else x, escape_chars=('\t', '\r', '\n')) for x in algos.take_nd(lev._values, level_codes)] stringified_levels.append(formatted) result_levels = [] for (lev, lev_name) in zip(stringified_levels, self.names): level = [] if include_names: level.append(pprint_thing(lev_name, escape_chars=('\t', '\r', '\n')) if lev_name is not None else '') level.extend(np.array(lev, dtype=object)) result_levels.append(level) if sparsify is None: sparsify = get_option('display.multi_sparse') if sparsify: sentinel: Literal[''] | bool | lib.NoDefault = '' assert isinstance(sparsify, bool) or sparsify is lib.no_default if sparsify is lib.no_default: sentinel = sparsify result_levels = sparsify_labels(result_levels, start=int(include_names), sentinel=sentinel) return result_levels def _get_names(self) -> FrozenList: return FrozenList(self._names) def _set_names(self, names, *, level=None) -> None: if names is not None and (not is_list_like(names)): raise ValueError('Names should be list-like for a MultiIndex') names = list(names) if level is not None and len(names) != len(level): raise ValueError('Length of names must match length of level.') if level is None and len(names) != self.nlevels: raise ValueError('Length of names must match number of levels in 
MultiIndex.') if level is None: level = range(self.nlevels) else: level = (self._get_level_number(lev) for lev in level) for (lev, name) in zip(level, names): if name is not None: if not is_hashable(name): raise TypeError(f'{type(self).__name__}.name must be a hashable type') self._names[lev] = name self._reset_cache('levels') names = property(fset=_set_names, fget=_get_names, doc="\n Names of levels in MultiIndex.\n\n This attribute provides access to the names of the levels in a `MultiIndex`.\n The names are stored as a `FrozenList`, which is an immutable list-like\n container. Each name corresponds to a level in the `MultiIndex`, and can be\n used to identify or manipulate the levels individually.\n\n See Also\n --------\n MultiIndex.set_names : Set Index or MultiIndex name.\n MultiIndex.rename : Rename specific levels in a MultiIndex.\n Index.names : Get names on index.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays(\n ... [[1, 2], [3, 4], [5, 6]], names=['x', 'y', 'z']\n ... )\n >>> mi\n MultiIndex([(1, 3, 5),\n (2, 4, 6)],\n names=['x', 'y', 'z'])\n >>> mi.names\n FrozenList(['x', 'y', 'z'])\n ") @cache_readonly def inferred_type(self) -> str: return 'mixed' def _get_level_number(self, level) -> int: count = self.names.count(level) if count > 1 and (not is_integer(level)): raise ValueError(f'The name {level} occurs multiple times, use a level number') try: level = self.names.index(level) except ValueError as err: if not is_integer(level): raise KeyError(f'Level {level} not found') from err if level < 0: level += self.nlevels if level < 0: orig_level = level - self.nlevels raise IndexError(f'Too many levels: Index has only {self.nlevels} levels, {orig_level} is not a valid level number') from err elif level >= self.nlevels: raise IndexError(f'Too many levels: Index has only {self.nlevels} levels, not {level + 1}') from err return level @cache_readonly def is_monotonic_increasing(self) -> bool: if any((-1 in code for code in self.codes)): return False if all((level.is_monotonic_increasing for level in self.levels)): return libalgos.is_lexsorted([x.astype('int64', copy=False) for x in self.codes]) values = [self._get_level_values(i)._values for i in reversed(range(len(self.levels)))] try: sort_order = np.lexsort(values) return Index(sort_order).is_monotonic_increasing except TypeError: return Index(self._values).is_monotonic_increasing @cache_readonly def is_monotonic_decreasing(self) -> bool: return self[::-1].is_monotonic_increasing @doc(Index.duplicated) def duplicated(self, keep: DropKeep='first') -> npt.NDArray[np.bool_]: shape = tuple((len(lev) for lev in self.levels)) ids = get_group_index(self.codes, shape, sort=False, xnull=False) return duplicated(ids, keep) _duplicated = duplicated def fillna(self, value): raise NotImplementedError('isna is not defined for MultiIndex') @doc(Index.dropna) def dropna(self, how: AnyAll='any') -> MultiIndex: nans = [level_codes == -1 for level_codes in self.codes] if how == 'any': indexer = np.any(nans, axis=0) elif how == 'all': indexer = np.all(nans, axis=0) else: raise ValueError(f'invalid how option: {how}') new_codes = [level_codes[~indexer] for level_codes in self.codes] return self.set_codes(codes=new_codes) def _get_level_values(self, level: int, unique: bool=False) -> Index: lev = self.levels[level] level_codes = self.codes[level] name = self._names[level] if unique: level_codes = algos.unique(level_codes) filled = algos.take_nd(lev._values, level_codes, fill_value=lev._na_value) return lev._shallow_copy(filled, name=name) 
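# --- Usage sketch for the level-value helpers above (assumes the public
# MultiIndex API; example values are illustrative only):
#
#   mi = pd.MultiIndex.from_arrays([["a", "a", "b"], [1, None, 1]])
#   mi.get_level_values(1)   # maps codes back through the level values,
#                            # re-inserting NaN where the code is -1
#   mi.dropna(how="any")     # drops entries where any level code is -1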
def get_level_values(self, level) -> Index: level = self._get_level_number(level) values = self._get_level_values(level) return values @doc(Index.unique) def unique(self, level=None): if level is None: return self.drop_duplicates() else: level = self._get_level_number(level) return self._get_level_values(level=level, unique=True) def to_frame(self, index: bool=True, name=lib.no_default, allow_duplicates: bool=False) -> DataFrame: from pandas import DataFrame if name is not lib.no_default: if not is_list_like(name): raise TypeError("'name' must be a list / sequence of column names.") if len(name) != len(self.levels): raise ValueError("'name' should have same length as number of levels on index.") idx_names = name else: idx_names = self._get_level_names() if not allow_duplicates and len(set(idx_names)) != len(idx_names): raise ValueError('Cannot create duplicate column labels if allow_duplicates is False') result = DataFrame({level: self._get_level_values(level) for level in range(len(self.levels))}, copy=False) result.columns = idx_names if index: result.index = self return result def to_flat_index(self) -> Index: return Index(self._values, tupleize_cols=False) def _is_lexsorted(self) -> bool: return self._lexsort_depth == self.nlevels @cache_readonly def _lexsort_depth(self) -> int: if self.sortorder is not None: return self.sortorder return _lexsort_depth(self.codes, self.nlevels) def _sort_levels_monotonic(self, raise_if_incomparable: bool=False) -> MultiIndex: if self._is_lexsorted() and self.is_monotonic_increasing: return self new_levels = [] new_codes = [] for (lev, level_codes) in zip(self.levels, self.codes): if not lev.is_monotonic_increasing: try: indexer = lev.argsort() except TypeError: if raise_if_incomparable: raise else: lev = lev.take(indexer) indexer = ensure_platform_int(indexer) ri = lib.get_reverse_indexer(indexer, len(indexer)) level_codes = algos.take_nd(ri, level_codes, fill_value=-1) new_levels.append(lev) new_codes.append(level_codes) return MultiIndex(new_levels, new_codes, names=self.names, sortorder=self.sortorder, verify_integrity=False) def remove_unused_levels(self) -> MultiIndex: new_levels = [] new_codes = [] changed = False for (lev, level_codes) in zip(self.levels, self.codes): uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1 has_na = int(len(uniques) and uniques[0] == -1) if len(uniques) != len(lev) + has_na: if lev.isna().any() and len(uniques) == len(lev): break changed = True uniques = algos.unique(level_codes) if has_na: na_idx = np.where(uniques == -1)[0] uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]] code_mapping = np.zeros(len(lev) + has_na) code_mapping[uniques] = np.arange(len(uniques)) - has_na level_codes = code_mapping[level_codes] lev = lev.take(uniques[has_na:]) new_levels.append(lev) new_codes.append(level_codes) result = self.view() if changed: result._reset_identity() result._set_levels(new_levels, validate=False) result._set_codes(new_codes, validate=False) return result def __reduce__(self): d = {'levels': list(self.levels), 'codes': list(self.codes), 'sortorder': self.sortorder, 'names': list(self.names)} return (ibase._new_Index, (type(self), d), None) def __getitem__(self, key): if is_scalar(key): key = com.cast_scalar_indexer(key) retval = [] for (lev, level_codes) in zip(self.levels, self.codes): if level_codes[key] == -1: retval.append(np.nan) else: retval.append(lev[level_codes[key]]) return tuple(retval) else: sortorder = None if com.is_bool_indexer(key): key = np.asarray(key, dtype=bool) sortorder = 
self.sortorder elif isinstance(key, slice): if key.step is None or key.step > 0: sortorder = self.sortorder elif isinstance(key, Index): key = np.asarray(key) new_codes = [level_codes[key] for level_codes in self.codes] return MultiIndex(levels=self.levels, codes=new_codes, names=self.names, sortorder=sortorder, verify_integrity=False) def _getitem_slice(self: MultiIndex, slobj: slice) -> MultiIndex: sortorder = None if slobj.step is None or slobj.step > 0: sortorder = self.sortorder new_codes = [level_codes[slobj] for level_codes in self.codes] return type(self)(levels=self.levels, codes=new_codes, names=self._names, sortorder=sortorder, verify_integrity=False) @Appender(_index_shared_docs['take'] % _index_doc_kwargs) def take(self: MultiIndex, indices, axis: Axis=0, allow_fill: bool=True, fill_value=None, **kwargs) -> MultiIndex: nv.validate_take((), kwargs) indices = ensure_platform_int(indices) allow_fill = self._maybe_disallow_fill(allow_fill, fill_value, indices) if indices.ndim == 1 and lib.is_range_indexer(indices, len(self)): return self.copy() na_value = -1 taken = [lab.take(indices) for lab in self.codes] if allow_fill: mask = indices == -1 if mask.any(): masked = [] for new_label in taken: label_values = new_label label_values[mask] = na_value masked.append(np.asarray(label_values)) taken = masked return MultiIndex(levels=self.levels, codes=taken, names=self.names, verify_integrity=False) def append(self, other): if not isinstance(other, (list, tuple)): other = [other] if all((isinstance(o, MultiIndex) and o.nlevels >= self.nlevels for o in other)): codes = [] levels = [] names = [] for i in range(self.nlevels): level_values = self.levels[i] for mi in other: level_values = level_values.union(mi.levels[i]) level_codes = [recode_for_categories(mi.codes[i], mi.levels[i], level_values, copy=False) for mi in [self, *other]] level_name = self.names[i] if any((mi.names[i] != level_name for mi in other)): level_name = None codes.append(np.concatenate(level_codes)) levels.append(level_values) names.append(level_name) return MultiIndex(codes=codes, levels=levels, names=names, verify_integrity=False) to_concat = (self._values,) + tuple((k._values for k in other)) new_tuples = np.concatenate(to_concat) try: return MultiIndex.from_tuples(new_tuples) except (TypeError, IndexError): return Index(new_tuples) def argsort(self, *args, na_position: str='last', **kwargs) -> npt.NDArray[np.intp]: target = self._sort_levels_monotonic(raise_if_incomparable=True) keys = [lev.codes for lev in target._get_codes_for_sorting()] return lexsort_indexer(keys, na_position=na_position, codes_given=True) @Appender(_index_shared_docs['repeat'] % _index_doc_kwargs) def repeat(self, repeats: int, axis=None) -> MultiIndex: nv.validate_repeat((), {'axis': axis}) repeats = ensure_platform_int(repeats) return MultiIndex(levels=self.levels, codes=[level_codes.view(np.ndarray).astype(np.intp, copy=False).repeat(repeats) for level_codes in self.codes], names=self.names, sortorder=self.sortorder, verify_integrity=False) def drop(self, codes, level: Index | np.ndarray | Iterable[Hashable] | None=None, errors: IgnoreRaise='raise') -> MultiIndex: if level is not None: return self._drop_from_level(codes, level, errors) if not isinstance(codes, (np.ndarray, Index)): try: codes = com.index_labels_to_array(codes, dtype=np.dtype('object')) except ValueError: pass inds = [] for level_codes in codes: try: loc = self.get_loc(level_codes) if isinstance(loc, int): inds.append(loc) elif isinstance(loc, slice): step = loc.step if 
loc.step is not None else 1 inds.extend(range(loc.start, loc.stop, step)) elif com.is_bool_indexer(loc): if get_option('performance_warnings') and self._lexsort_depth == 0: warnings.warn('dropping on a non-lexsorted multi-index without a level parameter may impact performance.', PerformanceWarning, stacklevel=find_stack_level()) loc = loc.nonzero()[0] inds.extend(loc) else: msg = f'unsupported indexer of type {type(loc)}' raise AssertionError(msg) except KeyError: if errors != 'ignore': raise return self.delete(inds) def _drop_from_level(self, codes, level, errors: IgnoreRaise='raise') -> MultiIndex: codes = com.index_labels_to_array(codes) i = self._get_level_number(level) index = self.levels[i] values = index.get_indexer(codes) nan_codes = isna(codes) values[np.equal(nan_codes, False) & (values == -1)] = -2 if index.shape[0] == self.shape[0]: values[np.equal(nan_codes, True)] = -2 not_found = codes[values == -2] if len(not_found) != 0 and errors != 'ignore': raise KeyError(f'labels {not_found} not found in level') mask = ~algos.isin(self.codes[i], values) return self[mask] def swaplevel(self, i=-2, j=-1) -> MultiIndex: new_levels = list(self.levels) new_codes = list(self.codes) new_names = list(self.names) i = self._get_level_number(i) j = self._get_level_number(j) (new_levels[i], new_levels[j]) = (new_levels[j], new_levels[i]) (new_codes[i], new_codes[j]) = (new_codes[j], new_codes[i]) (new_names[i], new_names[j]) = (new_names[j], new_names[i]) return MultiIndex(levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False) def reorder_levels(self, order) -> MultiIndex: order = [self._get_level_number(i) for i in order] result = self._reorder_ilevels(order) return result def _reorder_ilevels(self, order) -> MultiIndex: if len(order) != self.nlevels: raise AssertionError(f'Length of order must be same as number of levels ({self.nlevels}), got {len(order)}') new_levels = [self.levels[i] for i in order] new_codes = [self.codes[i] for i in order] new_names = [self.names[i] for i in order] return MultiIndex(levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False) def _recode_for_new_levels(self, new_levels, copy: bool=True) -> Generator[np.ndarray, None, None]: if len(new_levels) > self.nlevels: raise AssertionError(f'Length of new_levels ({len(new_levels)}) must be <= self.nlevels ({self.nlevels})') for i in range(len(new_levels)): yield recode_for_categories(self.codes[i], self.levels[i], new_levels[i], copy=copy) def _get_codes_for_sorting(self) -> list[Categorical]: def cats(level_codes: np.ndarray) -> np.ndarray: return np.arange(level_codes.max() + 1 if len(level_codes) else 0, dtype=level_codes.dtype) return [Categorical.from_codes(level_codes, cats(level_codes), True, validate=False) for level_codes in self.codes] def sortlevel(self, level: IndexLabel=0, ascending: bool | list[bool]=True, sort_remaining: bool=True, na_position: str='first') -> tuple[MultiIndex, npt.NDArray[np.intp]]: if not is_list_like(level): level = [level] level = [self._get_level_number(lev) for lev in level] sortorder = None codes = [self.codes[lev] for lev in level] if isinstance(ascending, list): if not len(level) == len(ascending): raise ValueError('level must have same length as ascending') elif sort_remaining: codes.extend([self.codes[lev] for lev in range(len(self.levels)) if lev not in level]) else: sortorder = level[0] indexer = lexsort_indexer(codes, orders=ascending, na_position=na_position, codes_given=True) indexer = ensure_platform_int(indexer) new_codes = 
[level_codes.take(indexer) for level_codes in self.codes] new_index = MultiIndex(codes=new_codes, levels=self.levels, names=self.names, sortorder=sortorder, verify_integrity=False) return (new_index, indexer) def _wrap_reindex_result(self, target, indexer, preserve_names: bool): if not isinstance(target, MultiIndex): if indexer is None: target = self elif (indexer >= 0).all(): target = self.take(indexer) else: try: target = MultiIndex.from_tuples(target) except TypeError: return target target = self._maybe_preserve_names(target, preserve_names) return target def _maybe_preserve_names(self, target: IndexT, preserve_names: bool) -> IndexT: if preserve_names and target.nlevels == self.nlevels and (target.names != self.names): target = target.copy(deep=False) target.names = self.names return target def _check_indexing_error(self, key) -> None: if not is_hashable(key) or is_iterator(key): raise InvalidIndexError(key) @cache_readonly def _should_fallback_to_positional(self) -> bool: return self.levels[0]._should_fallback_to_positional def _get_indexer_strict(self, key, axis_name: str) -> tuple[Index, npt.NDArray[np.intp]]: keyarr = key if not isinstance(keyarr, Index): keyarr = com.asarray_tuplesafe(keyarr) if len(keyarr) and (not isinstance(keyarr[0], tuple)): indexer = self._get_indexer_level_0(keyarr) self._raise_if_missing(key, indexer, axis_name) return (self[indexer], indexer) return super()._get_indexer_strict(key, axis_name) def _raise_if_missing(self, key, indexer, axis_name: str) -> None: keyarr = key if not isinstance(key, Index): keyarr = com.asarray_tuplesafe(key) if len(keyarr) and (not isinstance(keyarr[0], tuple)): mask = indexer == -1 if mask.any(): check = self.levels[0].get_indexer(keyarr) cmask = check == -1 if cmask.any(): raise KeyError(f'{keyarr[cmask]} not in index') raise KeyError(f'{keyarr} not in index') else: return super()._raise_if_missing(key, indexer, axis_name) def _get_indexer_level_0(self, target) -> npt.NDArray[np.intp]: lev = self.levels[0] codes = self._codes[0] cat = Categorical.from_codes(codes=codes, categories=lev, validate=False) ci = Index(cat) return ci.get_indexer_for(target) def get_slice_bound(self, label: Hashable | Sequence[Hashable], side: Literal['left', 'right']) -> int: if not isinstance(label, tuple): label = (label,) return self._partial_tup_index(label, side=side) def slice_locs(self, start=None, end=None, step=None) -> tuple[int, int]: return super().slice_locs(start, end, step) def _partial_tup_index(self, tup: tuple, side: Literal['left', 'right']='left'): if len(tup) > self._lexsort_depth: raise UnsortedIndexError(f'Key length ({len(tup)}) was greater than MultiIndex lexsort depth ({self._lexsort_depth})') n = len(tup) (start, end) = (0, len(self)) zipped = zip(tup, self.levels, self.codes) for (k, (lab, lev, level_codes)) in enumerate(zipped): section = level_codes[start:end] loc: npt.NDArray[np.intp] | np.intp | int if lab not in lev and (not isna(lab)): try: loc = algos.searchsorted(lev, lab, side=side) except TypeError as err: raise TypeError(f'Level type mismatch: {lab}') from err if not is_integer(loc): raise TypeError(f'Level type mismatch: {lab}') if side == 'right' and loc >= 0: loc -= 1 return start + algos.searchsorted(section, loc, side=side) idx = self._get_loc_single_level_index(lev, lab) if isinstance(idx, slice) and k < n - 1: start = idx.start end = idx.stop elif k < n - 1: end = start + algos.searchsorted(section, idx, side='right') start = start + algos.searchsorted(section, idx, side='left') elif isinstance(idx, 
slice): idx = idx.start return start + algos.searchsorted(section, idx, side=side) else: return start + algos.searchsorted(section, idx, side=side) def _get_loc_single_level_index(self, level_index: Index, key: Hashable) -> int: if is_scalar(key) and isna(key): return -1 else: return level_index.get_loc(key) def get_loc(self, key): self._check_indexing_error(key) def _maybe_to_slice(loc): if not isinstance(loc, np.ndarray) or loc.dtype != np.intp: return loc loc = lib.maybe_indices_to_slice(loc, len(self)) if isinstance(loc, slice): return loc mask = np.empty(len(self), dtype='bool') mask.fill(False) mask[loc] = True return mask if not isinstance(key, tuple): loc = self._get_level_indexer(key, level=0) return _maybe_to_slice(loc) keylen = len(key) if self.nlevels < keylen: raise KeyError(f'Key length ({keylen}) exceeds index depth ({self.nlevels})') if keylen == self.nlevels and self.is_unique: try: return self._engine.get_loc(key) except KeyError as err: raise KeyError(key) from err except TypeError: (loc, _) = self.get_loc_level(key, range(self.nlevels)) return loc i = self._lexsort_depth (lead_key, follow_key) = (key[:i], key[i:]) if not lead_key: start = 0 stop = len(self) else: try: (start, stop) = self.slice_locs(lead_key, lead_key) except TypeError as err: raise KeyError(key) from err if start == stop: raise KeyError(key) if not follow_key: return slice(start, stop) if get_option('performance_warnings'): warnings.warn('indexing past lexsort depth may impact performance.', PerformanceWarning, stacklevel=find_stack_level()) loc = np.arange(start, stop, dtype=np.intp) for (i, k) in enumerate(follow_key, len(lead_key)): mask = self.codes[i][loc] == self._get_loc_single_level_index(self.levels[i], k) if not mask.all(): loc = loc[mask] if not len(loc): raise KeyError(key) return _maybe_to_slice(loc) if len(loc) != stop - start else slice(start, stop) def get_loc_level(self, key, level: IndexLabel=0, drop_level: bool=True): if not isinstance(level, (range, list, tuple)): level = self._get_level_number(level) else: level = [self._get_level_number(lev) for lev in level] (loc, mi) = self._get_loc_level(key, level=level) if not drop_level: if lib.is_integer(loc): mi = self[loc:loc + 1] else: mi = self[loc] return (loc, mi) def _get_loc_level(self, key, level: int | list[int]=0): def maybe_mi_droplevels(indexer, levels): new_index = self[indexer] for i in sorted(levels, reverse=True): new_index = new_index._drop_level_numbers([i]) return new_index if isinstance(level, (tuple, list)): if len(key) != len(level): raise AssertionError('Key for location must have same length as number of levels') result = None for (lev, k) in zip(level, key): (loc, new_index) = self._get_loc_level(k, level=lev) if isinstance(loc, slice): mask = np.zeros(len(self), dtype=bool) mask[loc] = True loc = mask result = loc if result is None else result & loc try: mi = maybe_mi_droplevels(result, level) except ValueError: mi = self[result] return (result, mi) if isinstance(key, list): key = tuple(key) if isinstance(key, tuple) and level == 0: try: if key in self.levels[0]: indexer = self._get_level_indexer(key, level=level) new_index = maybe_mi_droplevels(indexer, [0]) return (indexer, new_index) except (TypeError, InvalidIndexError): pass if not any((isinstance(k, slice) for k in key)): if len(key) == self.nlevels and self.is_unique: try: return (self._engine.get_loc(key), None) except KeyError as err: raise KeyError(key) from err except TypeError: pass indexer = self.get_loc(key) ilevels = [i for i in range(len(key)) if 
key[i] != slice(None, None)] if len(ilevels) == self.nlevels: if is_integer(indexer): return (indexer, None) ilevels = [i for i in range(len(key)) if (not isinstance(key[i], str) or not self.levels[i]._supports_partial_string_indexing) and key[i] != slice(None, None)] if len(ilevels) == self.nlevels: ilevels = [] return (indexer, maybe_mi_droplevels(indexer, ilevels)) else: indexer = None for (i, k) in enumerate(key): if not isinstance(k, slice): loc_level = self._get_level_indexer(k, level=i) if isinstance(loc_level, slice): if com.is_null_slice(loc_level) or com.is_full_slice(loc_level, len(self)): continue k_index = np.zeros(len(self), dtype=bool) k_index[loc_level] = True else: k_index = loc_level elif com.is_null_slice(k): continue else: raise TypeError(f'Expected label or tuple of labels, got {key}') if indexer is None: indexer = k_index else: indexer &= k_index if indexer is None: indexer = slice(None, None) ilevels = [i for i in range(len(key)) if key[i] != slice(None, None)] return (indexer, maybe_mi_droplevels(indexer, ilevels)) else: indexer = self._get_level_indexer(key, level=level) if isinstance(key, str) and self.levels[level]._supports_partial_string_indexing: check = self.levels[level].get_loc(key) if not is_integer(check): return (indexer, self[indexer]) try: result_index = maybe_mi_droplevels(indexer, [level]) except ValueError: result_index = self[indexer] return (indexer, result_index) def _get_level_indexer(self, key, level: int=0, indexer: npt.NDArray[np.bool_] | None=None): level_index = self.levels[level] level_codes = self.codes[level] def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes): if indexer is not None: codes = codes[indexer] if step is None or step == 1: new_indexer = (codes >= start) & (codes < stop) else: r = np.arange(start, stop, step, dtype=codes.dtype) new_indexer = algos.isin(codes, r) if indexer is None: return new_indexer indexer = indexer.copy() indexer[indexer] = new_indexer return indexer if isinstance(key, slice): step = key.step is_negative_step = step is not None and step < 0 try: if key.start is not None: start = level_index.get_loc(key.start) elif is_negative_step: start = len(level_index) - 1 else: start = 0 if key.stop is not None: stop = level_index.get_loc(key.stop) elif is_negative_step: stop = 0 elif isinstance(start, slice): stop = len(level_index) else: stop = len(level_index) - 1 except KeyError: start = stop = level_index.slice_indexer(key.start, key.stop, key.step) step = start.step if isinstance(start, slice) or isinstance(stop, slice): start = getattr(start, 'start', start) stop = getattr(stop, 'stop', stop) return convert_indexer(start, stop, step) elif level > 0 or self._lexsort_depth == 0 or step is not None: stop = stop - 1 if is_negative_step else stop + 1 return convert_indexer(start, stop, step) else: i = algos.searchsorted(level_codes, start, side='left') j = algos.searchsorted(level_codes, stop, side='right') return slice(i, j, step) else: idx = self._get_loc_single_level_index(level_index, key) if level > 0 or self._lexsort_depth == 0: if isinstance(idx, slice): locs = (level_codes >= idx.start) & (level_codes < idx.stop) return locs locs = np.asarray(level_codes == idx, dtype=bool) if not locs.any(): raise KeyError(key) return locs if isinstance(idx, slice): start = algos.searchsorted(level_codes, idx.start, side='left') end = algos.searchsorted(level_codes, idx.stop, side='left') else: start = algos.searchsorted(level_codes, idx, side='left') end = algos.searchsorted(level_codes, idx, 
side='right') if start == end: raise KeyError(key) return slice(start, end) def get_locs(self, seq) -> npt.NDArray[np.intp]: true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s] if true_slices and true_slices[-1] >= self._lexsort_depth: raise UnsortedIndexError(f'MultiIndex slicing requires the index to be lexsorted: slicing on levels {true_slices}, lexsort depth {self._lexsort_depth}') if any((x is Ellipsis for x in seq)): raise NotImplementedError('MultiIndex does not support indexing with Ellipsis') n = len(self) def _to_bool_indexer(indexer) -> npt.NDArray[np.bool_]: if isinstance(indexer, slice): new_indexer = np.zeros(n, dtype=np.bool_) new_indexer[indexer] = True return new_indexer return indexer indexer: npt.NDArray[np.bool_] | None = None for (i, k) in enumerate(seq): lvl_indexer: npt.NDArray[np.bool_] | slice | None = None if com.is_bool_indexer(k): if len(k) != n: raise ValueError('cannot index with a boolean indexer that is not the same length as the index') if isinstance(k, (ABCSeries, Index)): k = k._values lvl_indexer = np.asarray(k) if indexer is None: lvl_indexer = lvl_indexer.copy() elif is_list_like(k): try: lvl_indexer = self._get_level_indexer(k, level=i, indexer=indexer) except (InvalidIndexError, TypeError, KeyError) as err: for x in k: if not is_hashable(x): raise err item_indexer = self._get_level_indexer(x, level=i, indexer=indexer) if lvl_indexer is None: lvl_indexer = _to_bool_indexer(item_indexer) elif isinstance(item_indexer, slice): lvl_indexer[item_indexer] = True else: lvl_indexer |= item_indexer if lvl_indexer is None: return np.array([], dtype=np.intp) elif com.is_null_slice(k): if indexer is None and i == len(seq) - 1: return np.arange(n, dtype=np.intp) continue else: lvl_indexer = self._get_level_indexer(k, level=i, indexer=indexer) lvl_indexer = _to_bool_indexer(lvl_indexer) if indexer is None: indexer = lvl_indexer else: indexer &= lvl_indexer if not np.any(indexer) and np.any(lvl_indexer): raise KeyError(seq) if indexer is None: return np.array([], dtype=np.intp) pos_indexer = indexer.nonzero()[0] return self._reorder_indexer(seq, pos_indexer) def _reorder_indexer(self, seq: tuple[Scalar | Iterable | AnyArrayLike, ...], indexer: npt.NDArray[np.intp]) -> npt.NDArray[np.intp]: need_sort = False for (i, k) in enumerate(seq): if com.is_null_slice(k) or com.is_bool_indexer(k) or is_scalar(k): pass elif is_list_like(k): if len(k) <= 1: pass elif self._is_lexsorted(): k_codes = self.levels[i].get_indexer(k) k_codes = k_codes[k_codes >= 0] need_sort = (k_codes[:-1] > k_codes[1:]).any() else: need_sort = True elif isinstance(k, slice): if self._is_lexsorted(): need_sort = k.step is not None and k.step < 0 else: need_sort = True else: need_sort = True if need_sort: break if not need_sort: return indexer n = len(self) keys: tuple[np.ndarray, ...] 
= () for (i, k) in enumerate(seq): if is_scalar(k): k = [k] if com.is_bool_indexer(k): new_order = np.arange(n)[indexer] elif is_list_like(k): if not isinstance(k, (np.ndarray, ExtensionArray, Index, ABCSeries)): k = sanitize_array(k, None) k = algos.unique(k) key_order_map = np.ones(len(self.levels[i]), dtype=np.uint64) * len(self.levels[i]) level_indexer = self.levels[i].get_indexer(k) level_indexer = level_indexer[level_indexer >= 0] key_order_map[level_indexer] = np.arange(len(level_indexer)) new_order = key_order_map[self.codes[i][indexer]] elif isinstance(k, slice) and k.step is not None and (k.step < 0): new_order = np.arange(n - 1, -1, -1)[indexer] elif isinstance(k, slice) and k.start is None and (k.stop is None): new_order = np.ones((n,), dtype=np.intp)[indexer] else: new_order = np.arange(n)[indexer] keys = (new_order,) + keys ind = np.lexsort(keys) return indexer[ind] def truncate(self, before=None, after=None) -> MultiIndex: if after and before and (after < before): raise ValueError('after < before') (i, j) = self.levels[0].slice_locs(before, after) (left, right) = self.slice_locs(before, after) new_levels = list(self.levels) new_levels[0] = new_levels[0][i:j] new_codes = [level_codes[left:right] for level_codes in self.codes] new_codes[0] = new_codes[0] - i return MultiIndex(levels=new_levels, codes=new_codes, names=self._names, verify_integrity=False) def equals(self, other: object) -> bool: if self.is_(other): return True if not isinstance(other, Index): return False if len(self) != len(other): return False if not isinstance(other, MultiIndex): if not self._should_compare(other): return False return array_equivalent(self._values, other._values) if self.nlevels != other.nlevels: return False for i in range(self.nlevels): self_codes = self.codes[i] other_codes = other.codes[i] self_mask = self_codes == -1 other_mask = other_codes == -1 if not np.array_equal(self_mask, other_mask): return False self_level = self.levels[i] other_level = other.levels[i] new_codes = recode_for_categories(other_codes, other_level, self_level, copy=False) if not np.array_equal(self_codes, new_codes): return False if not self_level[:0].equals(other_level[:0]): return False return True def equal_levels(self, other: MultiIndex) -> bool: if self.nlevels != other.nlevels: return False for i in range(self.nlevels): if not self.levels[i].equals(other.levels[i]): return False return True def _union(self, other, sort) -> MultiIndex: (other, result_names) = self._convert_can_do_setop(other) if other.has_duplicates: result = super()._union(other, sort) if isinstance(result, MultiIndex): return result return MultiIndex.from_arrays(zip(*result), sortorder=None, names=result_names) else: right_missing = other.difference(self, sort=False) if len(right_missing): result = self.append(right_missing) else: result = self._get_reconciled_name_object(other) if sort is not False: try: result = result.sort_values() except TypeError: if sort is True: raise warnings.warn('The values in the array are unorderable. 
Pass `sort=False` to suppress this warning.', RuntimeWarning, stacklevel=find_stack_level()) return result def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: return is_object_dtype(dtype) def _get_reconciled_name_object(self, other) -> MultiIndex: names = self._maybe_match_names(other) if self.names != names: return self.rename(names) return self def _maybe_match_names(self, other): if len(self.names) != len(other.names): return [None] * len(self.names) names = [] for (a_name, b_name) in zip(self.names, other.names): if a_name == b_name: names.append(a_name) else: names.append(None) return names def _wrap_intersection_result(self, other, result) -> MultiIndex: (_, result_names) = self._convert_can_do_setop(other) return result.set_names(result_names) def _wrap_difference_result(self, other, result: MultiIndex) -> MultiIndex: (_, result_names) = self._convert_can_do_setop(other) if len(result) == 0: return result.remove_unused_levels().set_names(result_names) else: return result.set_names(result_names) def _convert_can_do_setop(self, other): result_names = self.names if not isinstance(other, Index): if len(other) == 0: return (self[:0], self.names) else: msg = 'other must be a MultiIndex or a list of tuples' try: other = MultiIndex.from_tuples(other, names=self.names) except (ValueError, TypeError) as err: raise TypeError(msg) from err else: result_names = get_unanimous_names(self, other) return (other, result_names) @doc(Index.astype) def astype(self, dtype, copy: bool=True): dtype = pandas_dtype(dtype) if isinstance(dtype, CategoricalDtype): msg = '> 1 ndim Categorical are not supported at this time' raise NotImplementedError(msg) if not is_object_dtype(dtype): raise TypeError('Setting a MultiIndex dtype to anything other than object is not supported') if copy is True: return self._view() return self def _validate_fill_value(self, item): if isinstance(item, MultiIndex): if item.nlevels != self.nlevels: raise ValueError('Item must have length equal to number of levels.') return item._values elif not isinstance(item, tuple): item = (item,) + ('',) * (self.nlevels - 1) elif len(item) != self.nlevels: raise ValueError('Item must have length equal to number of levels.') return item def putmask(self, mask, value: MultiIndex) -> MultiIndex: (mask, noop) = validate_putmask(self, mask) if noop: return self.copy() if len(mask) == len(value): subset = value[mask].remove_unused_levels() else: subset = value.remove_unused_levels() new_levels = [] new_codes = [] for (i, (value_level, level, level_codes)) in enumerate(zip(subset.levels, self.levels, self.codes)): new_level = level.union(value_level, sort=False) value_codes = new_level.get_indexer_for(subset.get_level_values(i)) new_code = ensure_int64(level_codes) new_code[mask] = value_codes new_levels.append(new_level) new_codes.append(new_code) return MultiIndex(levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False) def insert(self, loc: int, item) -> MultiIndex: item = self._validate_fill_value(item) new_levels = [] new_codes = [] for (k, level, level_codes) in zip(item, self.levels, self.codes): if k not in level: if isna(k): lev_loc = -1 else: lev_loc = len(level) level = level.insert(lev_loc, k) else: lev_loc = level.get_loc(k) new_levels.append(level) new_codes.append(np.insert(ensure_int64(level_codes), loc, lev_loc)) return MultiIndex(levels=new_levels, codes=new_codes, names=self.names, verify_integrity=False) def delete(self, loc) -> MultiIndex: new_codes = [np.delete(level_codes, loc) for level_codes in 
self.codes] return MultiIndex(levels=self.levels, codes=new_codes, names=self.names, verify_integrity=False) @doc(Index.isin) def isin(self, values, level=None) -> npt.NDArray[np.bool_]: if isinstance(values, Generator): values = list(values) if level is None: if len(values) == 0: return np.zeros((len(self),), dtype=np.bool_) if not isinstance(values, MultiIndex): values = MultiIndex.from_tuples(values) return values.unique().get_indexer_for(self) != -1 else: num = self._get_level_number(level) levs = self.get_level_values(num) if levs.size == 0: return np.zeros(len(levs), dtype=np.bool_) return levs.isin(values) rename = Index.set_names __add__ = make_invalid_op('__add__') __radd__ = make_invalid_op('__radd__') __iadd__ = make_invalid_op('__iadd__') __sub__ = make_invalid_op('__sub__') __rsub__ = make_invalid_op('__rsub__') __isub__ = make_invalid_op('__isub__') __pow__ = make_invalid_op('__pow__') __rpow__ = make_invalid_op('__rpow__') __mul__ = make_invalid_op('__mul__') __rmul__ = make_invalid_op('__rmul__') __floordiv__ = make_invalid_op('__floordiv__') __rfloordiv__ = make_invalid_op('__rfloordiv__') __truediv__ = make_invalid_op('__truediv__') __rtruediv__ = make_invalid_op('__rtruediv__') __mod__ = make_invalid_op('__mod__') __rmod__ = make_invalid_op('__rmod__') __divmod__ = make_invalid_op('__divmod__') __rdivmod__ = make_invalid_op('__rdivmod__') __neg__ = make_invalid_op('__neg__') __pos__ = make_invalid_op('__pos__') __abs__ = make_invalid_op('__abs__') __invert__ = make_invalid_op('__invert__') def _lexsort_depth(codes: list[np.ndarray], nlevels: int) -> int: int64_codes = [ensure_int64(level_codes) for level_codes in codes] for k in range(nlevels, 0, -1): if libalgos.is_lexsorted(int64_codes[:k]): return k return 0 def sparsify_labels(label_list, start: int=0, sentinel: object=''): pivoted = list(zip(*label_list)) k = len(label_list) result = pivoted[:start + 1] prev = pivoted[start] for cur in pivoted[start + 1:]: sparse_cur = [] for (i, (p, t)) in enumerate(zip(prev, cur)): if i == k - 1: sparse_cur.append(t) result.append(sparse_cur) break if p == t: sparse_cur.append(sentinel) else: sparse_cur.extend(cur[i:]) result.append(sparse_cur) break prev = cur return list(zip(*result)) def _get_na_rep(dtype: DtypeObj) -> str: if isinstance(dtype, ExtensionDtype): return f'{dtype.na_value}' else: dtype_type = dtype.type return {np.datetime64: 'NaT', np.timedelta64: 'NaT'}.get(dtype_type, 'NaN') def maybe_droplevels(index: Index, key) -> Index: original_index = index if isinstance(key, tuple): for _ in key: try: index = index._drop_level_numbers([0]) except ValueError: return original_index else: try: index = index._drop_level_numbers([0]) except ValueError: pass return index def _coerce_indexer_frozen(array_like, categories, copy: bool=False) -> np.ndarray: array_like = coerce_indexer_dtype(array_like, categories) if copy: array_like = array_like.copy() array_like.flags.writeable = False return array_like def _require_listlike(level, arr, arrname: str): if level is not None and (not is_list_like(level)): if not is_list_like(arr): raise TypeError(f'{arrname} must be list-like') if len(arr) > 0 and is_list_like(arr[0]): raise TypeError(f'{arrname} must be list-like') level = [level] arr = [arr] elif level is None or is_list_like(level): if not is_list_like(arr) or not is_list_like(arr[0]): raise TypeError(f'{arrname} must be list of lists-like') return (level, arr) def cartesian_product(X: list[np.ndarray]) -> list[np.ndarray]: msg = 'Input must be a list-like of list-likes' if not 
is_list_like(X): raise TypeError(msg) for x in X: if not is_list_like(x): raise TypeError(msg) if len(X) == 0: return [] lenX = np.fromiter((len(x) for x in X), dtype=np.intp) cumprodX = np.cumprod(lenX) if np.any(cumprodX < 0): raise ValueError('Product space too large to allocate arrays!') a = np.roll(cumprodX, 1) a[0] = 1 if cumprodX[-1] != 0: b = cumprodX[-1] / cumprodX else: b = np.zeros_like(cumprodX) return [np.tile(np.repeat(x, b[i]), np.prod(a[i])) for (i, x) in enumerate(X)] # File: pandas-main/pandas/core/indexes/period.py from __future__ import annotations from datetime import datetime, timedelta from typing import TYPE_CHECKING import numpy as np from pandas._libs import index as libindex from pandas._libs.tslibs import BaseOffset, NaT, Period, Resolution, Tick from pandas._libs.tslibs.dtypes import OFFSET_TO_PERIOD_FREQSTR from pandas.util._decorators import cache_readonly, doc from pandas.core.dtypes.common import is_integer from pandas.core.dtypes.dtypes import PeriodDtype from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import is_valid_na_for_dtype from pandas.core.arrays.period import PeriodArray, period_array, raise_on_incompatible, validate_dtype_freq import pandas.core.common as com import pandas.core.indexes.base as ibase from pandas.core.indexes.base import maybe_extract_name from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin from pandas.core.indexes.datetimes import DatetimeIndex, Index from pandas.core.indexes.extension import inherit_names if TYPE_CHECKING: from collections.abc import Hashable from pandas._typing import Dtype, DtypeObj, Self, npt _index_doc_kwargs = dict(ibase._index_doc_kwargs) _index_doc_kwargs.update({'target_klass': 'PeriodIndex or list of Periods'}) _shared_doc_kwargs = {'klass': 'PeriodArray'} def _new_PeriodIndex(cls, **d): values = d.pop('data') if values.dtype == 'int64': freq = d.pop('freq', None) dtype = PeriodDtype(freq) values = PeriodArray(values, dtype=dtype) return cls._simple_new(values, **d) else: return cls(values, **d) @inherit_names(['strftime', 'start_time', 'end_time'] + PeriodArray._field_ops, PeriodArray, wrap=True) @inherit_names(['is_leap_year'], PeriodArray) class PeriodIndex(DatetimeIndexOpsMixin): _typ = 'periodindex' _data: PeriodArray freq: BaseOffset dtype: PeriodDtype _data_cls = PeriodArray _supports_partial_string_indexing = True @property def _engine_type(self) -> type[libindex.PeriodEngine]: return libindex.PeriodEngine @cache_readonly def _resolution_obj(self) -> Resolution: return self.dtype._resolution_obj @doc(PeriodArray.asfreq, other='arrays.PeriodArray', other_name='PeriodArray', **_shared_doc_kwargs) def asfreq(self, freq=None, how: str='E') -> Self: arr = self._data.asfreq(freq, how) return type(self)._simple_new(arr, name=self.name) @doc(PeriodArray.to_timestamp) def to_timestamp(self, freq=None, how: str='start') -> DatetimeIndex: arr = self._data.to_timestamp(freq, how) return DatetimeIndex._simple_new(arr, name=self.name) @property @doc(PeriodArray.hour.fget) def hour(self) -> Index: return Index(self._data.hour, name=self.name) @property @doc(PeriodArray.minute.fget) def minute(self) -> Index: return Index(self._data.minute, name=self.name) @property @doc(PeriodArray.second.fget) def second(self) -> Index: return Index(self._data.second, name=self.name) def __new__(cls, data=None, freq=None, dtype: Dtype | None=None, copy: bool=False, name: Hashable | None=None) -> Self: refs = None if not copy and isinstance(data, (Index, ABCSeries)): refs = 
data._references name = maybe_extract_name(name, data, cls) freq = validate_dtype_freq(dtype, freq) if freq and isinstance(data, cls) and (data.freq != freq): data = data.asfreq(freq) data = period_array(data=data, freq=freq) if copy: data = data.copy() return cls._simple_new(data, name=name, refs=refs) @classmethod def from_fields(cls, *, year=None, quarter=None, month=None, day=None, hour=None, minute=None, second=None, freq=None) -> Self: fields = {'year': year, 'quarter': quarter, 'month': month, 'day': day, 'hour': hour, 'minute': minute, 'second': second} fields = {key: value for (key, value) in fields.items() if value is not None} arr = PeriodArray._from_fields(fields=fields, freq=freq) return cls._simple_new(arr) @classmethod def from_ordinals(cls, ordinals, *, freq, name=None) -> Self: ordinals = np.asarray(ordinals, dtype=np.int64) dtype = PeriodDtype(freq) data = PeriodArray._simple_new(ordinals, dtype=dtype) return cls._simple_new(data, name=name) @property def values(self) -> npt.NDArray[np.object_]: return np.asarray(self, dtype=object) def _maybe_convert_timedelta(self, other) -> int | npt.NDArray[np.int64]: if isinstance(other, (timedelta, np.timedelta64, Tick, np.ndarray)): if isinstance(self.freq, Tick): delta = self._data._check_timedeltalike_freq_compat(other) return delta elif isinstance(other, BaseOffset): if other.base == self.freq.base: return other.n raise raise_on_incompatible(self, other) elif is_integer(other): assert isinstance(other, int) return other raise raise_on_incompatible(self, None) def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: return self.dtype == dtype def asof_locs(self, where: Index, mask: npt.NDArray[np.bool_]) -> np.ndarray: if isinstance(where, DatetimeIndex): where = PeriodIndex(where._values, freq=self.freq) elif not isinstance(where, PeriodIndex): raise TypeError('asof_locs `where` must be DatetimeIndex or PeriodIndex') return super().asof_locs(where, mask) @property def is_full(self) -> bool: if len(self) == 0: return True if not self.is_monotonic_increasing: raise ValueError('Index is not monotonic') values = self.asi8 return bool((values[1:] - values[:-1] < 2).all()) @property def inferred_type(self) -> str: return 'period' def _convert_tolerance(self, tolerance, target): tolerance = super()._convert_tolerance(tolerance, target) if self.dtype == target.dtype: tolerance = self._maybe_convert_timedelta(tolerance) return tolerance def get_loc(self, key): orig_key = key self._check_indexing_error(key) if is_valid_na_for_dtype(key, self.dtype): key = NaT elif isinstance(key, str): try: (parsed, reso) = self._parse_with_reso(key) except ValueError as err: raise KeyError(f"Cannot interpret '{key}' as period") from err if self._can_partial_date_slice(reso): try: return self._partial_date_slice(reso, parsed) except KeyError as err: raise KeyError(key) from err if reso == self._resolution_obj: key = self._cast_partial_indexing_scalar(parsed) else: raise KeyError(key) elif isinstance(key, Period): self._disallow_mismatched_indexing(key) elif isinstance(key, datetime): key = self._cast_partial_indexing_scalar(key) else: raise KeyError(key) try: return Index.get_loc(self, key) except KeyError as err: raise KeyError(orig_key) from err def _disallow_mismatched_indexing(self, key: Period) -> None: if key._dtype != self.dtype: raise KeyError(key) def _cast_partial_indexing_scalar(self, label: datetime) -> Period: try: period = Period(label, freq=self.freq) except ValueError as err: raise KeyError(label) from err return period 
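# Illustrative sketch, not part of the pandas source above: how the PeriodIndex.get_loc
# machinery defined in this class resolves exact Period keys, datetime keys, and coarser
# partial date strings. The index values below are invented for the example.
import pandas as pd

pidx = pd.PeriodIndex(["2023-01", "2023-02", "2023-03"], freq="M")

# An exact Period key with a matching dtype is passed straight to Index.get_loc.
loc_exact = pidx.get_loc(pd.Period("2023-02", freq="M"))   # 1

# A datetime-like key is cast to a Period at the index frequency
# (the _cast_partial_indexing_scalar path above).
loc_datetime = pidx.get_loc(pd.Timestamp("2023-03-15"))    # 2

# A string coarser than the index resolution takes the partial date slice path
# and yields a slice covering all matching periods.
loc_partial = pidx.get_loc("2023")                         # slice over all three months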
@doc(DatetimeIndexOpsMixin._maybe_cast_slice_bound) def _maybe_cast_slice_bound(self, label, side: str): if isinstance(label, datetime): label = self._cast_partial_indexing_scalar(label) return super()._maybe_cast_slice_bound(label, side) def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime): freq = OFFSET_TO_PERIOD_FREQSTR.get(reso.attr_abbrev, reso.attr_abbrev) iv = Period(parsed, freq=freq) return (iv.asfreq(self.freq, how='start'), iv.asfreq(self.freq, how='end')) @doc(DatetimeIndexOpsMixin.shift) def shift(self, periods: int=1, freq=None) -> Self: if freq is not None: raise TypeError(f'`freq` argument is not supported for {type(self).__name__}.shift') return self + periods def period_range(start=None, end=None, periods: int | None=None, freq=None, name: Hashable | None=None) -> PeriodIndex: if com.count_not_none(start, end, periods) != 2: raise ValueError('Of the three parameters: start, end, and periods, exactly two must be specified') if freq is None and (not isinstance(start, Period) and (not isinstance(end, Period))): freq = 'D' (data, freq) = PeriodArray._generate_range(start, end, periods, freq) dtype = PeriodDtype(freq) data = PeriodArray(data, dtype=dtype) return PeriodIndex(data, name=name) # File: pandas-main/pandas/core/indexes/range.py from __future__ import annotations from collections.abc import Callable, Hashable, Iterator from datetime import timedelta import operator from sys import getsizeof from typing import TYPE_CHECKING, Any, Literal, cast, overload import numpy as np from pandas._libs import index as libindex, lib from pandas._libs.lib import no_default from pandas.compat.numpy import function as nv from pandas.util._decorators import cache_readonly, doc from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.common import ensure_platform_int, ensure_python_int, is_float, is_integer, is_scalar, is_signed_integer_dtype from pandas.core.dtypes.generic import ABCTimedeltaIndex from pandas.core import ops import pandas.core.common as com from pandas.core.construction import extract_array from pandas.core.indexers import check_array_indexer import pandas.core.indexes.base as ibase from pandas.core.indexes.base import Index, maybe_extract_name from pandas.core.ops.common import unpack_zerodim_and_defer if TYPE_CHECKING: from pandas._typing import Axis, Dtype, JoinHow, NaPosition, NumpySorter, Self, npt from pandas import Series _empty_range = range(0) _dtype_int64 = np.dtype(np.int64) def min_fitting_element(start: int, step: int, lower_limit: int) -> int: no_steps = -(-(lower_limit - start) // abs(step)) return start + abs(step) * no_steps class RangeIndex(Index): _typ = 'rangeindex' _dtype_validation_metadata = (is_signed_integer_dtype, 'signed integer') _range: range _values: np.ndarray @property def _engine_type(self) -> type[libindex.Int64Engine]: return libindex.Int64Engine def __new__(cls, start=None, stop=None, step=None, dtype: Dtype | None=None, copy: bool=False, name: Hashable | None=None) -> Self: cls._validate_dtype(dtype) name = maybe_extract_name(name, start, cls) if isinstance(start, cls): return start.copy(name=name) elif isinstance(start, range): return cls._simple_new(start, name=name) if com.all_none(start, stop, step): raise TypeError('RangeIndex(...) 
must be called with integers') start = ensure_python_int(start) if start is not None else 0 if stop is None: (start, stop) = (0, start) else: stop = ensure_python_int(stop) step = ensure_python_int(step) if step is not None else 1 if step == 0: raise ValueError('Step must not be zero') rng = range(start, stop, step) return cls._simple_new(rng, name=name) @classmethod def from_range(cls, data: range, name=None, dtype: Dtype | None=None) -> Self: if not isinstance(data, range): raise TypeError(f'{cls.__name__}(...) must be called with object coercible to a range, {data!r} was passed') cls._validate_dtype(dtype) return cls._simple_new(data, name=name) @classmethod def _simple_new(cls, values: range, name: Hashable | None=None) -> Self: result = object.__new__(cls) assert isinstance(values, range) result._range = values result._name = name result._cache = {} result._reset_identity() result._references = None return result @classmethod def _validate_dtype(cls, dtype: Dtype | None) -> None: if dtype is None: return (validation_func, expected) = cls._dtype_validation_metadata if not validation_func(dtype): raise ValueError(f'Incorrect `dtype` passed: expected {expected}, received {dtype}') @cache_readonly def _constructor(self) -> type[Index]: return Index @cache_readonly def _data(self) -> np.ndarray: return np.arange(self.start, self.stop, self.step, dtype=np.int64) def _get_data_as_items(self) -> list[tuple[str, int]]: rng = self._range return [('start', rng.start), ('stop', rng.stop), ('step', rng.step)] def __reduce__(self): d = {'name': self._name} d.update(dict(self._get_data_as_items())) return (ibase._new_Index, (type(self), d), None) def _format_attrs(self): attrs = cast('list[tuple[str, str | int]]', self._get_data_as_items()) if self._name is not None: attrs.append(('name', ibase.default_pprint(self._name))) return attrs def _format_with_header(self, *, header: list[str], na_rep: str) -> list[str]: if not len(self._range): return header first_val_str = str(self._range[0]) last_val_str = str(self._range[-1]) max_length = max(len(first_val_str), len(last_val_str)) return header + [f'{x:<{max_length}}' for x in self._range] @property def start(self) -> int: return self._range.start @property def stop(self) -> int: return self._range.stop @property def step(self) -> int: return self._range.step @cache_readonly def nbytes(self) -> int: rng = self._range return getsizeof(rng) + sum((getsizeof(getattr(rng, attr_name)) for attr_name in ['start', 'stop', 'step'])) def memory_usage(self, deep: bool=False) -> int: return self.nbytes @property def dtype(self) -> np.dtype: return _dtype_int64 @property def is_unique(self) -> bool: return True @cache_readonly def is_monotonic_increasing(self) -> bool: return self._range.step > 0 or len(self) <= 1 @cache_readonly def is_monotonic_decreasing(self) -> bool: return self._range.step < 0 or len(self) <= 1 def __contains__(self, key: Any) -> bool: hash(key) try: key = ensure_python_int(key) except (TypeError, OverflowError): return False return key in self._range @property def inferred_type(self) -> str: return 'integer' @doc(Index.get_loc) def get_loc(self, key) -> int: if is_integer(key) or (is_float(key) and key.is_integer()): new_key = int(key) try: return self._range.index(new_key) except ValueError as err: raise KeyError(key) from err if isinstance(key, Hashable): raise KeyError(key) self._check_indexing_error(key) raise KeyError(key) def _get_indexer(self, target: Index, method: str | None=None, limit: int | None=None, tolerance=None) -> 
npt.NDArray[np.intp]: if com.any_not_none(method, tolerance, limit): return super()._get_indexer(target, method=method, tolerance=tolerance, limit=limit) if self.step > 0: (start, stop, step) = (self.start, self.stop, self.step) else: reverse = self._range[::-1] (start, stop, step) = (reverse.start, reverse.stop, reverse.step) target_array = np.asarray(target) locs = target_array - start valid = (locs % step == 0) & (locs >= 0) & (target_array < stop) locs[~valid] = -1 locs[valid] = locs[valid] / step if step != self.step: locs[valid] = len(self) - 1 - locs[valid] return ensure_platform_int(locs) @cache_readonly def _should_fallback_to_positional(self) -> bool: return False def tolist(self) -> list[int]: return list(self._range) @doc(Index.__iter__) def __iter__(self) -> Iterator[int]: yield from self._range @doc(Index._shallow_copy) def _shallow_copy(self, values, name: Hashable=no_default): name = self._name if name is no_default else name if values.dtype.kind == 'f': return Index(values, name=name, dtype=np.float64) if values.dtype.kind == 'i' and values.ndim == 1: if len(values) == 1: start = values[0] new_range = range(start, start + self.step, self.step) return type(self)._simple_new(new_range, name=name) maybe_range = ibase.maybe_sequence_to_range(values) if isinstance(maybe_range, range): return type(self)._simple_new(maybe_range, name=name) return self._constructor._simple_new(values, name=name) def _view(self) -> Self: result = type(self)._simple_new(self._range, name=self._name) result._cache = self._cache return result def _wrap_reindex_result(self, target, indexer, preserve_names: bool): if not isinstance(target, type(self)) and target.dtype.kind == 'i': target = self._shallow_copy(target._values, name=target.name) return super()._wrap_reindex_result(target, indexer, preserve_names) @doc(Index.copy) def copy(self, name: Hashable | None=None, deep: bool=False) -> Self: name = self._validate_names(name=name, deep=deep)[0] new_index = self._rename(name=name) return new_index def _minmax(self, meth: Literal['min', 'max']) -> int | float: no_steps = len(self) - 1 if no_steps == -1: return np.nan elif meth == 'min' and self.step > 0 or (meth == 'max' and self.step < 0): return self.start return self.start + self.step * no_steps def min(self, axis=None, skipna: bool=True, *args, **kwargs) -> int | float: nv.validate_minmax_axis(axis) nv.validate_min(args, kwargs) return self._minmax('min') def max(self, axis=None, skipna: bool=True, *args, **kwargs) -> int | float: nv.validate_minmax_axis(axis) nv.validate_max(args, kwargs) return self._minmax('max') def _argminmax(self, meth: Literal['min', 'max'], axis=None, skipna: bool=True) -> int: nv.validate_minmax_axis(axis) if len(self) == 0: return getattr(super(), f'arg{meth}')(axis=axis, skipna=skipna) elif meth == 'min': if self.step > 0: return 0 else: return len(self) - 1 elif meth == 'max': if self.step > 0: return len(self) - 1 else: return 0 else: raise ValueError(f'meth={meth!r} must be max or min') def argmin(self, axis=None, skipna: bool=True, *args, **kwargs) -> int: nv.validate_argmin(args, kwargs) return self._argminmax('min', axis=axis, skipna=skipna) def argmax(self, axis=None, skipna: bool=True, *args, **kwargs) -> int: nv.validate_argmax(args, kwargs) return self._argminmax('max', axis=axis, skipna=skipna) def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]: ascending = kwargs.pop('ascending', True) kwargs.pop('kind', None) nv.validate_argsort(args, kwargs) (start, stop, step) = (None, None, None) if 
self._range.step > 0: if ascending: start = len(self) else: (start, stop, step) = (len(self) - 1, -1, -1) elif ascending: (start, stop, step) = (len(self) - 1, -1, -1) else: start = len(self) return np.arange(start, stop, step, dtype=np.intp) def factorize(self, sort: bool=False, use_na_sentinel: bool=True) -> tuple[npt.NDArray[np.intp], RangeIndex]: if sort and self.step < 0: codes = np.arange(len(self) - 1, -1, -1, dtype=np.intp) uniques = self[::-1] else: codes = np.arange(len(self), dtype=np.intp) uniques = self return (codes, uniques) def equals(self, other: object) -> bool: if isinstance(other, RangeIndex): return self._range == other._range return super().equals(other) @overload def sort_values(self, *, return_indexer: Literal[False]=..., ascending: bool=..., na_position: NaPosition=..., key: Callable | None=...) -> Self: ... @overload def sort_values(self, *, return_indexer: Literal[True], ascending: bool=..., na_position: NaPosition=..., key: Callable | None=...) -> tuple[Self, np.ndarray | RangeIndex]: ... @overload def sort_values(self, *, return_indexer: bool=..., ascending: bool=..., na_position: NaPosition=..., key: Callable | None=...) -> Self | tuple[Self, np.ndarray | RangeIndex]: ... def sort_values(self, *, return_indexer: bool=False, ascending: bool=True, na_position: NaPosition='last', key: Callable | None=None) -> Self | tuple[Self, np.ndarray | RangeIndex]: if key is not None: return super().sort_values(return_indexer=return_indexer, ascending=ascending, na_position=na_position, key=key) else: sorted_index = self inverse_indexer = False if ascending: if self.step < 0: sorted_index = self[::-1] inverse_indexer = True elif self.step > 0: sorted_index = self[::-1] inverse_indexer = True if return_indexer: if inverse_indexer: rng = range(len(self) - 1, -1, -1) else: rng = range(len(self)) return (sorted_index, RangeIndex(rng)) else: return sorted_index def _intersection(self, other: Index, sort: bool=False): if not isinstance(other, RangeIndex): return super()._intersection(other, sort=sort) first = self._range[::-1] if self.step < 0 else self._range second = other._range[::-1] if other.step < 0 else other._range int_low = max(first.start, second.start) int_high = min(first.stop, second.stop) if int_high <= int_low: return self._simple_new(_empty_range) (gcd, s, _) = self._extended_gcd(first.step, second.step) if (first.start - second.start) % gcd: return self._simple_new(_empty_range) tmp_start = first.start + (second.start - first.start) * first.step // gcd * s new_step = first.step * second.step // gcd new_start = min_fitting_element(tmp_start, new_step, int_low) new_range = range(new_start, int_high, new_step) if (self.step < 0 and other.step < 0) is not (new_range.step < 0): new_range = new_range[::-1] return self._simple_new(new_range) def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]: (s, old_s) = (0, 1) (t, old_t) = (1, 0) (r, old_r) = (b, a) while r: quotient = old_r // r (old_r, r) = (r, old_r - quotient * r) (old_s, s) = (s, old_s - quotient * s) (old_t, t) = (t, old_t - quotient * t) return (old_r, old_s, old_t) def _range_in_self(self, other: range) -> bool: if not other: return True if not self._range: return False if len(other) > 1 and other.step % self._range.step: return False return other.start in self._range and other[-1] in self._range def _union(self, other: Index, sort: bool | None): if isinstance(other, RangeIndex): if sort in (None, True) or (sort is False and self.step > 0 and self._range_in_self(other._range)): (start_s, step_s) = 
(self.start, self.step) end_s = self.start + self.step * (len(self) - 1) (start_o, step_o) = (other.start, other.step) end_o = other.start + other.step * (len(other) - 1) if self.step < 0: (start_s, step_s, end_s) = (end_s, -step_s, start_s) if other.step < 0: (start_o, step_o, end_o) = (end_o, -step_o, start_o) if len(self) == 1 and len(other) == 1: step_s = step_o = abs(self.start - other.start) elif len(self) == 1: step_s = step_o elif len(other) == 1: step_o = step_s start_r = min(start_s, start_o) end_r = max(end_s, end_o) if step_o == step_s: if (start_s - start_o) % step_s == 0 and start_s - end_o <= step_s and (start_o - end_s <= step_s): return type(self)(start_r, end_r + step_s, step_s) if step_s % 2 == 0 and abs(start_s - start_o) == step_s / 2 and (abs(end_s - end_o) == step_s / 2): return type(self)(start_r, end_r + step_s / 2, step_s / 2) elif step_o % step_s == 0: if (start_o - start_s) % step_s == 0 and start_o + step_s >= start_s and (end_o - step_s <= end_s): return type(self)(start_r, end_r + step_s, step_s) elif step_s % step_o == 0: if (start_s - start_o) % step_o == 0 and start_s + step_o >= start_o and (end_s - step_o <= end_o): return type(self)(start_r, end_r + step_o, step_o) return super()._union(other, sort=sort) def _difference(self, other, sort=None): self._validate_sort_keyword(sort) self._assert_can_do_setop(other) (other, result_name) = self._convert_can_do_setop(other) if not isinstance(other, RangeIndex): return super()._difference(other, sort=sort) if sort is not False and self.step < 0: return self[::-1]._difference(other) res_name = ops.get_op_result_name(self, other) first = self._range[::-1] if self.step < 0 else self._range overlap = self.intersection(other) if overlap.step < 0: overlap = overlap[::-1] if len(overlap) == 0: return self.rename(name=res_name) if len(overlap) == len(self): return self[:0].rename(res_name) if len(overlap) == 1: if overlap[0] == self[0]: return self[1:] elif overlap[0] == self[-1]: return self[:-1] elif len(self) == 3 and overlap[0] == self[1]: return self[::2] else: return super()._difference(other, sort=sort) elif len(overlap) == 2 and overlap[0] == first[0] and (overlap[-1] == first[-1]): return self[1:-1] if overlap.step == first.step: if overlap[0] == first.start: new_rng = range(overlap[-1] + first.step, first.stop, first.step) elif overlap[-1] == first[-1]: new_rng = range(first.start, overlap[0], first.step) elif overlap._range == first[1:-1]: step = len(first) - 1 new_rng = first[::step] else: return super()._difference(other, sort=sort) else: assert len(self) > 1 if overlap.step == first.step * 2: if overlap[0] == first[0] and overlap[-1] in (first[-1], first[-2]): new_rng = first[1::2] elif overlap[0] == first[1] and overlap[-1] in (first[-1], first[-2]): new_rng = first[::2] else: return super()._difference(other, sort=sort) else: return super()._difference(other, sort=sort) if first is not self._range: new_rng = new_rng[::-1] new_index = type(self)._simple_new(new_rng, name=res_name) return new_index def symmetric_difference(self, other, result_name: Hashable | None=None, sort=None) -> Index: if not isinstance(other, RangeIndex) or sort is not None: return super().symmetric_difference(other, result_name, sort) left = self.difference(other) right = other.difference(self) result = left.union(right) if result_name is not None: result = result.rename(result_name) return result def _join_empty(self, other: Index, how: JoinHow, sort: bool) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: 
if not isinstance(other, RangeIndex) and other.dtype.kind == 'i': other = self._shallow_copy(other._values, name=other.name) return super()._join_empty(other, how=how, sort=sort) def _join_monotonic(self, other: Index, how: JoinHow='left') -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: if not isinstance(other, type(self)): maybe_ri = self._shallow_copy(other._values, name=other.name) if not isinstance(maybe_ri, type(self)): return super()._join_monotonic(other, how=how) other = maybe_ri if self.equals(other): ret_index = other if how == 'right' else self return (ret_index, None, None) if how == 'left': join_index = self lidx = None ridx = other.get_indexer(join_index) elif how == 'right': join_index = other lidx = self.get_indexer(join_index) ridx = None elif how == 'inner': join_index = self.intersection(other) lidx = self.get_indexer(join_index) ridx = other.get_indexer(join_index) elif how == 'outer': join_index = self.union(other) lidx = self.get_indexer(join_index) ridx = other.get_indexer(join_index) lidx = None if lidx is None else ensure_platform_int(lidx) ridx = None if ridx is None else ensure_platform_int(ridx) return (join_index, lidx, ridx) def delete(self, loc) -> Index: if is_integer(loc): if loc in (0, -len(self)): return self[1:] if loc in (-1, len(self) - 1): return self[:-1] if len(self) == 3 and loc in (1, -2): return self[::2] elif lib.is_list_like(loc): slc = lib.maybe_indices_to_slice(np.asarray(loc, dtype=np.intp), len(self)) if isinstance(slc, slice): other = self[slc] return self.difference(other, sort=False) return super().delete(loc) def insert(self, loc: int, item) -> Index: if is_integer(item) or is_float(item): if len(self) == 0 and loc == 0 and is_integer(item): new_rng = range(item, item + self.step, self.step) return type(self)._simple_new(new_rng, name=self._name) elif len(self): rng = self._range if loc == 0 and item == self[0] - self.step: new_rng = range(rng.start - rng.step, rng.stop, rng.step) return type(self)._simple_new(new_rng, name=self._name) elif loc == len(self) and item == self[-1] + self.step: new_rng = range(rng.start, rng.stop + rng.step, rng.step) return type(self)._simple_new(new_rng, name=self._name) elif len(self) == 2 and item == self[0] + self.step / 2: step = int(self.step / 2) new_rng = range(self.start, self.stop, step) return type(self)._simple_new(new_rng, name=self._name) return super().insert(loc, item) def _concat(self, indexes: list[Index], name: Hashable) -> Index: if not all((isinstance(x, RangeIndex) for x in indexes)): result = super()._concat(indexes, name) if result.dtype.kind == 'i': return self._shallow_copy(result._values) return result elif len(indexes) == 1: return indexes[0] rng_indexes = cast(list[RangeIndex], indexes) start = step = next_ = None non_empty_indexes = [] all_same_index = True prev: RangeIndex | None = None for obj in rng_indexes: if len(obj): non_empty_indexes.append(obj) if all_same_index: if prev is not None: all_same_index = prev.equals(obj) else: prev = obj for obj in non_empty_indexes: rng = obj._range if start is None: start = rng.start if step is None and len(rng) > 1: step = rng.step elif step is None: if rng.start == start: if all_same_index: values = np.tile(non_empty_indexes[0]._values, len(non_empty_indexes)) else: values = np.concatenate([x._values for x in rng_indexes]) result = self._constructor(values) return result.rename(name) step = rng.start - start non_consecutive = step != rng.step and len(rng) > 1 or (next_ is not None and rng.start != next_) if 
non_consecutive: if all_same_index: values = np.tile(non_empty_indexes[0]._values, len(non_empty_indexes)) else: values = np.concatenate([x._values for x in rng_indexes]) result = self._constructor(values) return result.rename(name) if step is not None: next_ = rng[-1] + step if non_empty_indexes: stop = non_empty_indexes[-1].stop if next_ is None else next_ if len(non_empty_indexes) == 1: step = non_empty_indexes[0].step return RangeIndex(start, stop, step, name=name) return RangeIndex(_empty_range, name=name) def __len__(self) -> int: return len(self._range) @property def size(self) -> int: return len(self) def __getitem__(self, key): if key is Ellipsis: key = slice(None) if isinstance(key, slice): return self._getitem_slice(key) elif is_integer(key): new_key = int(key) try: return self._range[new_key] except IndexError as err: raise IndexError(f'index {key} is out of bounds for axis 0 with size {len(self)}') from err elif is_scalar(key): raise IndexError('only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and integer or boolean arrays are valid indices') elif com.is_bool_indexer(key): if isinstance(getattr(key, 'dtype', None), ExtensionDtype): key = key.to_numpy(dtype=bool, na_value=False) else: key = np.asarray(key, dtype=bool) check_array_indexer(self._range, key) key = np.flatnonzero(key) try: return self.take(key) except (TypeError, ValueError): return super().__getitem__(key) def _getitem_slice(self, slobj: slice) -> Self: res = self._range[slobj] return type(self)._simple_new(res, name=self._name) @unpack_zerodim_and_defer('__floordiv__') def __floordiv__(self, other): if is_integer(other) and other != 0: if len(self) == 0 or (self.start % other == 0 and self.step % other == 0): start = self.start // other step = self.step // other stop = start + len(self) * step new_range = range(start, stop, step or 1) return self._simple_new(new_range, name=self._name) if len(self) == 1: start = self.start // other new_range = range(start, start + 1, 1) return self._simple_new(new_range, name=self._name) return super().__floordiv__(other) def all(self, *args, **kwargs) -> bool: return 0 not in self._range def any(self, *args, **kwargs) -> bool: return any(self._range) def round(self, decimals: int=0) -> Self | Index: if decimals >= 0: return self.copy() elif self.start % 10 ** (-decimals) == 0 and self.step % 10 ** (-decimals) == 0: return self.copy() else: return super().round(decimals=decimals) def _cmp_method(self, other, op): if isinstance(other, RangeIndex) and self._range == other._range: return super()._cmp_method(self, op) return super()._cmp_method(other, op) def _arith_method(self, other, op): if isinstance(other, ABCTimedeltaIndex): return NotImplemented elif isinstance(other, (timedelta, np.timedelta64)): return super()._arith_method(other, op) elif lib.is_np_dtype(getattr(other, 'dtype', None), 'm'): return super()._arith_method(other, op) if op in [operator.pow, ops.rpow, operator.mod, ops.rmod, operator.floordiv, ops.rfloordiv, divmod, ops.rdivmod]: return super()._arith_method(other, op) step: Callable | None = None if op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]: step = op right = extract_array(other, extract_numpy=True, extract_range=True) left = self try: if step: with np.errstate(all='ignore'): rstep = step(left.step, right) if not is_integer(rstep) or not rstep: raise ValueError else: rstep = -left.step if op == ops.rsub else left.step with np.errstate(all='ignore'): rstart = op(left.start, right) rstop = op(left.stop, right) res_name = 
ops.get_op_result_name(self, other) result = type(self)(rstart, rstop, rstep, name=res_name) if not all((is_integer(x) for x in [rstart, rstop, rstep])): result = result.astype('float64') return result except (ValueError, TypeError, ZeroDivisionError): return super()._arith_method(other, op) def __abs__(self) -> Self | Index: if len(self) == 0 or self.min() >= 0: return self.copy() elif self.max() <= 0: return -self else: return super().__abs__() def __neg__(self) -> Self: rng = range(-self.start, -self.stop, -self.step) return self._simple_new(rng, name=self.name) def __pos__(self) -> Self: return self.copy() def __invert__(self) -> Self: if len(self) == 0: return self.copy() rng = range(~self.start, ~self.stop, -self.step) return self._simple_new(rng, name=self.name) def take(self, indices, axis: Axis=0, allow_fill: bool=True, fill_value=None, **kwargs) -> Self | Index: if kwargs: nv.validate_take((), kwargs) if is_scalar(indices): raise TypeError('Expected indices to be array-like') indices = ensure_platform_int(indices) self._maybe_disallow_fill(allow_fill, fill_value, indices) if len(indices) == 0: return type(self)(_empty_range, name=self.name) else: ind_max = indices.max() if ind_max >= len(self): raise IndexError(f'index {ind_max} is out of bounds for axis 0 with size {len(self)}') ind_min = indices.min() if ind_min < -len(self): raise IndexError(f'index {ind_min} is out of bounds for axis 0 with size {len(self)}') taken = indices.astype(self.dtype, casting='safe') if ind_min < 0: taken %= len(self) if self.step != 1: taken *= self.step if self.start != 0: taken += self.start return self._shallow_copy(taken, name=self.name) def value_counts(self, normalize: bool=False, sort: bool=True, ascending: bool=False, bins=None, dropna: bool=True) -> Series: from pandas import Series if bins is not None: return super().value_counts(normalize=normalize, sort=sort, ascending=ascending, bins=bins, dropna=dropna) name = 'proportion' if normalize else 'count' data: npt.NDArray[np.floating] | npt.NDArray[np.signedinteger] = np.ones(len(self), dtype=np.int64) if normalize: data = data / len(self) return Series(data, index=self.copy(), name=name) def searchsorted(self, value, side: Literal['left', 'right']='left', sorter: NumpySorter | None=None) -> npt.NDArray[np.intp] | np.intp: if side not in {'left', 'right'} or sorter is not None: return super().searchsorted(value=value, side=side, sorter=sorter) was_scalar = False if is_scalar(value): was_scalar = True array_value = np.array([value]) else: array_value = np.asarray(value) if array_value.dtype.kind not in 'iu': return super().searchsorted(value=value, side=side, sorter=sorter) if (flip := (self.step < 0)): rng = self._range[::-1] start = rng.start step = rng.step shift = side == 'right' else: start = self.start step = self.step shift = side == 'left' result = (array_value - start - int(shift)) // step + 1 if flip: result = len(self) - result result = np.maximum(np.minimum(result, len(self)), 0) if was_scalar: return np.intp(result.item()) return result.astype(np.intp, copy=False) # File: pandas-main/pandas/core/indexes/timedeltas.py """""" from __future__ import annotations from typing import TYPE_CHECKING from pandas._libs import index as libindex, lib from pandas._libs.tslibs import Resolution, Timedelta, to_offset from pandas.core.dtypes.common import is_scalar, pandas_dtype from pandas.core.dtypes.generic import ABCSeries from pandas.core.arrays.timedeltas import TimedeltaArray import pandas.core.common as com from pandas.core.indexes.base 
import Index, maybe_extract_name from pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin from pandas.core.indexes.extension import inherit_names if TYPE_CHECKING: from pandas._libs import NaTType from pandas._typing import DtypeObj @inherit_names(['__neg__', '__pos__', '__abs__', 'total_seconds', 'round', 'floor', 'ceil'] + TimedeltaArray._field_ops, TimedeltaArray, wrap=True) @inherit_names(['components', 'to_pytimedelta', 'sum', 'std', 'median'], TimedeltaArray) class TimedeltaIndex(DatetimeTimedeltaMixin): _typ = 'timedeltaindex' _data_cls = TimedeltaArray @property def _engine_type(self) -> type[libindex.TimedeltaEngine]: return libindex.TimedeltaEngine _data: TimedeltaArray _get_string_slice = Index._get_string_slice @property def _resolution_obj(self) -> Resolution | None: return self._data._resolution_obj def __new__(cls, data=None, freq=lib.no_default, dtype=None, copy: bool=False, name=None): name = maybe_extract_name(name, data, cls) if is_scalar(data): cls._raise_scalar_data_error(data) if dtype is not None: dtype = pandas_dtype(dtype) if isinstance(data, TimedeltaArray) and freq is lib.no_default and (dtype is None or dtype == data.dtype): if copy: data = data.copy() return cls._simple_new(data, name=name) if isinstance(data, TimedeltaIndex) and freq is lib.no_default and (name is None) and (dtype is None or dtype == data.dtype): if copy: return data.copy() else: return data._view() tdarr = TimedeltaArray._from_sequence_not_strict(data, freq=freq, unit=None, dtype=dtype, copy=copy) refs = None if not copy and isinstance(data, (ABCSeries, Index)): refs = data._references return cls._simple_new(tdarr, name=name, refs=refs) def _is_comparable_dtype(self, dtype: DtypeObj) -> bool: return lib.is_np_dtype(dtype, 'm') def get_loc(self, key): self._check_indexing_error(key) try: key = self._data._validate_scalar(key, unbox=False) except TypeError as err: raise KeyError(key) from err return Index.get_loc(self, key) def _parse_with_reso(self, label: str) -> tuple[Timedelta | NaTType, None]: parsed = Timedelta(label) return (parsed, None) def _parsed_string_to_bounds(self, reso, parsed: Timedelta): lbound = parsed.round(parsed.resolution_string) rbound = lbound + to_offset(parsed.resolution_string) - Timedelta(1, 'ns') return (lbound, rbound) @property def inferred_type(self) -> str: return 'timedelta64' def timedelta_range(start=None, end=None, periods: int | None=None, freq=None, name=None, closed=None, *, unit: str | None=None) -> TimedeltaIndex: if freq is None and com.any_none(periods, start, end): freq = 'D' freq = to_offset(freq) tdarr = TimedeltaArray._generate_range(start, end, periods, freq, closed=closed, unit=unit) return TimedeltaIndex._simple_new(tdarr, name=name) # File: pandas-main/pandas/core/indexing.py from __future__ import annotations from contextlib import suppress import sys from typing import TYPE_CHECKING, Any, TypeVar, cast, final import warnings import numpy as np from pandas._libs.indexing import NDFrameIndexerBase from pandas._libs.lib import item_from_zerodim from pandas.compat import PYPY from pandas.errors import AbstractMethodError, ChainedAssignmentError, IndexingError, InvalidIndexError, LossySetitemError from pandas.errors.cow import _chained_assignment_msg from pandas.util._decorators import doc from pandas.core.dtypes.cast import can_hold_element, maybe_promote from pandas.core.dtypes.common import is_array_like, is_bool_dtype, is_hashable, is_integer, is_iterator, is_list_like, is_numeric_dtype, is_object_dtype, is_scalar, is_sequence 
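# Illustrative sketch, not part of the pandas source: basic usage of the timedelta_range
# constructor and TimedeltaIndex.get_loc defined in pandas/core/indexes/timedeltas.py
# above. The example values are invented.
import pandas as pd

tdi = pd.timedelta_range(start="1 day", periods=4, freq="6h")
# four timedeltas, 6 hours apart, starting at 1 day

# get_loc validates the scalar via TimedeltaArray._validate_scalar and then defers to
# Index.get_loc, so equivalent string and Timedelta keys resolve to the same position.
assert tdi.get_loc("1 day 12:00:00") == 2
assert tdi.get_loc(pd.Timedelta(hours=36)) == 2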
from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries from pandas.core.dtypes.missing import construct_1d_array_from_inferred_fill_value, infer_fill_value, is_valid_na_for_dtype, isna, na_value_for_dtype from pandas.core import algorithms as algos import pandas.core.common as com from pandas.core.construction import array as pd_array, extract_array from pandas.core.indexers import check_array_indexer, is_list_like_indexer, is_scalar_indexer, length_of_indexer from pandas.core.indexes.api import Index, MultiIndex if TYPE_CHECKING: from collections.abc import Hashable, Sequence from pandas._typing import Axis, AxisInt, Self, npt from pandas import DataFrame, Series T = TypeVar('T') _NS = slice(None, None) _one_ellipsis_message = "indexer may only contain one '...' entry" class _IndexSlice: def __getitem__(self, arg): return arg IndexSlice = _IndexSlice() class IndexingMixin: @property def iloc(self) -> _iLocIndexer: return _iLocIndexer('iloc', self) @property def loc(self) -> _LocIndexer: return _LocIndexer('loc', self) @property def at(self) -> _AtIndexer: return _AtIndexer('at', self) @property def iat(self) -> _iAtIndexer: return _iAtIndexer('iat', self) class _LocationIndexer(NDFrameIndexerBase): _valid_types: str axis: AxisInt | None = None _takeable: bool @final def __call__(self, axis: Axis | None=None) -> Self: new_self = type(self)(self.name, self.obj) if axis is not None: axis_int_none = self.obj._get_axis_number(axis) else: axis_int_none = axis new_self.axis = axis_int_none return new_self def _get_setitem_indexer(self, key): if self.name == 'loc': self._ensure_listlike_indexer(key, axis=self.axis) if isinstance(key, tuple): for x in key: check_dict_or_set_indexers(x) if self.axis is not None: key = _tupleize_axis_indexer(self.ndim, self.axis, key) ax = self.obj._get_axis(0) if isinstance(ax, MultiIndex) and self.name != 'iloc' and is_hashable(key) and (not isinstance(key, slice)): with suppress(KeyError, InvalidIndexError): return ax.get_loc(key) if isinstance(key, tuple): with suppress(IndexingError): return self._convert_tuple(key) if isinstance(key, range): key = list(key) return self._convert_to_indexer(key, axis=0) @final def _maybe_mask_setitem_value(self, indexer, value): if isinstance(indexer, tuple) and len(indexer) == 2 and isinstance(value, (ABCSeries, ABCDataFrame)): (pi, icols) = indexer ndim = value.ndim if com.is_bool_indexer(pi) and len(value) == len(pi): newkey = pi.nonzero()[0] if is_scalar_indexer(icols, self.ndim - 1) and ndim == 1: if len(newkey) == 0: value = value.iloc[:0] else: value = self.obj.iloc._align_series(indexer, value) indexer = (newkey, icols) elif isinstance(icols, np.ndarray) and icols.dtype.kind == 'i' and (len(icols) == 1): if ndim == 1: value = self.obj.iloc._align_series(indexer, value) indexer = (newkey, icols) elif ndim == 2 and value.shape[1] == 1: if len(newkey) == 0: value = value.iloc[:0] else: value = self.obj.iloc._align_frame(indexer, value) indexer = (newkey, icols) elif com.is_bool_indexer(indexer): indexer = indexer.nonzero()[0] return (indexer, value) @final def _ensure_listlike_indexer(self, key, axis=None, value=None) -> None: column_axis = 1 if self.ndim != 2: return if isinstance(key, tuple) and len(key) > 1: if axis is None: axis = column_axis key = key[axis] if axis == column_axis and (not isinstance(self.obj.columns, MultiIndex)) and is_list_like_indexer(key) and (not com.is_bool_indexer(key)) and 
all((is_hashable(k) for k in key)): keys = self.obj.columns.union(key, sort=False) diff = Index(key).difference(self.obj.columns, sort=False) if len(diff): indexer = np.arange(len(keys), dtype=np.intp) indexer[len(self.obj.columns):] = -1 new_mgr = self.obj._mgr.reindex_indexer(keys, indexer=indexer, axis=0, only_slice=True, use_na_proxy=True) self.obj._mgr = new_mgr return self.obj._mgr = self.obj._mgr.reindex_axis(keys, axis=0, only_slice=True) @final def __setitem__(self, key, value) -> None: if not PYPY: if sys.getrefcount(self.obj) <= 2: warnings.warn(_chained_assignment_msg, ChainedAssignmentError, stacklevel=2) check_dict_or_set_indexers(key) if isinstance(key, tuple): key = (list(x) if is_iterator(x) else x for x in key) key = tuple((com.apply_if_callable(x, self.obj) for x in key)) else: maybe_callable = com.apply_if_callable(key, self.obj) key = self._raise_callable_usage(key, maybe_callable) indexer = self._get_setitem_indexer(key) self._has_valid_setitem_indexer(key) iloc = self if self.name == 'iloc' else self.obj.iloc iloc._setitem_with_indexer(indexer, value, self.name) def _validate_key(self, key, axis: AxisInt) -> None: raise AbstractMethodError(self) @final def _expand_ellipsis(self, tup: tuple) -> tuple: if any((x is Ellipsis for x in tup)): if tup.count(Ellipsis) > 1: raise IndexingError(_one_ellipsis_message) if len(tup) == self.ndim: i = tup.index(Ellipsis) new_key = tup[:i] + (_NS,) + tup[i + 1:] return new_key return tup @final def _validate_tuple_indexer(self, key: tuple) -> tuple: key = self._validate_key_length(key) key = self._expand_ellipsis(key) for (i, k) in enumerate(key): try: self._validate_key(k, i) except ValueError as err: raise ValueError(f'Location based indexing can only have [{self._valid_types}] types') from err return key @final def _is_nested_tuple_indexer(self, tup: tuple) -> bool: if any((isinstance(ax, MultiIndex) for ax in self.obj.axes)): return any((is_nested_tuple(tup, ax) for ax in self.obj.axes)) return False @final def _convert_tuple(self, key: tuple) -> tuple: self._validate_key_length(key) keyidx = [self._convert_to_indexer(k, axis=i) for (i, k) in enumerate(key)] return tuple(keyidx) @final def _validate_key_length(self, key: tuple) -> tuple: if len(key) > self.ndim: if key[0] is Ellipsis: key = key[1:] if Ellipsis in key: raise IndexingError(_one_ellipsis_message) return self._validate_key_length(key) raise IndexingError('Too many indexers') return key @final def _getitem_tuple_same_dim(self, tup: tuple): retval = self.obj start_val = self.ndim - len(tup) + 1 for (i, key) in enumerate(reversed(tup)): i = self.ndim - i - start_val if com.is_null_slice(key): continue retval = getattr(retval, self.name)._getitem_axis(key, axis=i) assert retval.ndim == self.ndim if retval is self.obj: retval = retval.copy(deep=False) return retval @final def _getitem_lowerdim(self, tup: tuple): if self.axis is not None: axis = self.obj._get_axis_number(self.axis) return self._getitem_axis(tup, axis=axis) if self._is_nested_tuple_indexer(tup): return self._getitem_nested_tuple(tup) ax0 = self.obj._get_axis(0) if isinstance(ax0, MultiIndex) and self.name != 'iloc' and (not any((isinstance(x, slice) for x in tup))): with suppress(IndexingError): return cast(_LocIndexer, self)._handle_lowerdim_multi_index_axis0(tup) tup = self._validate_key_length(tup) for (i, key) in enumerate(tup): if is_label_like(key): section = self._getitem_axis(key, axis=i) if section.ndim == self.ndim: new_key = tup[:i] + (_NS,) + tup[i + 1:] else: new_key = tup[:i] + tup[i + 1:] if 
len(new_key) == 1: new_key = new_key[0] if com.is_null_slice(new_key): return section return getattr(section, self.name)[new_key] raise IndexingError('not applicable') @final def _getitem_nested_tuple(self, tup: tuple): def _contains_slice(x: object) -> bool: if isinstance(x, tuple): return any((isinstance(v, slice) for v in x)) elif isinstance(x, slice): return True return False for key in tup: check_dict_or_set_indexers(key) if len(tup) > self.ndim: if self.name != 'loc': raise ValueError('Too many indices') if all((is_hashable(x) and (not _contains_slice(x)) or com.is_null_slice(x) for x in tup)): with suppress(IndexingError): return cast(_LocIndexer, self)._handle_lowerdim_multi_index_axis0(tup) elif isinstance(self.obj, ABCSeries) and any((isinstance(k, tuple) for k in tup)): raise IndexingError('Too many indexers') axis = self.axis or 0 return self._getitem_axis(tup, axis=axis) obj = self.obj axis = len(tup) - 1 for key in reversed(tup): if com.is_null_slice(key): axis -= 1 continue obj = getattr(obj, self.name)._getitem_axis(key, axis=axis) axis -= 1 if is_scalar(obj) or not hasattr(obj, 'ndim'): break return obj def _convert_to_indexer(self, key, axis: AxisInt): raise AbstractMethodError(self) def _raise_callable_usage(self, key: Any, maybe_callable: T) -> T: if self.name == 'iloc' and callable(key) and isinstance(maybe_callable, tuple): raise ValueError('Returning a tuple from a callable with iloc is not allowed.') return maybe_callable @final def __getitem__(self, key): check_dict_or_set_indexers(key) if type(key) is tuple: key = (list(x) if is_iterator(x) else x for x in key) key = tuple((com.apply_if_callable(x, self.obj) for x in key)) if self._is_scalar_access(key): return self.obj._get_value(*key, takeable=self._takeable) return self._getitem_tuple(key) else: axis = self.axis or 0 maybe_callable = com.apply_if_callable(key, self.obj) maybe_callable = self._raise_callable_usage(key, maybe_callable) return self._getitem_axis(maybe_callable, axis=axis) def _is_scalar_access(self, key: tuple): raise NotImplementedError def _getitem_tuple(self, tup: tuple): raise AbstractMethodError(self) def _getitem_axis(self, key, axis: AxisInt): raise NotImplementedError def _has_valid_setitem_indexer(self, indexer) -> bool: raise AbstractMethodError(self) @final def _getbool_axis(self, key, axis: AxisInt): labels = self.obj._get_axis(axis) key = check_bool_indexer(labels, key) inds = key.nonzero()[0] return self.obj.take(inds, axis=axis) @doc(IndexingMixin.loc) class _LocIndexer(_LocationIndexer): _takeable: bool = False _valid_types = 'labels (MUST BE IN THE INDEX), slices of labels (BOTH endpoints included! 
Can be slices of integers if the index is integers), listlike of labels, boolean' @doc(_LocationIndexer._validate_key) def _validate_key(self, key, axis: Axis) -> None: ax = self.obj._get_axis(axis) if isinstance(key, bool) and (not (is_bool_dtype(ax.dtype) or ax.dtype.name == 'boolean' or (isinstance(ax, MultiIndex) and is_bool_dtype(ax.get_level_values(0).dtype)))): raise KeyError(f'{key}: boolean label can not be used without a boolean index') if isinstance(key, slice) and (isinstance(key.start, bool) or isinstance(key.stop, bool)): raise TypeError(f'{key}: boolean values can not be used in a slice') def _has_valid_setitem_indexer(self, indexer) -> bool: return True def _is_scalar_access(self, key: tuple) -> bool: if len(key) != self.ndim: return False for (i, k) in enumerate(key): if not is_scalar(k): return False ax = self.obj.axes[i] if isinstance(ax, MultiIndex): return False if isinstance(k, str) and ax._supports_partial_string_indexing: return False if not ax._index_as_unique: return False return True def _multi_take_opportunity(self, tup: tuple) -> bool: if not all((is_list_like_indexer(x) for x in tup)): return False return not any((com.is_bool_indexer(x) for x in tup)) def _multi_take(self, tup: tuple): d = {axis: self._get_listlike_indexer(key, axis) for (key, axis) in zip(tup, self.obj._AXIS_ORDERS)} return self.obj._reindex_with_indexers(d, allow_dups=True) def _getitem_iterable(self, key, axis: AxisInt): self._validate_key(key, axis) (keyarr, indexer) = self._get_listlike_indexer(key, axis) return self.obj._reindex_with_indexers({axis: [keyarr, indexer]}, allow_dups=True) def _getitem_tuple(self, tup: tuple): with suppress(IndexingError): tup = self._expand_ellipsis(tup) return self._getitem_lowerdim(tup) tup = self._validate_tuple_indexer(tup) if self._multi_take_opportunity(tup): return self._multi_take(tup) return self._getitem_tuple_same_dim(tup) def _get_label(self, label, axis: AxisInt): return self.obj.xs(label, axis=axis) def _handle_lowerdim_multi_index_axis0(self, tup: tuple): axis = self.axis or 0 try: return self._get_label(tup, axis=axis) except KeyError as ek: if self.ndim < len(tup) <= self.obj.index.nlevels: raise ek raise IndexingError('No label returned') from ek def _getitem_axis(self, key, axis: AxisInt): key = item_from_zerodim(key) if is_iterator(key): key = list(key) if key is Ellipsis: key = slice(None) labels = self.obj._get_axis(axis) if isinstance(key, tuple) and isinstance(labels, MultiIndex): key = tuple(key) if isinstance(key, slice): self._validate_key(key, axis) return self._get_slice_axis(key, axis=axis) elif com.is_bool_indexer(key): return self._getbool_axis(key, axis=axis) elif is_list_like_indexer(key): if not (isinstance(key, tuple) and isinstance(labels, MultiIndex)): if hasattr(key, 'ndim') and key.ndim > 1: raise ValueError('Cannot index with multidimensional key') return self._getitem_iterable(key, axis=axis) if is_nested_tuple(key, labels): locs = labels.get_locs(key) indexer: list[slice | npt.NDArray[np.intp]] = [slice(None)] * self.ndim indexer[axis] = locs return self.obj.iloc[tuple(indexer)] self._validate_key(key, axis) return self._get_label(key, axis=axis) def _get_slice_axis(self, slice_obj: slice, axis: AxisInt): obj = self.obj if not need_slice(slice_obj): return obj.copy(deep=False) labels = obj._get_axis(axis) indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop, slice_obj.step) if isinstance(indexer, slice): return self.obj._slice(indexer, axis=axis) else: return self.obj.take(indexer, axis=axis) def 
_convert_to_indexer(self, key, axis: AxisInt): labels = self.obj._get_axis(axis) if isinstance(key, slice): return labels._convert_slice_indexer(key, kind='loc') if isinstance(key, tuple) and (not isinstance(labels, MultiIndex)) and (self.ndim < 2) and (len(key) > 1): raise IndexingError('Too many indexers') contains_slice = False if isinstance(key, tuple): contains_slice = any((isinstance(v, slice) for v in key)) if is_scalar(key) or (isinstance(labels, MultiIndex) and is_hashable(key) and (not contains_slice)): try: return labels.get_loc(key) except LookupError: if isinstance(key, tuple) and isinstance(labels, MultiIndex): if len(key) == labels.nlevels: return {'key': key} raise except InvalidIndexError: if not isinstance(labels, MultiIndex): raise except ValueError: if not is_integer(key): raise return {'key': key} if is_nested_tuple(key, labels): if self.ndim == 1 and any((isinstance(k, tuple) for k in key)): raise IndexingError('Too many indexers') return labels.get_locs(key) elif is_list_like_indexer(key): if is_iterator(key): key = list(key) if com.is_bool_indexer(key): key = check_bool_indexer(labels, key) return key else: return self._get_listlike_indexer(key, axis)[1] else: try: return labels.get_loc(key) except LookupError: if not is_list_like_indexer(key): return {'key': key} raise def _get_listlike_indexer(self, key, axis: AxisInt): ax = self.obj._get_axis(axis) axis_name = self.obj._get_axis_name(axis) (keyarr, indexer) = ax._get_indexer_strict(key, axis_name) return (keyarr, indexer) @doc(IndexingMixin.iloc) class _iLocIndexer(_LocationIndexer): _valid_types = 'integer, integer slice (START point is INCLUDED, END point is EXCLUDED), listlike of integers, boolean array' _takeable = True def _validate_key(self, key, axis: AxisInt) -> None: if com.is_bool_indexer(key): if hasattr(key, 'index') and isinstance(key.index, Index): if key.index.inferred_type == 'integer': raise NotImplementedError('iLocation based boolean indexing on an integer type is not available') raise ValueError('iLocation based boolean indexing cannot use an indexable as a mask') return if isinstance(key, slice): return elif is_integer(key): self._validate_integer(key, axis) elif isinstance(key, tuple): raise IndexingError('Too many indexers') elif is_list_like_indexer(key): if isinstance(key, ABCSeries): arr = key._values elif is_array_like(key): arr = key else: arr = np.array(key) len_axis = len(self.obj._get_axis(axis)) if not is_numeric_dtype(arr.dtype): raise IndexError(f'.iloc requires numeric indexers, got {arr}') if len(arr) and (arr.max() >= len_axis or arr.min() < -len_axis): raise IndexError('positional indexers are out-of-bounds') else: raise ValueError(f'Can only index by location with a [{self._valid_types}]') def _has_valid_setitem_indexer(self, indexer) -> bool: if isinstance(indexer, dict): raise IndexError('iloc cannot enlarge its target object') if isinstance(indexer, ABCDataFrame): raise TypeError('DataFrame indexer for .iloc is not supported. 
Consider using .loc with a DataFrame indexer for automatic alignment.') if not isinstance(indexer, tuple): indexer = _tuplify(self.ndim, indexer) for (ax, i) in zip(self.obj.axes, indexer): if isinstance(i, slice): pass elif is_list_like_indexer(i): pass elif is_integer(i): if i >= len(ax): raise IndexError('iloc cannot enlarge its target object') elif isinstance(i, dict): raise IndexError('iloc cannot enlarge its target object') return True def _is_scalar_access(self, key: tuple) -> bool: if len(key) != self.ndim: return False return all((is_integer(k) for k in key)) def _validate_integer(self, key: int | np.integer, axis: AxisInt) -> None: len_axis = len(self.obj._get_axis(axis)) if key >= len_axis or key < -len_axis: raise IndexError('single positional indexer is out-of-bounds') def _getitem_tuple(self, tup: tuple): tup = self._validate_tuple_indexer(tup) with suppress(IndexingError): return self._getitem_lowerdim(tup) return self._getitem_tuple_same_dim(tup) def _get_list_axis(self, key, axis: AxisInt): try: return self.obj.take(key, axis=axis) except IndexError as err: raise IndexError('positional indexers are out-of-bounds') from err def _getitem_axis(self, key, axis: AxisInt): if key is Ellipsis: key = slice(None) elif isinstance(key, ABCDataFrame): raise IndexError('DataFrame indexer is not allowed for .iloc\nConsider using .loc for automatic alignment.') if isinstance(key, slice): return self._get_slice_axis(key, axis=axis) if is_iterator(key): key = list(key) if isinstance(key, list): key = np.asarray(key) if com.is_bool_indexer(key): self._validate_key(key, axis) return self._getbool_axis(key, axis=axis) elif is_list_like_indexer(key): return self._get_list_axis(key, axis=axis) else: key = item_from_zerodim(key) if not is_integer(key): raise TypeError('Cannot index by location index with a non-integer key') self._validate_integer(key, axis) return self.obj._ixs(key, axis=axis) def _get_slice_axis(self, slice_obj: slice, axis: AxisInt): obj = self.obj if not need_slice(slice_obj): return obj.copy(deep=False) labels = obj._get_axis(axis) labels._validate_positional_slice(slice_obj) return self.obj._slice(slice_obj, axis=axis) def _convert_to_indexer(self, key: T, axis: AxisInt) -> T: return key def _get_setitem_indexer(self, key): if is_iterator(key): key = list(key) if self.axis is not None: key = _tupleize_axis_indexer(self.ndim, self.axis, key) return key def _setitem_with_indexer(self, indexer, value, name: str='iloc') -> None: info_axis = self.obj._info_axis_number take_split_path = not self.obj._mgr.is_single_block if not take_split_path and isinstance(value, ABCDataFrame): take_split_path = not value._mgr.is_single_block if not take_split_path and len(self.obj._mgr.blocks) and (self.ndim > 1): val = list(value.values()) if isinstance(value, dict) else value arr = self.obj._mgr.blocks[0].values take_split_path = not can_hold_element(arr, extract_array(val, extract_numpy=True)) if isinstance(indexer, tuple) and len(indexer) == len(self.obj.axes): for (i, ax) in zip(indexer, self.obj.axes): if isinstance(ax, MultiIndex) and (not (is_integer(i) or com.is_null_slice(i))): take_split_path = True break if isinstance(indexer, tuple): nindexer = [] for (i, idx) in enumerate(indexer): if isinstance(idx, dict): (key, _) = convert_missing_indexer(idx) if self.ndim > 1 and i == info_axis: if not len(self.obj): if not is_list_like_indexer(value): raise ValueError('cannot set a frame with no defined index and a scalar') self.obj[key] = value return if com.is_null_slice(indexer[0]): 
self.obj[key] = value return elif is_array_like(value): arr = extract_array(value, extract_numpy=True) taker = -1 * np.ones(len(self.obj), dtype=np.intp) empty_value = algos.take_nd(arr, taker) if not isinstance(value, ABCSeries): if isinstance(arr, np.ndarray) and arr.ndim == 1 and (len(arr) == 1): arr = arr[0, ...] empty_value[indexer[0]] = arr self.obj[key] = empty_value return self.obj[key] = empty_value elif not is_list_like(value): self.obj[key] = construct_1d_array_from_inferred_fill_value(value, len(self.obj)) else: self.obj[key] = infer_fill_value(value) new_indexer = convert_from_missing_indexer_tuple(indexer, self.obj.axes) self._setitem_with_indexer(new_indexer, value, name) return index = self.obj._get_axis(i) labels = index.insert(len(index), key) taker = np.arange(len(index) + 1, dtype=np.intp) taker[-1] = -1 reindexers = {i: (labels, taker)} new_obj = self.obj._reindex_with_indexers(reindexers, allow_dups=True) self.obj._mgr = new_obj._mgr nindexer.append(labels.get_loc(key)) else: nindexer.append(idx) indexer = tuple(nindexer) else: (indexer, missing) = convert_missing_indexer(indexer) if missing: self._setitem_with_indexer_missing(indexer, value) return if name == 'loc': (indexer, value) = self._maybe_mask_setitem_value(indexer, value) if take_split_path: self._setitem_with_indexer_split_path(indexer, value, name) else: self._setitem_single_block(indexer, value, name) def _setitem_with_indexer_split_path(self, indexer, value, name: str): assert self.ndim == 2 if not isinstance(indexer, tuple): indexer = _tuplify(self.ndim, indexer) if len(indexer) > self.ndim: raise IndexError('too many indices for array') if isinstance(indexer[0], np.ndarray) and indexer[0].ndim > 2: raise ValueError('Cannot set values with ndim > 2') if isinstance(value, ABCSeries) and name != 'iloc' or isinstance(value, dict): from pandas import Series value = self._align_series(indexer, Series(value)) info_axis = indexer[1] ilocs = self._ensure_iterable_column_indexer(info_axis) pi = indexer[0] lplane_indexer = length_of_indexer(pi, self.obj.index) if is_list_like_indexer(value) and getattr(value, 'ndim', 1) > 0: if isinstance(value, ABCDataFrame): self._setitem_with_indexer_frame_value(indexer, value, name) elif np.ndim(value) == 2: self._setitem_with_indexer_2d_value(indexer, value) elif len(ilocs) == 1 and lplane_indexer == len(value) and (not is_scalar(pi)): self._setitem_single_column(ilocs[0], value, pi) elif len(ilocs) == 1 and 0 != lplane_indexer != len(value): if len(value) == 1 and (not is_integer(info_axis)): return self._setitem_with_indexer((pi, info_axis[0]), value[0]) raise ValueError('Must have equal len keys and value when setting with an iterable') elif lplane_indexer == 0 and len(value) == len(self.obj.index): pass elif self._is_scalar_access(indexer) and is_object_dtype(self.obj.dtypes._values[ilocs[0]]): self._setitem_single_column(indexer[1], value, pi) elif len(ilocs) == len(value): for (loc, v) in zip(ilocs, value): self._setitem_single_column(loc, v, pi) elif len(ilocs) == 1 and com.is_null_slice(pi) and (len(self.obj) == 0): self._setitem_single_column(ilocs[0], value, pi) else: raise ValueError('Must have equal len keys and value when setting with an iterable') else: for loc in ilocs: self._setitem_single_column(loc, value, pi) def _setitem_with_indexer_2d_value(self, indexer, value) -> None: pi = indexer[0] ilocs = self._ensure_iterable_column_indexer(indexer[1]) if not is_array_like(value): value = np.array(value, dtype=object) if len(ilocs) != value.shape[1]: raise 
ValueError('Must have equal len keys and value when setting with an ndarray') for (i, loc) in enumerate(ilocs): value_col = value[:, i] if is_object_dtype(value_col.dtype): value_col = value_col.tolist() self._setitem_single_column(loc, value_col, pi) def _setitem_with_indexer_frame_value(self, indexer, value: DataFrame, name: str) -> None: ilocs = self._ensure_iterable_column_indexer(indexer[1]) sub_indexer = list(indexer) pi = indexer[0] multiindex_indexer = isinstance(self.obj.columns, MultiIndex) unique_cols = value.columns.is_unique if name == 'iloc': for (i, loc) in enumerate(ilocs): val = value.iloc[:, i] self._setitem_single_column(loc, val, pi) elif not unique_cols and value.columns.equals(self.obj.columns): for loc in ilocs: item = self.obj.columns[loc] if item in value: sub_indexer[1] = item val = self._align_series(tuple(sub_indexer), value.iloc[:, loc], multiindex_indexer) else: val = np.nan self._setitem_single_column(loc, val, pi) elif not unique_cols: raise ValueError('Setting with non-unique columns is not allowed.') else: for loc in ilocs: item = self.obj.columns[loc] if item in value: sub_indexer[1] = item val = self._align_series(tuple(sub_indexer), value[item], multiindex_indexer, using_cow=True) else: val = np.nan self._setitem_single_column(loc, val, pi) def _setitem_single_column(self, loc: int, value, plane_indexer) -> None: pi = plane_indexer is_full_setter = com.is_null_slice(pi) or com.is_full_slice(pi, len(self.obj)) is_null_setter = com.is_empty_slice(pi) or (is_array_like(pi) and len(pi) == 0) if is_null_setter: return elif is_full_setter: try: self.obj._mgr.column_setitem(loc, plane_indexer, value, inplace_only=True) except (ValueError, TypeError, LossySetitemError) as exc: dtype = self.obj.dtypes.iloc[loc] if dtype not in (np.void, object) and (not self.obj.empty): raise TypeError(f"Invalid value '{value}' for dtype '{dtype}'") from exc self.obj.isetitem(loc, value) else: dtype = self.obj.dtypes.iloc[loc] if dtype == np.void: self.obj.iloc[:, loc] = construct_1d_array_from_inferred_fill_value(value, len(self.obj)) self.obj._mgr.column_setitem(loc, plane_indexer, value) def _setitem_single_block(self, indexer, value, name: str) -> None: from pandas import Series if isinstance(value, ABCSeries) and name != 'iloc' or isinstance(value, dict): value = self._align_series(indexer, Series(value)) info_axis = self.obj._info_axis_number item_labels = self.obj._get_axis(info_axis) if isinstance(indexer, tuple): if self.ndim == len(indexer) == 2 and is_integer(indexer[1]) and com.is_null_slice(indexer[0]): col = item_labels[indexer[info_axis]] if len(item_labels.get_indexer_for([col])) == 1: loc = item_labels.get_loc(col) self._setitem_single_column(loc, value, indexer[0]) return indexer = maybe_convert_ix(*indexer) if isinstance(value, ABCDataFrame) and name != 'iloc': value = self._align_frame(indexer, value)._values self.obj._mgr = self.obj._mgr.setitem(indexer=indexer, value=value) def _setitem_with_indexer_missing(self, indexer, value): from pandas import Series if self.ndim == 1: index = self.obj.index new_index = index.insert(len(index), indexer) if index.is_unique: new_indexer = index.get_indexer(new_index[-1:]) if (new_indexer != -1).any(): return self._setitem_with_indexer(new_indexer, value, 'loc') if not is_scalar(value): new_dtype = None elif is_valid_na_for_dtype(value, self.obj.dtype): if not is_object_dtype(self.obj.dtype): value = na_value_for_dtype(self.obj.dtype, compat=False) new_dtype = maybe_promote(self.obj.dtype, value)[0] elif isna(value): 
new_dtype = None elif not self.obj.empty and (not is_object_dtype(self.obj.dtype)): curr_dtype = self.obj.dtype curr_dtype = getattr(curr_dtype, 'numpy_dtype', curr_dtype) new_dtype = maybe_promote(curr_dtype, value)[0] else: new_dtype = None new_values = Series([value], dtype=new_dtype)._values if len(self.obj._values): new_values = concat_compat([self.obj._values, new_values]) self.obj._mgr = self.obj._constructor(new_values, index=new_index, name=self.obj.name)._mgr elif self.ndim == 2: if not len(self.obj.columns): raise ValueError('cannot set a frame with no defined columns') has_dtype = hasattr(value, 'dtype') if isinstance(value, ABCSeries): value = value.reindex(index=self.obj.columns) value.name = indexer elif isinstance(value, dict): value = Series(value, index=self.obj.columns, name=indexer, dtype=object) else: if is_list_like_indexer(value): if len(value) != len(self.obj.columns): raise ValueError('cannot set a row with mismatched columns') value = Series(value, index=self.obj.columns, name=indexer) if not len(self.obj): df = value.to_frame().T idx = self.obj.index if isinstance(idx, MultiIndex): name = idx.names else: name = idx.name df.index = Index([indexer], name=name) if not has_dtype: df = df.infer_objects() self.obj._mgr = df._mgr else: self.obj._mgr = self.obj._append(value)._mgr def _ensure_iterable_column_indexer(self, column_indexer): ilocs: Sequence[int | np.integer] | np.ndarray | range if is_integer(column_indexer): ilocs = [column_indexer] elif isinstance(column_indexer, slice): ilocs = range(len(self.obj.columns))[column_indexer] elif isinstance(column_indexer, np.ndarray) and column_indexer.dtype.kind == 'b': ilocs = np.arange(len(column_indexer))[column_indexer] else: ilocs = column_indexer return ilocs def _align_series(self, indexer, ser: Series, multiindex_indexer: bool=False, using_cow: bool=False): if isinstance(indexer, (slice, np.ndarray, list, Index)): indexer = (indexer,) if isinstance(indexer, tuple): def ravel(i): return i.ravel() if isinstance(i, np.ndarray) else i indexer = tuple(map(ravel, indexer)) aligners = [not com.is_null_slice(idx) for idx in indexer] sum_aligners = sum(aligners) single_aligner = sum_aligners == 1 is_frame = self.ndim == 2 obj = self.obj if is_frame: single_aligner = single_aligner and aligners[0] if sum_aligners == self.ndim and all((is_sequence(_) for _ in indexer)): ser_values = ser.reindex(obj.axes[0][indexer[0]])._values if len(indexer) > 1 and (not multiindex_indexer): len_indexer = len(indexer[1]) ser_values = np.tile(ser_values, len_indexer).reshape(len_indexer, -1).T return ser_values for (i, idx) in enumerate(indexer): ax = obj.axes[i] if is_sequence(idx) or isinstance(idx, slice): if single_aligner and com.is_null_slice(idx): continue new_ix = ax[idx] if not is_list_like_indexer(new_ix): new_ix = Index([new_ix]) else: new_ix = Index(new_ix) if not len(new_ix) or ser.index.equals(new_ix): if using_cow: return ser return ser._values.copy() return ser.reindex(new_ix)._values elif single_aligner: ax = self.obj.axes[1] if ser.index.equals(ax) or not len(ax): return ser._values.copy() return ser.reindex(ax)._values elif is_integer(indexer) and self.ndim == 1: if is_object_dtype(self.obj.dtype): return ser ax = self.obj._get_axis(0) if ser.index.equals(ax): return ser._values.copy() return ser.reindex(ax)._values[indexer] elif is_integer(indexer): ax = self.obj._get_axis(1) if ser.index.equals(ax): return ser._values.copy() return ser.reindex(ax)._values raise ValueError('Incompatible indexer with Series') def 
_align_frame(self, indexer, df: DataFrame) -> DataFrame: is_frame = self.ndim == 2 if isinstance(indexer, tuple): (idx, cols) = (None, None) sindexers = [] for (i, ix) in enumerate(indexer): ax = self.obj.axes[i] if is_sequence(ix) or isinstance(ix, slice): if isinstance(ix, np.ndarray): ix = ix.reshape(-1) if idx is None: idx = ax[ix] elif cols is None: cols = ax[ix] else: break else: sindexers.append(i) if idx is not None and cols is not None: if df.index.equals(idx) and df.columns.equals(cols): val = df.copy() else: val = df.reindex(idx, columns=cols) return val elif (isinstance(indexer, slice) or is_list_like_indexer(indexer)) and is_frame: ax = self.obj.index[indexer] if df.index.equals(ax): val = df.copy() else: if isinstance(ax, MultiIndex) and isinstance(df.index, MultiIndex) and (ax.nlevels != df.index.nlevels): raise TypeError('cannot align on a multi-index with out specifying the join levels') val = df.reindex(index=ax) return val raise ValueError('Incompatible indexer with DataFrame') class _ScalarAccessIndexer(NDFrameIndexerBase): _takeable: bool def _convert_key(self, key): raise AbstractMethodError(self) def __getitem__(self, key): if not isinstance(key, tuple): if not is_list_like_indexer(key): key = (key,) else: raise ValueError('Invalid call for scalar access (getting)!') key = self._convert_key(key) return self.obj._get_value(*key, takeable=self._takeable) def __setitem__(self, key, value) -> None: if isinstance(key, tuple): key = tuple((com.apply_if_callable(x, self.obj) for x in key)) else: key = com.apply_if_callable(key, self.obj) if not isinstance(key, tuple): key = _tuplify(self.ndim, key) key = list(self._convert_key(key)) if len(key) != self.ndim: raise ValueError('Not enough indexers for scalar access (setting)!') self.obj._set_value(*key, value=value, takeable=self._takeable) @doc(IndexingMixin.at) class _AtIndexer(_ScalarAccessIndexer): _takeable = False def _convert_key(self, key): if self.ndim == 1 and len(key) > 1: key = (key,) return key @property def _axes_are_unique(self) -> bool: assert self.ndim == 2 return self.obj.index.is_unique and self.obj.columns.is_unique def __getitem__(self, key): if self.ndim == 2 and (not self._axes_are_unique): if not isinstance(key, tuple) or not all((is_scalar(x) for x in key)): raise ValueError('Invalid call for scalar access (getting)!') return self.obj.loc[key] return super().__getitem__(key) def __setitem__(self, key, value) -> None: if self.ndim == 2 and (not self._axes_are_unique): if not isinstance(key, tuple) or not all((is_scalar(x) for x in key)): raise ValueError('Invalid call for scalar access (setting)!') self.obj.loc[key] = value return return super().__setitem__(key, value) @doc(IndexingMixin.iat) class _iAtIndexer(_ScalarAccessIndexer): _takeable = True def _convert_key(self, key): for i in key: if not is_integer(i): raise ValueError('iAt based indexing can only have integer indexers') return key def _tuplify(ndim: int, loc: Hashable) -> tuple[Hashable | slice, ...]: _tup: list[Hashable | slice] _tup = [slice(None, None) for _ in range(ndim)] _tup[0] = loc return tuple(_tup) def _tupleize_axis_indexer(ndim: int, axis: AxisInt, key) -> tuple: new_key = [slice(None)] * ndim new_key[axis] = key return tuple(new_key) def check_bool_indexer(index: Index, key) -> np.ndarray: result = key if isinstance(key, ABCSeries) and (not key.index.equals(index)): indexer = result.index.get_indexer_for(index) if -1 in indexer: raise IndexingError('Unalignable boolean Series provided as indexer (index of the boolean Series 
and of the indexed object do not match).') result = result.take(indexer) if not isinstance(result.dtype, ExtensionDtype): return result.astype(bool)._values if is_object_dtype(key): result = np.asarray(result, dtype=bool) elif not is_array_like(result): result = pd_array(result, dtype=bool) return check_array_indexer(index, result) def convert_missing_indexer(indexer): if isinstance(indexer, dict): indexer = indexer['key'] if isinstance(indexer, bool): raise KeyError('cannot use a single bool to index into setitem') return (indexer, True) return (indexer, False) def convert_from_missing_indexer_tuple(indexer, axes): def get_indexer(_i, _idx): return axes[_i].get_loc(_idx['key']) if isinstance(_idx, dict) else _idx return tuple((get_indexer(_i, _idx) for (_i, _idx) in enumerate(indexer))) def maybe_convert_ix(*args): for arg in args: if not isinstance(arg, (np.ndarray, list, ABCSeries, Index)): return args return np.ix_(*args) def is_nested_tuple(tup, labels) -> bool: if not isinstance(tup, tuple): return False for k in tup: if is_list_like(k) or isinstance(k, slice): return isinstance(labels, MultiIndex) return False def is_label_like(key) -> bool: return not isinstance(key, slice) and (not is_list_like_indexer(key)) and (key is not Ellipsis) def need_slice(obj: slice) -> bool: return obj.start is not None or obj.stop is not None or (obj.step is not None and obj.step != 1) def check_dict_or_set_indexers(key) -> None: if isinstance(key, set) or (isinstance(key, tuple) and any((isinstance(x, set) for x in key))): raise TypeError('Passing a set as an indexer is not supported. Use a list instead.') if isinstance(key, dict) or (isinstance(key, tuple) and any((isinstance(x, dict) for x in key))): raise TypeError('Passing a dict as an indexer is not supported. 
Use a list instead.') # File: pandas-main/pandas/core/interchange/buffer.py from __future__ import annotations from typing import TYPE_CHECKING, Any from pandas.core.interchange.dataframe_protocol import Buffer, DlpackDeviceType if TYPE_CHECKING: import numpy as np import pyarrow as pa class PandasBuffer(Buffer): def __init__(self, x: np.ndarray, allow_copy: bool=True) -> None: if x.strides[0] and (not x.strides == (x.dtype.itemsize,)): if allow_copy: x = x.copy() else: raise RuntimeError('Exports cannot be zero-copy in the case of a non-contiguous buffer') self._x = x @property def bufsize(self) -> int: return self._x.size * self._x.dtype.itemsize @property def ptr(self) -> int: return self._x.__array_interface__['data'][0] def __dlpack__(self) -> Any: return self._x.__dlpack__() def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]: return (DlpackDeviceType.CPU, None) def __repr__(self) -> str: return 'PandasBuffer(' + str({'bufsize': self.bufsize, 'ptr': self.ptr, 'device': self.__dlpack_device__()[0].name}) + ')' class PandasBufferPyarrow(Buffer): def __init__(self, buffer: pa.Buffer, *, length: int) -> None: self._buffer = buffer self._length = length @property def bufsize(self) -> int: return self._buffer.size @property def ptr(self) -> int: return self._buffer.address def __dlpack__(self) -> Any: raise NotImplementedError def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]: return (DlpackDeviceType.CPU, None) def __repr__(self) -> str: return 'PandasBuffer[pyarrow](' + str({'bufsize': self.bufsize, 'ptr': self.ptr, 'device': 'CPU'}) + ')' # File: pandas-main/pandas/core/interchange/column.py from __future__ import annotations from typing import TYPE_CHECKING, Any import numpy as np from pandas._libs.lib import infer_dtype from pandas._libs.tslibs import iNaT from pandas.errors import NoBufferPresent from pandas.util._decorators import cache_readonly from pandas.core.dtypes.dtypes import BaseMaskedDtype import pandas as pd from pandas import ArrowDtype, DatetimeTZDtype from pandas.api.types import is_string_dtype from pandas.core.interchange.buffer import PandasBuffer, PandasBufferPyarrow from pandas.core.interchange.dataframe_protocol import Column, ColumnBuffers, ColumnNullType, DtypeKind from pandas.core.interchange.utils import ArrowCTypes, Endianness, dtype_to_arrow_c_fmt if TYPE_CHECKING: from pandas.core.interchange.dataframe_protocol import Buffer _NP_KINDS = {'i': DtypeKind.INT, 'u': DtypeKind.UINT, 'f': DtypeKind.FLOAT, 'b': DtypeKind.BOOL, 'U': DtypeKind.STRING, 'M': DtypeKind.DATETIME, 'm': DtypeKind.DATETIME} _NULL_DESCRIPTION = {DtypeKind.FLOAT: (ColumnNullType.USE_NAN, None), DtypeKind.DATETIME: (ColumnNullType.USE_SENTINEL, iNaT), DtypeKind.INT: (ColumnNullType.NON_NULLABLE, None), DtypeKind.UINT: (ColumnNullType.NON_NULLABLE, None), DtypeKind.BOOL: (ColumnNullType.NON_NULLABLE, None), DtypeKind.CATEGORICAL: (ColumnNullType.USE_SENTINEL, -1), DtypeKind.STRING: (ColumnNullType.USE_BYTEMASK, 0)} _NO_VALIDITY_BUFFER = {ColumnNullType.NON_NULLABLE: 'This column is non-nullable', ColumnNullType.USE_NAN: 'This column uses NaN as null', ColumnNullType.USE_SENTINEL: 'This column uses a sentinel value'} class PandasColumn(Column): def __init__(self, column: pd.Series, allow_copy: bool=True) -> None: if isinstance(column, pd.DataFrame): raise TypeError(f'Expected a Series, got a DataFrame. 
This likely happened because you called __dataframe__ on a DataFrame which, after converting column names to string, resulted in duplicated names: {column.columns}. Please rename these columns before using the interchange protocol.') if not isinstance(column, pd.Series): raise NotImplementedError(f'Columns of type {type(column)} not handled yet') self._col = column self._allow_copy = allow_copy def size(self) -> int: return self._col.size @property def offset(self) -> int: return 0 @cache_readonly def dtype(self) -> tuple[DtypeKind, int, str, str]: dtype = self._col.dtype if isinstance(dtype, pd.CategoricalDtype): codes = self._col.values.codes (_, bitwidth, c_arrow_dtype_f_str, _) = self._dtype_from_pandasdtype(codes.dtype) return (DtypeKind.CATEGORICAL, bitwidth, c_arrow_dtype_f_str, Endianness.NATIVE) elif is_string_dtype(dtype): if infer_dtype(self._col) in ('string', 'empty'): return (DtypeKind.STRING, 8, dtype_to_arrow_c_fmt(dtype), Endianness.NATIVE) raise NotImplementedError('Non-string object dtypes are not supported yet') else: return self._dtype_from_pandasdtype(dtype) def _dtype_from_pandasdtype(self, dtype) -> tuple[DtypeKind, int, str, str]: kind = _NP_KINDS.get(dtype.kind, None) if kind is None: raise ValueError(f'Data type {dtype} not supported by interchange protocol') if isinstance(dtype, ArrowDtype): byteorder = dtype.numpy_dtype.byteorder elif isinstance(dtype, DatetimeTZDtype): byteorder = dtype.base.byteorder elif isinstance(dtype, BaseMaskedDtype): byteorder = dtype.numpy_dtype.byteorder else: byteorder = dtype.byteorder if dtype == 'bool[pyarrow]': return (kind, dtype.itemsize, ArrowCTypes.BOOL, byteorder) return (kind, dtype.itemsize * 8, dtype_to_arrow_c_fmt(dtype), byteorder) @property def describe_categorical(self): if not self.dtype[0] == DtypeKind.CATEGORICAL: raise TypeError('describe_categorical only works on a column with categorical dtype!') return {'is_ordered': self._col.cat.ordered, 'is_dictionary': True, 'categories': PandasColumn(pd.Series(self._col.cat.categories))} @property def describe_null(self): if isinstance(self._col.dtype, BaseMaskedDtype): column_null_dtype = ColumnNullType.USE_BYTEMASK null_value = 1 return (column_null_dtype, null_value) if isinstance(self._col.dtype, ArrowDtype): if self._col.array._pa_array.chunks[0].buffers()[0] is None: return (ColumnNullType.NON_NULLABLE, None) return (ColumnNullType.USE_BITMASK, 0) kind = self.dtype[0] try: (null, value) = _NULL_DESCRIPTION[kind] except KeyError as err: raise NotImplementedError(f'Data type {kind} not yet supported') from err return (null, value) @cache_readonly def null_count(self) -> int: return self._col.isna().sum().item() @property def metadata(self) -> dict[str, pd.Index]: return {'pandas.index': self._col.index} def num_chunks(self) -> int: return 1 def get_chunks(self, n_chunks: int | None=None): if n_chunks and n_chunks > 1: size = len(self._col) step = size // n_chunks if size % n_chunks != 0: step += 1 for start in range(0, step * n_chunks, step): yield PandasColumn(self._col.iloc[start:start + step], self._allow_copy) else: yield self def get_buffers(self) -> ColumnBuffers: buffers: ColumnBuffers = {'data': self._get_data_buffer(), 'validity': None, 'offsets': None} try: buffers['validity'] = self._get_validity_buffer() except NoBufferPresent: pass try: buffers['offsets'] = self._get_offsets_buffer() except NoBufferPresent: pass return buffers def _get_data_buffer(self) -> tuple[Buffer, tuple[DtypeKind, int, str, str]]: buffer: Buffer if self.dtype[0] == 
DtypeKind.DATETIME: if len(self.dtype[2]) > 4: np_arr = self._col.dt.tz_convert(None).to_numpy() else: np_arr = self._col.to_numpy() buffer = PandasBuffer(np_arr, allow_copy=self._allow_copy) dtype = (DtypeKind.INT, 64, ArrowCTypes.INT64, Endianness.NATIVE) elif self.dtype[0] in (DtypeKind.INT, DtypeKind.UINT, DtypeKind.FLOAT, DtypeKind.BOOL): dtype = self.dtype arr = self._col.array if isinstance(self._col.dtype, ArrowDtype): arr = arr._pa_array.chunks[0] buffer = PandasBufferPyarrow(arr.buffers()[1], length=len(arr)) return (buffer, dtype) if isinstance(self._col.dtype, BaseMaskedDtype): np_arr = arr._data else: np_arr = arr._ndarray buffer = PandasBuffer(np_arr, allow_copy=self._allow_copy) elif self.dtype[0] == DtypeKind.CATEGORICAL: codes = self._col.values._codes buffer = PandasBuffer(codes, allow_copy=self._allow_copy) dtype = self._dtype_from_pandasdtype(codes.dtype) elif self.dtype[0] == DtypeKind.STRING: buf = self._col.to_numpy() b = bytearray() for obj in buf: if isinstance(obj, str): b.extend(obj.encode(encoding='utf-8')) buffer = PandasBuffer(np.frombuffer(b, dtype='uint8')) dtype = (DtypeKind.UINT, 8, ArrowCTypes.UINT8, Endianness.NATIVE) else: raise NotImplementedError(f'Data type {self._col.dtype} not handled yet') return (buffer, dtype) def _get_validity_buffer(self) -> tuple[Buffer, Any] | None: (null, invalid) = self.describe_null buffer: Buffer if isinstance(self._col.dtype, ArrowDtype): arr = self._col.array._pa_array.chunks[0] dtype = (DtypeKind.BOOL, 1, ArrowCTypes.BOOL, Endianness.NATIVE) if arr.buffers()[0] is None: return None buffer = PandasBufferPyarrow(arr.buffers()[0], length=len(arr)) return (buffer, dtype) if isinstance(self._col.dtype, BaseMaskedDtype): mask = self._col.array._mask buffer = PandasBuffer(mask) dtype = (DtypeKind.BOOL, 8, ArrowCTypes.BOOL, Endianness.NATIVE) return (buffer, dtype) if self.dtype[0] == DtypeKind.STRING: buf = self._col.to_numpy() valid = invalid == 0 invalid = not valid mask = np.zeros(shape=(len(buf),), dtype=np.bool_) for (i, obj) in enumerate(buf): mask[i] = valid if isinstance(obj, str) else invalid buffer = PandasBuffer(mask) dtype = (DtypeKind.BOOL, 8, ArrowCTypes.BOOL, Endianness.NATIVE) return (buffer, dtype) try: msg = f'{_NO_VALIDITY_BUFFER[null]} so does not have a separate mask' except KeyError as err: raise NotImplementedError('See self.describe_null') from err raise NoBufferPresent(msg) def _get_offsets_buffer(self) -> tuple[PandasBuffer, Any]: if self.dtype[0] == DtypeKind.STRING: values = self._col.to_numpy() ptr = 0 offsets = np.zeros(shape=(len(values) + 1,), dtype=np.int64) for (i, v) in enumerate(values): if isinstance(v, str): b = v.encode(encoding='utf-8') ptr += len(b) offsets[i + 1] = ptr buffer = PandasBuffer(offsets) dtype = (DtypeKind.INT, 64, ArrowCTypes.INT64, Endianness.NATIVE) else: raise NoBufferPresent('This column has a fixed-length dtype so it does not have an offsets buffer') return (buffer, dtype) # File: pandas-main/pandas/core/interchange/dataframe.py from __future__ import annotations from collections import abc from typing import TYPE_CHECKING from pandas.core.interchange.column import PandasColumn from pandas.core.interchange.dataframe_protocol import DataFrame as DataFrameXchg from pandas.core.interchange.utils import maybe_rechunk if TYPE_CHECKING: from collections.abc import Iterable, Sequence from pandas import DataFrame, Index class PandasDataFrameXchg(DataFrameXchg): def __init__(self, df: DataFrame, allow_copy: bool=True) -> None: self._df = df.rename(columns=str) 
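        # Editorial note (descriptive comment, not in the original source): column labels are
        # coerced to str above because the interchange protocol requires string column names.
        # The enumerate loop that follows passes each column through maybe_rechunk so that a
        # multi-chunk pyarrow-backed column is consolidated into a single chunk (copying only
        # when allow_copy permits it), keeping one contiguous buffer per column.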
self._allow_copy = allow_copy for (i, _col) in enumerate(self._df.columns): rechunked = maybe_rechunk(self._df.iloc[:, i], allow_copy=allow_copy) if rechunked is not None: self._df.isetitem(i, rechunked) def __dataframe__(self, nan_as_null: bool=False, allow_copy: bool=True) -> PandasDataFrameXchg: return PandasDataFrameXchg(self._df, allow_copy) @property def metadata(self) -> dict[str, Index]: return {'pandas.index': self._df.index} def num_columns(self) -> int: return len(self._df.columns) def num_rows(self) -> int: return len(self._df) def num_chunks(self) -> int: return 1 def column_names(self) -> Index: return self._df.columns def get_column(self, i: int) -> PandasColumn: return PandasColumn(self._df.iloc[:, i], allow_copy=self._allow_copy) def get_column_by_name(self, name: str) -> PandasColumn: return PandasColumn(self._df[name], allow_copy=self._allow_copy) def get_columns(self) -> list[PandasColumn]: return [PandasColumn(self._df[name], allow_copy=self._allow_copy) for name in self._df.columns] def select_columns(self, indices: Sequence[int]) -> PandasDataFrameXchg: if not isinstance(indices, abc.Sequence): raise ValueError('`indices` is not a sequence') if not isinstance(indices, list): indices = list(indices) return PandasDataFrameXchg(self._df.iloc[:, indices], allow_copy=self._allow_copy) def select_columns_by_name(self, names: list[str]) -> PandasDataFrameXchg: if not isinstance(names, abc.Sequence): raise ValueError('`names` is not a sequence') if not isinstance(names, list): names = list(names) return PandasDataFrameXchg(self._df.loc[:, names], allow_copy=self._allow_copy) def get_chunks(self, n_chunks: int | None=None) -> Iterable[PandasDataFrameXchg]: if n_chunks and n_chunks > 1: size = len(self._df) step = size // n_chunks if size % n_chunks != 0: step += 1 for start in range(0, step * n_chunks, step): yield PandasDataFrameXchg(self._df.iloc[start:start + step, :], allow_copy=self._allow_copy) else: yield self # File: pandas-main/pandas/core/interchange/dataframe_protocol.py """""" from __future__ import annotations from abc import ABC, abstractmethod import enum from typing import TYPE_CHECKING, Any, TypedDict if TYPE_CHECKING: from collections.abc import Iterable, Sequence class DlpackDeviceType(enum.IntEnum): CPU = 1 CUDA = 2 CPU_PINNED = 3 OPENCL = 4 VULKAN = 7 METAL = 8 VPI = 9 ROCM = 10 class DtypeKind(enum.IntEnum): INT = 0 UINT = 1 FLOAT = 2 BOOL = 20 STRING = 21 DATETIME = 22 CATEGORICAL = 23 class ColumnNullType(enum.IntEnum): NON_NULLABLE = 0 USE_NAN = 1 USE_SENTINEL = 2 USE_BITMASK = 3 USE_BYTEMASK = 4 class ColumnBuffers(TypedDict): data: tuple[Buffer, Any] validity: tuple[Buffer, Any] | None offsets: tuple[Buffer, Any] | None class CategoricalDescription(TypedDict): is_ordered: bool is_dictionary: bool categories: Column | None class Buffer(ABC): @property @abstractmethod def bufsize(self) -> int: @property @abstractmethod def ptr(self) -> int: @abstractmethod def __dlpack__(self): raise NotImplementedError('__dlpack__') @abstractmethod def __dlpack_device__(self) -> tuple[DlpackDeviceType, int | None]: class Column(ABC): @abstractmethod def size(self) -> int: @property @abstractmethod def offset(self) -> int: @property @abstractmethod def dtype(self) -> tuple[DtypeKind, int, str, str]: @property @abstractmethod def describe_categorical(self) -> CategoricalDescription: @property @abstractmethod def describe_null(self) -> tuple[ColumnNullType, Any]: @property @abstractmethod def null_count(self) -> int | None: @property @abstractmethod def metadata(self) 
-> dict[str, Any]: @abstractmethod def num_chunks(self) -> int: @abstractmethod def get_chunks(self, n_chunks: int | None=None) -> Iterable[Column]: @abstractmethod def get_buffers(self) -> ColumnBuffers: class DataFrame(ABC): version = 0 @abstractmethod def __dataframe__(self, nan_as_null: bool=False, allow_copy: bool=True): @property @abstractmethod def metadata(self) -> dict[str, Any]: @abstractmethod def num_columns(self) -> int: @abstractmethod def num_rows(self) -> int | None: @abstractmethod def num_chunks(self) -> int: @abstractmethod def column_names(self) -> Iterable[str]: @abstractmethod def get_column(self, i: int) -> Column: @abstractmethod def get_column_by_name(self, name: str) -> Column: @abstractmethod def get_columns(self) -> Iterable[Column]: @abstractmethod def select_columns(self, indices: Sequence[int]) -> DataFrame: @abstractmethod def select_columns_by_name(self, names: Sequence[str]) -> DataFrame: @abstractmethod def get_chunks(self, n_chunks: int | None=None) -> Iterable[DataFrame]: # File: pandas-main/pandas/core/interchange/from_dataframe.py from __future__ import annotations import ctypes import re from typing import Any, overload import numpy as np from pandas.compat._optional import import_optional_dependency import pandas as pd from pandas.core.interchange.dataframe_protocol import Buffer, Column, ColumnNullType, DataFrame as DataFrameXchg, DtypeKind from pandas.core.interchange.utils import ArrowCTypes, Endianness _NP_DTYPES: dict[DtypeKind, dict[int, Any]] = {DtypeKind.INT: {8: np.int8, 16: np.int16, 32: np.int32, 64: np.int64}, DtypeKind.UINT: {8: np.uint8, 16: np.uint16, 32: np.uint32, 64: np.uint64}, DtypeKind.FLOAT: {32: np.float32, 64: np.float64}, DtypeKind.BOOL: {1: bool, 8: bool}} def from_dataframe(df, allow_copy: bool=True) -> pd.DataFrame: if isinstance(df, pd.DataFrame): return df if not hasattr(df, '__dataframe__'): raise ValueError('`df` does not support __dataframe__') return _from_dataframe(df.__dataframe__(allow_copy=allow_copy), allow_copy=allow_copy) def _from_dataframe(df: DataFrameXchg, allow_copy: bool=True) -> pd.DataFrame: pandas_dfs = [] for chunk in df.get_chunks(): pandas_df = protocol_df_chunk_to_pandas(chunk) pandas_dfs.append(pandas_df) if not allow_copy and len(pandas_dfs) > 1: raise RuntimeError('To join chunks a copy is required which is forbidden by allow_copy=False') if not pandas_dfs: pandas_df = protocol_df_chunk_to_pandas(df) elif len(pandas_dfs) == 1: pandas_df = pandas_dfs[0] else: pandas_df = pd.concat(pandas_dfs, axis=0, ignore_index=True, copy=False) index_obj = df.metadata.get('pandas.index', None) if index_obj is not None: pandas_df.index = index_obj return pandas_df def protocol_df_chunk_to_pandas(df: DataFrameXchg) -> pd.DataFrame: columns: dict[str, Any] = {} buffers = [] for name in df.column_names(): if not isinstance(name, str): raise ValueError(f'Column {name} is not a string') if name in columns: raise ValueError(f'Column {name} is not unique') col = df.get_column_by_name(name) dtype = col.dtype[0] if dtype in (DtypeKind.INT, DtypeKind.UINT, DtypeKind.FLOAT, DtypeKind.BOOL): (columns[name], buf) = primitive_column_to_ndarray(col) elif dtype == DtypeKind.CATEGORICAL: (columns[name], buf) = categorical_column_to_series(col) elif dtype == DtypeKind.STRING: (columns[name], buf) = string_column_to_ndarray(col) elif dtype == DtypeKind.DATETIME: (columns[name], buf) = datetime_column_to_ndarray(col) else: raise NotImplementedError(f'Data type {dtype} not handled yet') buffers.append(buf) pandas_df = 
pd.DataFrame(columns) pandas_df.attrs['_INTERCHANGE_PROTOCOL_BUFFERS'] = buffers return pandas_df def primitive_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]: buffers = col.get_buffers() (data_buff, data_dtype) = buffers['data'] data = buffer_to_ndarray(data_buff, data_dtype, offset=col.offset, length=col.size()) data = set_nulls(data, col, buffers['validity']) return (data, buffers) def categorical_column_to_series(col: Column) -> tuple[pd.Series, Any]: categorical = col.describe_categorical if not categorical['is_dictionary']: raise NotImplementedError('Non-dictionary categoricals not supported yet') cat_column = categorical['categories'] if hasattr(cat_column, '_col'): categories = np.array(cat_column._col) else: raise NotImplementedError("Interchanging categorical columns isn't supported yet, and our fallback of using the `col._col` attribute (a ndarray) failed.") buffers = col.get_buffers() (codes_buff, codes_dtype) = buffers['data'] codes = buffer_to_ndarray(codes_buff, codes_dtype, offset=col.offset, length=col.size()) if len(categories) > 0: values = categories[codes % len(categories)] else: values = codes cat = pd.Categorical(values, categories=categories, ordered=categorical['is_ordered']) data = pd.Series(cat) data = set_nulls(data, col, buffers['validity']) return (data, buffers) def string_column_to_ndarray(col: Column) -> tuple[np.ndarray, Any]: (null_kind, sentinel_val) = col.describe_null if null_kind not in (ColumnNullType.NON_NULLABLE, ColumnNullType.USE_BITMASK, ColumnNullType.USE_BYTEMASK): raise NotImplementedError(f'{null_kind} null kind is not yet supported for string columns.') buffers = col.get_buffers() assert buffers['offsets'], 'String buffers must contain offsets' (data_buff, _) = buffers['data'] assert col.dtype[2] in (ArrowCTypes.STRING, ArrowCTypes.LARGE_STRING) data_dtype = (DtypeKind.UINT, 8, ArrowCTypes.UINT8, Endianness.NATIVE) data = buffer_to_ndarray(data_buff, data_dtype, offset=0, length=data_buff.bufsize) (offset_buff, offset_dtype) = buffers['offsets'] offsets = buffer_to_ndarray(offset_buff, offset_dtype, offset=col.offset, length=col.size() + 1) null_pos = None if null_kind in (ColumnNullType.USE_BITMASK, ColumnNullType.USE_BYTEMASK): validity = buffers['validity'] if validity is not None: (valid_buff, valid_dtype) = validity null_pos = buffer_to_ndarray(valid_buff, valid_dtype, offset=col.offset, length=col.size()) if sentinel_val == 0: null_pos = ~null_pos str_list: list[None | float | str] = [None] * col.size() for i in range(col.size()): if null_pos is not None and null_pos[i]: str_list[i] = np.nan continue units = data[offsets[i]:offsets[i + 1]] str_bytes = bytes(units) string = str_bytes.decode(encoding='utf-8') str_list[i] = string return (np.asarray(str_list, dtype='object'), buffers) def parse_datetime_format_str(format_str, data) -> pd.Series | np.ndarray: timestamp_meta = re.match('ts([smun]):(.*)', format_str) if timestamp_meta: (unit, tz) = (timestamp_meta.group(1), timestamp_meta.group(2)) if unit != 's': unit += 's' data = data.astype(f'datetime64[{unit}]') if tz != '': data = pd.Series(data).dt.tz_localize('UTC').dt.tz_convert(tz) return data date_meta = re.match('td([Dm])', format_str) if date_meta: unit = date_meta.group(1) if unit == 'D': data = (data.astype(np.uint64) * (24 * 60 * 60)).astype('datetime64[s]') elif unit == 'm': data = data.astype('datetime64[ms]') else: raise NotImplementedError(f'Date unit is not supported: {unit}') return data raise NotImplementedError(f'DateTime kind is not supported: 
{format_str}') def datetime_column_to_ndarray(col: Column) -> tuple[np.ndarray | pd.Series, Any]: buffers = col.get_buffers() (_, col_bit_width, format_str, _) = col.dtype (dbuf, _) = buffers['data'] data = buffer_to_ndarray(dbuf, (DtypeKind.INT, col_bit_width, getattr(ArrowCTypes, f'INT{col_bit_width}'), Endianness.NATIVE), offset=col.offset, length=col.size()) data = parse_datetime_format_str(format_str, data) data = set_nulls(data, col, buffers['validity']) return (data, buffers) def buffer_to_ndarray(buffer: Buffer, dtype: tuple[DtypeKind, int, str, str], *, length: int, offset: int=0) -> np.ndarray: (kind, bit_width, _, _) = dtype column_dtype = _NP_DTYPES.get(kind, {}).get(bit_width, None) if column_dtype is None: raise NotImplementedError(f'Conversion for {dtype} is not yet supported.') ctypes_type = np.ctypeslib.as_ctypes_type(column_dtype) if bit_width == 1: assert length is not None, '`length` must be specified for a bit-mask buffer.' pa = import_optional_dependency('pyarrow') arr = pa.BooleanArray.from_buffers(pa.bool_(), length, [None, pa.foreign_buffer(buffer.ptr, length)], offset=offset) return np.asarray(arr) else: data_pointer = ctypes.cast(buffer.ptr + offset * bit_width // 8, ctypes.POINTER(ctypes_type)) if length > 0: return np.ctypeslib.as_array(data_pointer, shape=(length,)) return np.array([], dtype=ctypes_type) @overload def set_nulls(data: np.ndarray, col: Column, validity: tuple[Buffer, tuple[DtypeKind, int, str, str]] | None, allow_modify_inplace: bool=...) -> np.ndarray: ... @overload def set_nulls(data: pd.Series, col: Column, validity: tuple[Buffer, tuple[DtypeKind, int, str, str]] | None, allow_modify_inplace: bool=...) -> pd.Series: ... @overload def set_nulls(data: np.ndarray | pd.Series, col: Column, validity: tuple[Buffer, tuple[DtypeKind, int, str, str]] | None, allow_modify_inplace: bool=...) -> np.ndarray | pd.Series: ... 
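# ---- Editor's note: illustrative usage sketch, not part of the pandas source. ----
# Round-tripping a small DataFrame through the interchange protocol exercises the
# converters defined in this module (primitive_column_to_ndarray,
# string_column_to_ndarray, datetime_column_to_ndarray, set_nulls). The public
# entry point pandas.api.interchange.from_dataframe wraps the from_dataframe
# function above; the column names and values below are arbitrary examples.
import pandas as pd
from pandas.api.interchange import from_dataframe as interchange_from_dataframe

source = pd.DataFrame({'ints': [1, 2, 3], 'floats': [0.5, None, 2.5], 'strs': ['a', 'b', None]})
protocol_df = source.__dataframe__()               # protocol-level (DataFrameXchg) view
rebuilt = interchange_from_dataframe(protocol_df)  # reconstructed by the code in this file
print(rebuilt.dtypes)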
def set_nulls(data: np.ndarray | pd.Series, col: Column, validity: tuple[Buffer, tuple[DtypeKind, int, str, str]] | None, allow_modify_inplace: bool=True) -> np.ndarray | pd.Series: if validity is None: return data (null_kind, sentinel_val) = col.describe_null null_pos = None if null_kind == ColumnNullType.USE_SENTINEL: null_pos = pd.Series(data) == sentinel_val elif null_kind in (ColumnNullType.USE_BITMASK, ColumnNullType.USE_BYTEMASK): assert validity, 'Expected to have a validity buffer for the mask' (valid_buff, valid_dtype) = validity null_pos = buffer_to_ndarray(valid_buff, valid_dtype, offset=col.offset, length=col.size()) if sentinel_val == 0: null_pos = ~null_pos elif null_kind in (ColumnNullType.NON_NULLABLE, ColumnNullType.USE_NAN): pass else: raise NotImplementedError(f'Null kind {null_kind} is not yet supported.') if null_pos is not None and np.any(null_pos): if not allow_modify_inplace: data = data.copy() try: data[null_pos] = None except TypeError: data = data.astype(float) data[null_pos] = None return data # File: pandas-main/pandas/core/interchange/utils.py """""" from __future__ import annotations import typing import numpy as np from pandas._libs import lib from pandas.core.dtypes.dtypes import ArrowDtype, CategoricalDtype, DatetimeTZDtype import pandas as pd if typing.TYPE_CHECKING: from pandas._typing import DtypeObj PYARROW_CTYPES = {'null': 'n', 'bool': 'b', 'uint8': 'C', 'uint16': 'S', 'uint32': 'I', 'uint64': 'L', 'int8': 'c', 'int16': 'S', 'int32': 'i', 'int64': 'l', 'halffloat': 'e', 'float': 'f', 'double': 'g', 'string': 'u', 'large_string': 'U', 'binary': 'z', 'time32[s]': 'tts', 'time32[ms]': 'ttm', 'time64[us]': 'ttu', 'time64[ns]': 'ttn', 'date32[day]': 'tdD', 'date64[ms]': 'tdm', 'timestamp[s]': 'tss:', 'timestamp[ms]': 'tsm:', 'timestamp[us]': 'tsu:', 'timestamp[ns]': 'tsn:', 'duration[s]': 'tDs', 'duration[ms]': 'tDm', 'duration[us]': 'tDu', 'duration[ns]': 'tDn'} class ArrowCTypes: NULL = 'n' BOOL = 'b' INT8 = 'c' UINT8 = 'C' INT16 = 's' UINT16 = 'S' INT32 = 'i' UINT32 = 'I' INT64 = 'l' UINT64 = 'L' FLOAT16 = 'e' FLOAT32 = 'f' FLOAT64 = 'g' STRING = 'u' LARGE_STRING = 'U' DATE32 = 'tdD' DATE64 = 'tdm' TIMESTAMP = 'ts{resolution}:{tz}' TIME = 'tt{resolution}' class Endianness: LITTLE = '<' BIG = '>' NATIVE = '=' NA = '|' def dtype_to_arrow_c_fmt(dtype: DtypeObj) -> str: if isinstance(dtype, CategoricalDtype): return ArrowCTypes.INT64 elif dtype == np.dtype('O'): return ArrowCTypes.STRING elif isinstance(dtype, ArrowDtype): import pyarrow as pa pa_type = dtype.pyarrow_dtype if pa.types.is_decimal(pa_type): return f'd:{pa_type.precision},{pa_type.scale}' elif pa.types.is_timestamp(pa_type) and pa_type.tz is not None: return f'ts{pa_type.unit[0]}:{pa_type.tz}' format_str = PYARROW_CTYPES.get(str(pa_type), None) if format_str is not None: return format_str format_str = getattr(ArrowCTypes, dtype.name.upper(), None) if format_str is not None: return format_str if isinstance(dtype, pd.StringDtype): return ArrowCTypes.STRING elif lib.is_np_dtype(dtype, 'M'): resolution = np.datetime_data(dtype)[0][0] return ArrowCTypes.TIMESTAMP.format(resolution=resolution, tz='') elif isinstance(dtype, DatetimeTZDtype): return ArrowCTypes.TIMESTAMP.format(resolution=dtype.unit[0], tz=dtype.tz) elif isinstance(dtype, pd.BooleanDtype): return ArrowCTypes.BOOL raise NotImplementedError(f'Conversion of {dtype} to Arrow C format string is not implemented.') def maybe_rechunk(series: pd.Series, *, allow_copy: bool) -> pd.Series | None: if not isinstance(series.dtype, pd.ArrowDtype): 
return None chunked_array = series.array._pa_array if len(chunked_array.chunks) == 1: return None if not allow_copy: raise RuntimeError('Found multi-chunk pyarrow array, but `allow_copy` is False. Please rechunk the array before calling this function, or set `allow_copy=True`.') arr = chunked_array.combine_chunks() return pd.Series(arr, dtype=series.dtype, name=series.name, index=series.index) # File: pandas-main/pandas/core/internals/__init__.py from pandas.core.internals.api import make_block from pandas.core.internals.concat import concatenate_managers from pandas.core.internals.managers import BlockManager, SingleBlockManager __all__ = ['make_block', 'BlockManager', 'SingleBlockManager', 'concatenate_managers'] # File: pandas-main/pandas/core/internals/api.py """""" from __future__ import annotations from typing import TYPE_CHECKING import warnings import numpy as np from pandas._libs.internals import BlockPlacement from pandas.core.dtypes.common import pandas_dtype from pandas.core.dtypes.dtypes import DatetimeTZDtype, ExtensionDtype, PeriodDtype from pandas.core.arrays import DatetimeArray, TimedeltaArray from pandas.core.construction import extract_array from pandas.core.internals.blocks import check_ndim, ensure_block_shape, extract_pandas_array, get_block_type, maybe_coerce_values if TYPE_CHECKING: from pandas._typing import ArrayLike, Dtype from pandas.core.internals.blocks import Block def _make_block(values: ArrayLike, placement: np.ndarray) -> Block: dtype = values.dtype klass = get_block_type(dtype) placement_obj = BlockPlacement(placement) if isinstance(dtype, ExtensionDtype) and dtype._supports_2d or isinstance(values, (DatetimeArray, TimedeltaArray)): values = ensure_block_shape(values, ndim=2) values = maybe_coerce_values(values) return klass(values, ndim=2, placement=placement_obj) def make_block(values, placement, klass=None, ndim=None, dtype: Dtype | None=None) -> Block: warnings.warn('make_block is deprecated and will be removed in a future version. 
Use pd.api.internals.create_dataframe_from_blocks or (recommended) higher-level public APIs instead.', DeprecationWarning, stacklevel=2) if dtype is not None: dtype = pandas_dtype(dtype) (values, dtype) = extract_pandas_array(values, dtype, ndim) from pandas.core.internals.blocks import ExtensionBlock if klass is ExtensionBlock and isinstance(values.dtype, PeriodDtype): klass = None if klass is None: dtype = dtype or values.dtype klass = get_block_type(dtype) if not isinstance(placement, BlockPlacement): placement = BlockPlacement(placement) ndim = maybe_infer_ndim(values, placement, ndim) if isinstance(values.dtype, (PeriodDtype, DatetimeTZDtype)): values = extract_array(values, extract_numpy=True) values = ensure_block_shape(values, ndim) check_ndim(values, placement, ndim) values = maybe_coerce_values(values) return klass(values, ndim=ndim, placement=placement) def maybe_infer_ndim(values, placement: BlockPlacement, ndim: int | None) -> int: if ndim is None: if not isinstance(values.dtype, np.dtype): if len(placement) != 1: ndim = 1 else: ndim = 2 else: ndim = values.ndim return ndim # File: pandas-main/pandas/core/internals/blocks.py from __future__ import annotations import inspect import re from typing import TYPE_CHECKING, Any, Literal, cast, final import warnings import weakref import numpy as np from pandas._libs import NaT, internals as libinternals, lib from pandas._libs.internals import BlockPlacement, BlockValuesRefs from pandas._libs.missing import NA from pandas._typing import ArrayLike, AxisInt, DtypeBackend, DtypeObj, FillnaOptions, IgnoreRaise, InterpolateOptions, QuantileInterpolation, Self, Shape, npt from pandas.errors import AbstractMethodError, OutOfBoundsDatetime from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.astype import astype_array_safe, astype_is_view from pandas.core.dtypes.cast import LossySetitemError, can_hold_element, convert_dtypes, find_result_type, np_can_hold_element from pandas.core.dtypes.common import is_1d_only_ea_dtype, is_float_dtype, is_integer_dtype, is_list_like, is_scalar, is_string_dtype from pandas.core.dtypes.dtypes import DatetimeTZDtype, ExtensionDtype, IntervalDtype, NumpyEADtype, PeriodDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCIndex, ABCNumpyExtensionArray, ABCSeries from pandas.core.dtypes.missing import is_valid_na_for_dtype, isna, na_value_for_dtype from pandas.core import missing import pandas.core.algorithms as algos from pandas.core.array_algos.putmask import extract_bool_array, putmask_inplace, putmask_without_repeat, setitem_datetimelike_compat, validate_putmask from pandas.core.array_algos.quantile import quantile_compat from pandas.core.array_algos.replace import compare_or_regex_search, replace_regex, should_use_regex from pandas.core.array_algos.transforms import shift from pandas.core.arrays import DatetimeArray, ExtensionArray, IntervalArray, NumpyExtensionArray, PeriodArray, TimedeltaArray from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.computation import expressions from pandas.core.construction import ensure_wrapped_if_datetimelike, extract_array from pandas.core.indexers import check_setitem_lengths from pandas.core.indexes.base import get_values_for_csv if TYPE_CHECKING: from collections.abc import Callable, Generator, Iterable, Sequence from pandas.core.api import Index from pandas.core.arrays._mixins import 
NDArrayBackedExtensionArray _dtype_obj = np.dtype('object') class Block(PandasObject, libinternals.Block): values: np.ndarray | ExtensionArray ndim: int refs: BlockValuesRefs __init__: Callable __slots__ = () is_numeric = False @final @cache_readonly def _validate_ndim(self) -> bool: dtype = self.dtype return not isinstance(dtype, ExtensionDtype) or isinstance(dtype, DatetimeTZDtype) @final @cache_readonly def is_object(self) -> bool: return self.values.dtype == _dtype_obj @final @cache_readonly def is_extension(self) -> bool: return not lib.is_np_dtype(self.values.dtype) @final @cache_readonly def _can_consolidate(self) -> bool: return not self.is_extension @final @cache_readonly def _consolidate_key(self): return (self._can_consolidate, self.dtype.name) @final @cache_readonly def _can_hold_na(self) -> bool: dtype = self.dtype if isinstance(dtype, np.dtype): return dtype.kind not in 'iub' return dtype._can_hold_na @final @property def is_bool(self) -> bool: return self.values.dtype == np.dtype(bool) @final def external_values(self): return external_values(self.values) @final @cache_readonly def fill_value(self): return na_value_for_dtype(self.dtype, compat=False) @final def _standardize_fill_value(self, value): if self.dtype != _dtype_obj and is_valid_na_for_dtype(value, self.dtype): value = self.fill_value return value @property def mgr_locs(self) -> BlockPlacement: return self._mgr_locs @mgr_locs.setter def mgr_locs(self, new_mgr_locs: BlockPlacement) -> None: self._mgr_locs = new_mgr_locs @final def make_block(self, values, placement: BlockPlacement | None=None, refs: BlockValuesRefs | None=None) -> Block: if placement is None: placement = self._mgr_locs if self.is_extension: values = ensure_block_shape(values, ndim=self.ndim) return new_block(values, placement=placement, ndim=self.ndim, refs=refs) @final def make_block_same_class(self, values, placement: BlockPlacement | None=None, refs: BlockValuesRefs | None=None) -> Self: if placement is None: placement = self._mgr_locs return type(self)(values, placement=placement, ndim=self.ndim, refs=refs) @final def __repr__(self) -> str: name = type(self).__name__ if self.ndim == 1: result = f'{name}: {len(self)} dtype: {self.dtype}' else: shape = ' x '.join([str(s) for s in self.shape]) result = f'{name}: {self.mgr_locs.indexer}, {shape}, dtype: {self.dtype}' return result @final def __len__(self) -> int: return len(self.values) @final def slice_block_columns(self, slc: slice) -> Self: new_mgr_locs = self._mgr_locs[slc] new_values = self._slice(slc) refs = self.refs return type(self)(new_values, new_mgr_locs, self.ndim, refs=refs) @final def take_block_columns(self, indices: npt.NDArray[np.intp]) -> Self: new_mgr_locs = self._mgr_locs[indices] new_values = self._slice(indices) return type(self)(new_values, new_mgr_locs, self.ndim, refs=None) @final def getitem_block_columns(self, slicer: slice, new_mgr_locs: BlockPlacement, ref_inplace_op: bool=False) -> Self: new_values = self._slice(slicer) refs = self.refs if not ref_inplace_op or self.refs.has_reference() else None return type(self)(new_values, new_mgr_locs, self.ndim, refs=refs) @final def _can_hold_element(self, element: Any) -> bool: element = extract_array(element, extract_numpy=True) return can_hold_element(self.values, element) @final def should_store(self, value: ArrayLike) -> bool: return value.dtype == self.dtype @final def apply(self, func, **kwargs) -> list[Block]: result = func(self.values, **kwargs) result = maybe_coerce_values(result) return self._split_op_result(result) 
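# ---- Editor's note: illustrative sketch, not part of the pandas source. ----
# The Block subclasses defined in this file are what a DataFrame's BlockManager
# holds internally. One quick way to see that layout is to inspect the private
# _mgr attribute; it is an implementation detail and may change between pandas
# versions, so treat this only as a debugging aid, not a supported API.
import pandas as pd

frame = pd.DataFrame({'i': [1, 2, 3], 'f': [0.1, 0.2, 0.3], 's': ['a', 'b', 'c']})
for blk in frame._mgr.blocks:
    # Each Block pairs a 2D array of values with the column positions it occupies.
    print(type(blk).__name__, blk.dtype, blk.mgr_locs.indexer, blk.shape)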
@final def reduce(self, func) -> list[Block]: assert self.ndim == 2 result = func(self.values) if self.values.ndim == 1: res_values = result else: res_values = result.reshape(-1, 1) nb = self.make_block(res_values) return [nb] @final def _split_op_result(self, result: ArrayLike) -> list[Block]: if result.ndim > 1 and isinstance(result.dtype, ExtensionDtype): nbs = [] for (i, loc) in enumerate(self._mgr_locs): if not is_1d_only_ea_dtype(result.dtype): vals = result[i:i + 1] else: vals = result[i] bp = BlockPlacement(loc) block = self.make_block(values=vals, placement=bp) nbs.append(block) return nbs nb = self.make_block(result) return [nb] @final def _split(self) -> Generator[Block, None, None]: assert self.ndim == 2 for (i, ref_loc) in enumerate(self._mgr_locs): vals = self.values[slice(i, i + 1)] bp = BlockPlacement(ref_loc) nb = type(self)(vals, placement=bp, ndim=2, refs=self.refs) yield nb @final def split_and_operate(self, func, *args, **kwargs) -> list[Block]: assert self.ndim == 2 and self.shape[0] != 1 res_blocks = [] for nb in self._split(): rbs = func(nb, *args, **kwargs) res_blocks.extend(rbs) return res_blocks @final def coerce_to_target_dtype(self, other, raise_on_upcast: bool) -> Block: new_dtype = find_result_type(self.values.dtype, other) if new_dtype == self.dtype: raise AssertionError('Something has gone wrong, please report a bug at https://github.com/pandas-dev/pandas/issues') if is_scalar(other) and is_integer_dtype(self.values.dtype) and isna(other) and (other is not NaT) and (not (isinstance(other, (np.datetime64, np.timedelta64)) and np.isnat(other))): raise_on_upcast = False elif isinstance(other, np.ndarray) and other.ndim == 1 and is_integer_dtype(self.values.dtype) and is_float_dtype(other.dtype) and lib.has_only_ints_or_nan(other): raise_on_upcast = False if raise_on_upcast: raise TypeError(f"Invalid value '{other}' for dtype '{self.values.dtype}'") if self.values.dtype == new_dtype: raise AssertionError(f'Did not expect new dtype {new_dtype} to equal self.dtype {self.values.dtype}. Please report a bug at https://github.com/pandas-dev/pandas/issues.') try: return self.astype(new_dtype) except OutOfBoundsDatetime as err: raise OutOfBoundsDatetime(f"Incompatible (high-resolution) value for dtype='{self.dtype}'. 
Explicitly cast before operating.") from err @final def convert(self) -> list[Block]: if not self.is_object: return [self.copy(deep=False)] if self.ndim != 1 and self.shape[0] != 1: blocks = self.split_and_operate(Block.convert) if all((blk.dtype.kind == 'O' for blk in blocks)): return [self.copy(deep=False)] return blocks values = self.values if values.ndim == 2: values = values[0] res_values = lib.maybe_convert_objects(values, convert_non_numeric=True) refs = None if res_values is values or (isinstance(res_values, NumpyExtensionArray) and res_values._ndarray is values): refs = self.refs res_values = ensure_block_shape(res_values, self.ndim) res_values = maybe_coerce_values(res_values) return [self.make_block(res_values, refs=refs)] def convert_dtypes(self, infer_objects: bool=True, convert_string: bool=True, convert_integer: bool=True, convert_boolean: bool=True, convert_floating: bool=True, dtype_backend: DtypeBackend='numpy_nullable') -> list[Block]: if infer_objects and self.is_object: blks = self.convert() else: blks = [self] if not any([convert_floating, convert_integer, convert_boolean, convert_string]): return [b.copy(deep=False) for b in blks] rbs = [] for blk in blks: sub_blks = [blk] if blk.ndim == 1 or self.shape[0] == 1 else list(blk._split()) dtypes = [convert_dtypes(b.values, convert_string, convert_integer, convert_boolean, convert_floating, infer_objects, dtype_backend) for b in sub_blks] if all((dtype == self.dtype for dtype in dtypes)): rbs.append(blk.copy(deep=False)) continue for (dtype, b) in zip(dtypes, sub_blks): rbs.append(b.astype(dtype=dtype, squeeze=b.ndim != 1)) return rbs @final @cache_readonly def dtype(self) -> DtypeObj: return self.values.dtype @final def astype(self, dtype: DtypeObj, errors: IgnoreRaise='raise', squeeze: bool=False) -> Block: values = self.values if squeeze and values.ndim == 2 and is_1d_only_ea_dtype(dtype): if values.shape[0] != 1: raise ValueError('Can not squeeze with more than one column.') values = values[0, :] new_values = astype_array_safe(values, dtype, errors=errors) new_values = maybe_coerce_values(new_values) refs = None if astype_is_view(values.dtype, new_values.dtype): refs = self.refs newb = self.make_block(new_values, refs=refs) if newb.shape != self.shape: raise TypeError(f'cannot set astype for dtype ({self.dtype.name} [{self.shape}]) to different shape ({newb.dtype.name} [{newb.shape}])') return newb @final def get_values_for_csv(self, *, float_format, date_format, decimal, na_rep: str='nan', quoting=None) -> Block: result = get_values_for_csv(self.values, na_rep=na_rep, quoting=quoting, float_format=float_format, date_format=date_format, decimal=decimal) return self.make_block(result) @final def copy(self, deep: bool=True) -> Self: values = self.values refs: BlockValuesRefs | None if deep: values = values.copy() refs = None else: refs = self.refs return type(self)(values, placement=self._mgr_locs, ndim=self.ndim, refs=refs) def _maybe_copy(self, inplace: bool) -> Self: if inplace: deep = self.refs.has_reference() return self.copy(deep=deep) return self.copy() @final def _get_refs_and_copy(self, inplace: bool): refs = None copy = not inplace if inplace: if self.refs.has_reference(): copy = True else: refs = self.refs return (copy, refs) @final def replace(self, to_replace, value, inplace: bool=False, mask: npt.NDArray[np.bool_] | None=None) -> list[Block]: values = self.values if not self._can_hold_element(to_replace): return [self.copy(deep=False)] if mask is None: mask = missing.mask_missing(values, to_replace) if not 
mask.any(): return [self.copy(deep=False)] elif self._can_hold_element(value): blk = self._maybe_copy(inplace) putmask_inplace(blk.values, mask, value) return [blk] elif self.ndim == 1 or self.shape[0] == 1: if value is None or value is NA: blk = self.astype(np.dtype(object)) else: blk = self.coerce_to_target_dtype(value, raise_on_upcast=False) return blk.replace(to_replace=to_replace, value=value, inplace=True, mask=mask) else: blocks = [] for (i, nb) in enumerate(self._split()): blocks.extend(type(self).replace(nb, to_replace=to_replace, value=value, inplace=True, mask=mask[i:i + 1])) return blocks @final def _replace_regex(self, to_replace, value, inplace: bool=False, mask=None) -> list[Block]: if not self._can_hold_element(to_replace): return [self.copy(deep=False)] rx = re.compile(to_replace) block = self._maybe_copy(inplace) replace_regex(block.values, rx, value, mask) return [block] @final def replace_list(self, src_list: Iterable[Any], dest_list: Sequence[Any], inplace: bool=False, regex: bool=False) -> list[Block]: values = self.values pairs = [(x, y) for (x, y) in zip(src_list, dest_list) if self._can_hold_element(x)] if not len(pairs): return [self.copy(deep=False)] src_len = len(pairs) - 1 if is_string_dtype(values.dtype): na_mask = ~isna(values) masks: Iterable[npt.NDArray[np.bool_]] = (extract_bool_array(cast(ArrayLike, compare_or_regex_search(values, s[0], regex=regex, mask=na_mask))) for s in pairs) else: masks = (missing.mask_missing(values, s[0]) for s in pairs) if inplace: masks = list(masks) rb = [self] for (i, ((src, dest), mask)) in enumerate(zip(pairs, masks)): new_rb: list[Block] = [] for (blk_num, blk) in enumerate(rb): if len(rb) == 1: m = mask else: mib = mask assert not isinstance(mib, bool) m = mib[blk_num:blk_num + 1] result = blk._replace_coerce(to_replace=src, value=dest, mask=m, inplace=inplace, regex=regex) if i != src_len: for b in result: ref = weakref.ref(b) b.refs.referenced_blocks.pop(b.refs.referenced_blocks.index(ref)) new_rb.extend(result) rb = new_rb return rb @final def _replace_coerce(self, to_replace, value, mask: npt.NDArray[np.bool_], inplace: bool=True, regex: bool=False) -> list[Block]: if should_use_regex(regex, to_replace): return self._replace_regex(to_replace, value, inplace=inplace, mask=mask) else: if value is None: if mask.any(): has_ref = self.refs.has_reference() nb = self.astype(np.dtype(object)) if not inplace: nb = nb.copy() elif inplace and has_ref and nb.refs.has_reference(): nb = nb.copy() putmask_inplace(nb.values, mask, value) return [nb] return [self.copy(deep=False)] return self.replace(to_replace=to_replace, value=value, inplace=inplace, mask=mask) def _maybe_squeeze_arg(self, arg: np.ndarray) -> np.ndarray: return arg def _unwrap_setitem_indexer(self, indexer): return indexer @property def shape(self) -> Shape: return self.values.shape def iget(self, i: int | tuple[int, int] | tuple[slice, int]) -> np.ndarray: return self.values[i] def _slice(self, slicer: slice | npt.NDArray[np.bool_] | npt.NDArray[np.intp]) -> ArrayLike: return self.values[slicer] def set_inplace(self, locs, values: ArrayLike, copy: bool=False) -> None: if copy: self.values = self.values.copy() self.values[locs] = values @final def take_nd(self, indexer: npt.NDArray[np.intp], axis: AxisInt, new_mgr_locs: BlockPlacement | None=None, fill_value=lib.no_default) -> Block: values = self.values if fill_value is lib.no_default: fill_value = self.fill_value allow_fill = False else: allow_fill = True new_values = algos.take_nd(values, indexer, axis=axis, 
allow_fill=allow_fill, fill_value=fill_value) if isinstance(self, ExtensionBlock): assert not (self.ndim == 1 and new_mgr_locs is None) assert not (axis == 0 and new_mgr_locs is None) if new_mgr_locs is None: new_mgr_locs = self._mgr_locs if new_values.dtype != self.dtype: return self.make_block(new_values, new_mgr_locs) else: return self.make_block_same_class(new_values, new_mgr_locs) def _unstack(self, unstacker, fill_value, new_placement: npt.NDArray[np.intp], needs_masking: npt.NDArray[np.bool_]): (new_values, mask) = unstacker.get_new_values(self.values.T, fill_value=fill_value) mask = mask.any(0) new_values = new_values.T[mask] new_placement = new_placement[mask] bp = BlockPlacement(new_placement) blocks = [new_block_2d(new_values, placement=bp)] return (blocks, mask) def setitem(self, indexer, value) -> Block: value = self._standardize_fill_value(value) values = cast(np.ndarray, self.values) if self.ndim == 2: values = values.T check_setitem_lengths(indexer, value, values) if self.dtype != _dtype_obj: value = extract_array(value, extract_numpy=True) try: casted = np_can_hold_element(values.dtype, value) except LossySetitemError: nb = self.coerce_to_target_dtype(value, raise_on_upcast=True) return nb.setitem(indexer, value) else: if self.dtype == _dtype_obj: vi = values[indexer] if lib.is_list_like(vi): casted = setitem_datetimelike_compat(values, len(vi), casted) self = self._maybe_copy(inplace=True) values = cast(np.ndarray, self.values.T) if isinstance(casted, np.ndarray) and casted.ndim == 1 and (len(casted) == 1): casted = casted[0, ...] try: values[indexer] = casted except (TypeError, ValueError) as err: if is_list_like(casted): raise ValueError('setting an array element with a sequence.') from err raise return self def putmask(self, mask, new) -> list[Block]: orig_mask = mask values = cast(np.ndarray, self.values) (mask, noop) = validate_putmask(values.T, mask) assert not isinstance(new, (ABCIndex, ABCSeries, ABCDataFrame)) if new is lib.no_default: new = self.fill_value new = self._standardize_fill_value(new) new = extract_array(new, extract_numpy=True) if noop: return [self.copy(deep=False)] try: casted = np_can_hold_element(values.dtype, new) self = self._maybe_copy(inplace=True) values = cast(np.ndarray, self.values) putmask_without_repeat(values.T, mask, casted) return [self] except LossySetitemError: if self.ndim == 1 or self.shape[0] == 1: if not is_list_like(new): return self.coerce_to_target_dtype(new, raise_on_upcast=True).putmask(mask, new) else: indexer = mask.nonzero()[0] nb = self.setitem(indexer, new[indexer]) return [nb] else: is_array = isinstance(new, np.ndarray) res_blocks = [] for (i, nb) in enumerate(self._split()): n = new if is_array: n = new[:, i:i + 1] submask = orig_mask[:, i:i + 1] rbs = nb.putmask(submask, n) res_blocks.extend(rbs) return res_blocks def where(self, other, cond) -> list[Block]: assert cond.ndim == self.ndim assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame)) transpose = self.ndim == 2 cond = extract_bool_array(cond) values = cast(np.ndarray, self.values) orig_other = other if transpose: values = values.T (icond, noop) = validate_putmask(values, ~cond) if noop: return [self.copy(deep=False)] if other is lib.no_default: other = self.fill_value other = self._standardize_fill_value(other) try: casted = np_can_hold_element(values.dtype, other) except (ValueError, TypeError, LossySetitemError): if self.ndim == 1 or self.shape[0] == 1: block = self.coerce_to_target_dtype(other, raise_on_upcast=False) return 
block.where(orig_other, cond) else: is_array = isinstance(other, (np.ndarray, ExtensionArray)) res_blocks = [] for (i, nb) in enumerate(self._split()): oth = other if is_array: oth = other[:, i:i + 1] submask = cond[:, i:i + 1] rbs = nb.where(oth, submask) res_blocks.extend(rbs) return res_blocks else: other = casted alt = setitem_datetimelike_compat(values, icond.sum(), other) if alt is not other: if is_list_like(other) and len(other) < len(values): np.where(~icond, values, other) raise NotImplementedError('This should not be reached; call to np.where above is expected to raise ValueError. Please report a bug at github.com/pandas-dev/pandas') result = values.copy() np.putmask(result, icond, alt) else: if is_list_like(other) and (not isinstance(other, np.ndarray)) and (len(other) == self.shape[-1]): other = np.array(other).reshape(values.shape) result = expressions.where(~icond, values, other) if transpose: result = result.T return [self.make_block(result)] def fillna(self, value, limit: int | None=None, inplace: bool=False) -> list[Block]: inplace = validate_bool_kwarg(inplace, 'inplace') if not self._can_hold_na: noop = True else: mask = isna(self.values) (mask, noop) = validate_putmask(self.values, mask) if noop: return [self.copy(deep=False)] if limit is not None: mask[mask.cumsum(self.ndim - 1) > limit] = False if inplace: nbs = self.putmask(mask.T, value) else: nbs = self.where(value, ~mask.T) return extend_blocks(nbs) def pad_or_backfill(self, *, method: FillnaOptions, inplace: bool=False, limit: int | None=None, limit_area: Literal['inside', 'outside'] | None=None) -> list[Block]: if not self._can_hold_na: return [self.copy(deep=False)] (copy, refs) = self._get_refs_and_copy(inplace) vals = cast(NumpyExtensionArray, self.array_values) new_values = vals.T._pad_or_backfill(method=method, limit=limit, limit_area=limit_area, copy=copy).T data = extract_array(new_values, extract_numpy=True) return [self.make_block_same_class(data, refs=refs)] @final def interpolate(self, *, method: InterpolateOptions, index: Index, inplace: bool=False, limit: int | None=None, limit_direction: Literal['forward', 'backward', 'both']='forward', limit_area: Literal['inside', 'outside'] | None=None, **kwargs) -> list[Block]: inplace = validate_bool_kwarg(inplace, 'inplace') if method == 'asfreq': missing.clean_fill_method(method) if not self._can_hold_na: return [self.copy(deep=False)] if self.dtype == _dtype_obj: name = {1: 'Series', 2: 'DataFrame'}[self.ndim] raise TypeError(f'{name} cannot interpolate with object dtype.') (copy, refs) = self._get_refs_and_copy(inplace) new_values = self.array_values.interpolate(method=method, axis=self.ndim - 1, index=index, limit=limit, limit_direction=limit_direction, limit_area=limit_area, copy=copy, **kwargs) data = extract_array(new_values, extract_numpy=True) return [self.make_block_same_class(data, refs=refs)] @final def diff(self, n: int) -> list[Block]: new_values = algos.diff(self.values.T, n, axis=0).T return [self.make_block(values=new_values)] def shift(self, periods: int, fill_value: Any=None) -> list[Block]: axis = self.ndim - 1 if not lib.is_scalar(fill_value) and self.dtype != _dtype_obj: raise ValueError('fill_value must be a scalar') fill_value = self._standardize_fill_value(fill_value) try: casted = np_can_hold_element(self.dtype, fill_value) except LossySetitemError: nb = self.coerce_to_target_dtype(fill_value, raise_on_upcast=False) return nb.shift(periods, fill_value=fill_value) else: values = cast(np.ndarray, self.values) new_values = shift(values, 
periods, axis, casted) return [self.make_block_same_class(new_values)] @final def quantile(self, qs: Index, interpolation: QuantileInterpolation='linear') -> Block: assert self.ndim == 2 assert is_list_like(qs) result = quantile_compat(self.values, np.asarray(qs._values), interpolation) result = ensure_block_shape(result, ndim=2) return new_block_2d(result, placement=self._mgr_locs) @final def round(self, decimals: int) -> Self: if not self.is_numeric or self.is_bool: return self.copy(deep=False) values = self.values.round(decimals) refs = None if values is self.values: refs = self.refs return self.make_block_same_class(values, refs=refs) def delete(self, loc) -> list[Block]: if not is_list_like(loc): loc = [loc] if self.ndim == 1: values = cast(np.ndarray, self.values) values = np.delete(values, loc) mgr_locs = self._mgr_locs.delete(loc) return [type(self)(values, placement=mgr_locs, ndim=self.ndim)] if np.max(loc) >= self.values.shape[0]: raise IndexError loc = np.concatenate([loc, [self.values.shape[0]]]) mgr_locs_arr = self._mgr_locs.as_array new_blocks: list[Block] = [] previous_loc = -1 refs = self.refs if self.refs.has_reference() else None for idx in loc: if idx == previous_loc + 1: pass else: values = self.values[previous_loc + 1:idx, :] locs = mgr_locs_arr[previous_loc + 1:idx] nb = type(self)(values, placement=BlockPlacement(locs), ndim=self.ndim, refs=refs) new_blocks.append(nb) previous_loc = idx return new_blocks @property def is_view(self) -> bool: raise AbstractMethodError(self) @property def array_values(self) -> ExtensionArray: raise AbstractMethodError(self) def get_values(self, dtype: DtypeObj | None=None) -> np.ndarray: raise AbstractMethodError(self) class EABackedBlock(Block): values: ExtensionArray @final def shift(self, periods: int, fill_value: Any=None) -> list[Block]: new_values = self.values.T.shift(periods=periods, fill_value=fill_value).T return [self.make_block_same_class(new_values)] @final def setitem(self, indexer, value): orig_indexer = indexer orig_value = value indexer = self._unwrap_setitem_indexer(indexer) value = self._maybe_squeeze_arg(value) values = self.values if values.ndim == 2: values = values.T check_setitem_lengths(indexer, value, values) try: values[indexer] = value except (ValueError, TypeError): if isinstance(self.dtype, IntervalDtype): nb = self.coerce_to_target_dtype(orig_value, raise_on_upcast=True) return nb.setitem(orig_indexer, orig_value) elif isinstance(self, NDArrayBackedExtensionBlock): nb = self.coerce_to_target_dtype(orig_value, raise_on_upcast=True) return nb.setitem(orig_indexer, orig_value) else: raise else: return self @final def where(self, other, cond) -> list[Block]: arr = self.values.T cond = extract_bool_array(cond) orig_other = other orig_cond = cond other = self._maybe_squeeze_arg(other) cond = self._maybe_squeeze_arg(cond) if other is lib.no_default: other = self.fill_value (icond, noop) = validate_putmask(arr, ~cond) if noop: return [self.copy(deep=False)] try: res_values = arr._where(cond, other).T except (ValueError, TypeError): if self.ndim == 1 or self.shape[0] == 1: if isinstance(self.dtype, IntervalDtype): blk = self.coerce_to_target_dtype(orig_other, raise_on_upcast=False) return blk.where(orig_other, orig_cond) elif isinstance(self, NDArrayBackedExtensionBlock): blk = self.coerce_to_target_dtype(orig_other, raise_on_upcast=False) return blk.where(orig_other, orig_cond) else: raise else: is_array = isinstance(orig_other, (np.ndarray, ExtensionArray)) res_blocks = [] for (i, nb) in enumerate(self._split()): 
n = orig_other if is_array: n = orig_other[:, i:i + 1] submask = orig_cond[:, i:i + 1] rbs = nb.where(n, submask) res_blocks.extend(rbs) return res_blocks nb = self.make_block_same_class(res_values) return [nb] @final def putmask(self, mask, new) -> list[Block]: mask = extract_bool_array(mask) if new is lib.no_default: new = self.fill_value orig_new = new orig_mask = mask new = self._maybe_squeeze_arg(new) mask = self._maybe_squeeze_arg(mask) if not mask.any(): return [self.copy(deep=False)] self = self._maybe_copy(inplace=True) values = self.values if values.ndim == 2: values = values.T try: values._putmask(mask, new) except (TypeError, ValueError): if self.ndim == 1 or self.shape[0] == 1: if isinstance(self.dtype, IntervalDtype): blk = self.coerce_to_target_dtype(orig_new, raise_on_upcast=True) return blk.putmask(orig_mask, orig_new) elif isinstance(self, NDArrayBackedExtensionBlock): blk = self.coerce_to_target_dtype(orig_new, raise_on_upcast=True) return blk.putmask(orig_mask, orig_new) else: raise else: is_array = isinstance(orig_new, (np.ndarray, ExtensionArray)) res_blocks = [] for (i, nb) in enumerate(self._split()): n = orig_new if is_array: n = orig_new[:, i:i + 1] submask = orig_mask[:, i:i + 1] rbs = nb.putmask(submask, n) res_blocks.extend(rbs) return res_blocks return [self] @final def delete(self, loc) -> list[Block]: if self.ndim == 1: values = self.values.delete(loc) mgr_locs = self._mgr_locs.delete(loc) return [type(self)(values, placement=mgr_locs, ndim=self.ndim)] elif self.values.ndim == 1: return [] return super().delete(loc) @final @cache_readonly def array_values(self) -> ExtensionArray: return self.values @final def get_values(self, dtype: DtypeObj | None=None) -> np.ndarray: values: ArrayLike = self.values if dtype == _dtype_obj: values = values.astype(object) return np.asarray(values).reshape(self.shape) @final def pad_or_backfill(self, *, method: FillnaOptions, inplace: bool=False, limit: int | None=None, limit_area: Literal['inside', 'outside'] | None=None) -> list[Block]: values = self.values kwargs: dict[str, Any] = {'method': method, 'limit': limit} if 'limit_area' in inspect.signature(values._pad_or_backfill).parameters: kwargs['limit_area'] = limit_area elif limit_area is not None: raise NotImplementedError(f'{type(values).__name__} does not implement limit_area (added in pandas 2.2). 3rd-party ExtensionArray authors need to add this argument to _pad_or_backfill.') if values.ndim == 2: new_values = values.T._pad_or_backfill(**kwargs).T else: new_values = values._pad_or_backfill(**kwargs) return [self.make_block_same_class(new_values)] class ExtensionBlock(EABackedBlock): values: ExtensionArray def fillna(self, value, limit: int | None=None, inplace: bool=False) -> list[Block]: if isinstance(self.dtype, IntervalDtype): if limit is not None: raise ValueError('limit must be None') return super().fillna(value=value, limit=limit, inplace=inplace) if self._can_hold_na and (not self.values._hasna): refs = self.refs new_values = self.values else: (copy, refs) = self._get_refs_and_copy(inplace) try: new_values = self.values.fillna(value=value, limit=limit, copy=copy) except TypeError: refs = None new_values = self.values.fillna(value=value, limit=limit) warnings.warn(f"ExtensionArray.fillna added a 'copy' keyword in pandas 2.1.0. In a future version, ExtensionArray subclasses will need to implement this keyword or an exception will be raised. 
In the interim, the keyword is ignored by {type(self.values).__name__}.", DeprecationWarning, stacklevel=find_stack_level()) return [self.make_block_same_class(new_values, refs=refs)] @cache_readonly def shape(self) -> Shape: if self.ndim == 1: return (len(self.values),) return (len(self._mgr_locs), len(self.values)) def iget(self, i: int | tuple[int, int] | tuple[slice, int]): if isinstance(i, tuple): (col, loc) = i if not com.is_null_slice(col) and col != 0: raise IndexError(f'{self} only contains one item') if isinstance(col, slice): if loc < 0: loc += len(self.values) return self.values[loc:loc + 1] return self.values[loc] else: if i != 0: raise IndexError(f'{self} only contains one item') return self.values def set_inplace(self, locs, values: ArrayLike, copy: bool=False) -> None: if copy: self.values = self.values.copy() self.values[:] = values def _maybe_squeeze_arg(self, arg): if isinstance(arg, (np.ndarray, ExtensionArray)) and arg.ndim == self.values.ndim + 1: assert arg.shape[1] == 1 arg = arg[:, 0] elif isinstance(arg, ABCDataFrame): assert arg.shape[1] == 1 arg = arg._ixs(0, axis=1)._values return arg def _unwrap_setitem_indexer(self, indexer): if isinstance(indexer, tuple) and len(indexer) == 2: if all((isinstance(x, np.ndarray) and x.ndim == 2 for x in indexer)): (first, second) = indexer if not (second.size == 1 and (second == 0).all() and (first.shape[1] == 1)): raise NotImplementedError('This should not be reached. Please report a bug at github.com/pandas-dev/pandas/') indexer = first[:, 0] elif lib.is_integer(indexer[1]) and indexer[1] == 0: indexer = indexer[0] elif com.is_null_slice(indexer[1]): indexer = indexer[0] elif is_list_like(indexer[1]) and indexer[1][0] == 0: indexer = indexer[0] else: raise NotImplementedError('This should not be reached. 
Please report a bug at github.com/pandas-dev/pandas/') return indexer @property def is_view(self) -> bool: return False @cache_readonly def is_numeric(self) -> bool: return self.values.dtype._is_numeric def _slice(self, slicer: slice | npt.NDArray[np.bool_] | npt.NDArray[np.intp]) -> ExtensionArray: if self.ndim == 2: if not isinstance(slicer, slice): raise AssertionError('invalid slicing for a 1-ndim ExtensionArray', slicer) new_locs = range(1)[slicer] if not len(new_locs): raise AssertionError('invalid slicing for a 1-ndim ExtensionArray', slicer) slicer = slice(None) return self.values[slicer] @final def slice_block_rows(self, slicer: slice) -> Self: new_values = self.values[slicer] return type(self)(new_values, self._mgr_locs, ndim=self.ndim, refs=self.refs) def _unstack(self, unstacker, fill_value, new_placement: npt.NDArray[np.intp], needs_masking: npt.NDArray[np.bool_]): (new_values, mask) = unstacker.arange_result new_values = new_values.T[mask] new_placement = new_placement[mask] blocks = [type(self)(self.values.take(indices, allow_fill=needs_masking[i], fill_value=fill_value), BlockPlacement(place), ndim=2) for (i, (indices, place)) in enumerate(zip(new_values, new_placement))] return (blocks, mask) class NumpyBlock(Block): values: np.ndarray __slots__ = () @property def is_view(self) -> bool: return self.values.base is not None @property def array_values(self) -> ExtensionArray: return NumpyExtensionArray(self.values) def get_values(self, dtype: DtypeObj | None=None) -> np.ndarray: if dtype == _dtype_obj: return self.values.astype(_dtype_obj) return self.values @cache_readonly def is_numeric(self) -> bool: dtype = self.values.dtype kind = dtype.kind return kind in 'fciub' class NDArrayBackedExtensionBlock(EABackedBlock): values: NDArrayBackedExtensionArray @property def is_view(self) -> bool: return self.values._ndarray.base is not None class DatetimeLikeBlock(NDArrayBackedExtensionBlock): __slots__ = () is_numeric = False values: DatetimeArray | TimedeltaArray def maybe_coerce_values(values: ArrayLike) -> ArrayLike: if isinstance(values, np.ndarray): values = ensure_wrapped_if_datetimelike(values) if issubclass(values.dtype.type, str): values = np.array(values, dtype=object) if isinstance(values, (DatetimeArray, TimedeltaArray)) and values.freq is not None: values = values._with_freq(None) return values def get_block_type(dtype: DtypeObj) -> type[Block]: if isinstance(dtype, DatetimeTZDtype): return DatetimeLikeBlock elif isinstance(dtype, PeriodDtype): return NDArrayBackedExtensionBlock elif isinstance(dtype, ExtensionDtype): return ExtensionBlock kind = dtype.kind if kind in 'Mm': return DatetimeLikeBlock return NumpyBlock def new_block_2d(values: ArrayLike, placement: BlockPlacement, refs: BlockValuesRefs | None=None) -> Block: klass = get_block_type(values.dtype) values = maybe_coerce_values(values) return klass(values, ndim=2, placement=placement, refs=refs) def new_block(values, placement: BlockPlacement, *, ndim: int, refs: BlockValuesRefs | None=None) -> Block: klass = get_block_type(values.dtype) return klass(values, ndim=ndim, placement=placement, refs=refs) def check_ndim(values, placement: BlockPlacement, ndim: int) -> None: if values.ndim > ndim: raise ValueError(f'Wrong number of dimensions. values.ndim > ndim [{values.ndim} > {ndim}]') if not is_1d_only_ea_dtype(values.dtype): if values.ndim != ndim: raise ValueError(f'Wrong number of dimensions. 
values.ndim != ndim [{values.ndim} != {ndim}]') if len(placement) != len(values): raise ValueError(f'Wrong number of items passed {len(values)}, placement implies {len(placement)}') elif ndim == 2 and len(placement) != 1: raise ValueError('need to split') def extract_pandas_array(values: ArrayLike, dtype: DtypeObj | None, ndim: int) -> tuple[ArrayLike, DtypeObj | None]: if isinstance(values, ABCNumpyExtensionArray): values = values.to_numpy() if ndim and ndim > 1: values = np.atleast_2d(values) if isinstance(dtype, NumpyEADtype): dtype = dtype.numpy_dtype return (values, dtype) def extend_blocks(result, blocks=None) -> list[Block]: if blocks is None: blocks = [] if isinstance(result, list): for r in result: if isinstance(r, list): blocks.extend(r) else: blocks.append(r) else: assert isinstance(result, Block), type(result) blocks.append(result) return blocks def ensure_block_shape(values: ArrayLike, ndim: int=1) -> ArrayLike: if values.ndim < ndim: if not is_1d_only_ea_dtype(values.dtype): values = cast('np.ndarray | DatetimeArray | TimedeltaArray', values) values = values.reshape(1, -1) return values def external_values(values: ArrayLike) -> ArrayLike: if isinstance(values, (PeriodArray, IntervalArray)): return values.astype(object) elif isinstance(values, (DatetimeArray, TimedeltaArray)): values = values._ndarray if isinstance(values, np.ndarray): values = values.view() values.flags.writeable = False return values # File: pandas-main/pandas/core/internals/concat.py from __future__ import annotations from typing import TYPE_CHECKING, cast import numpy as np from pandas._libs import NaT, algos as libalgos, internals as libinternals, lib from pandas._libs.missing import NA from pandas.util._decorators import cache_readonly from pandas.core.dtypes.cast import ensure_dtype_can_hold_na, find_common_type from pandas.core.dtypes.common import is_1d_only_ea_dtype, needs_i8_conversion from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.missing import is_valid_na_for_dtype from pandas.core.construction import ensure_wrapped_if_datetimelike from pandas.core.internals.blocks import ensure_block_shape, new_block_2d from pandas.core.internals.managers import BlockManager, make_na_array if TYPE_CHECKING: from collections.abc import Generator, Sequence from pandas._typing import ArrayLike, AxisInt, DtypeObj, Shape from pandas import Index from pandas.core.internals.blocks import Block, BlockPlacement def concatenate_managers(mgrs_indexers, axes: list[Index], concat_axis: AxisInt, copy: bool) -> BlockManager: needs_copy = copy and concat_axis == 0 if concat_axis == 0: mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy) return mgrs[0].concat_horizontal(mgrs, axes) if len(mgrs_indexers) > 0 and mgrs_indexers[0][0].nblocks > 0: first_dtype = mgrs_indexers[0][0].blocks[0].dtype if first_dtype in [np.float64, np.float32]: if all((_is_homogeneous_mgr(mgr, first_dtype) for (mgr, _) in mgrs_indexers)) and len(mgrs_indexers) > 1: shape = tuple((len(x) for x in axes)) nb = _concat_homogeneous_fastpath(mgrs_indexers, shape, first_dtype) return BlockManager((nb,), axes) mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy) if len(mgrs) == 1: mgr = mgrs[0] out = mgr.copy(deep=False) out.axes = axes return out blocks = [] values: ArrayLike for (placement, join_units) in _get_combined_plan(mgrs): unit = join_units[0] blk = unit.block if _is_uniform_join_units(join_units): vals = [ju.block.values for ju in 
join_units] if not blk.is_extension: values = np.concatenate(vals, axis=1) elif is_1d_only_ea_dtype(blk.dtype): values = concat_compat(vals, axis=0, ea_compat_axis=True) values = ensure_block_shape(values, ndim=2) else: values = concat_compat(vals, axis=1) values = ensure_wrapped_if_datetimelike(values) fastpath = blk.values.dtype == values.dtype else: values = _concatenate_join_units(join_units, copy=copy) fastpath = False if fastpath: b = blk.make_block_same_class(values, placement=placement) else: b = new_block_2d(values, placement=placement) blocks.append(b) return BlockManager(tuple(blocks), axes) def _maybe_reindex_columns_na_proxy(axes: list[Index], mgrs_indexers: list[tuple[BlockManager, dict[int, np.ndarray]]], needs_copy: bool) -> list[BlockManager]: new_mgrs = [] for (mgr, indexers) in mgrs_indexers: for (i, indexer) in indexers.items(): mgr = mgr.reindex_indexer(axes[i], indexers[i], axis=i, only_slice=True, allow_dups=True, use_na_proxy=True) if needs_copy and (not indexers): mgr = mgr.copy() new_mgrs.append(mgr) return new_mgrs def _is_homogeneous_mgr(mgr: BlockManager, first_dtype: DtypeObj) -> bool: if mgr.nblocks != 1: return False blk = mgr.blocks[0] if not (blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1): return False return blk.dtype == first_dtype def _concat_homogeneous_fastpath(mgrs_indexers, shape: Shape, first_dtype: np.dtype) -> Block: if all((not indexers for (_, indexers) in mgrs_indexers)): arrs = [mgr.blocks[0].values.T for (mgr, _) in mgrs_indexers] arr = np.concatenate(arrs).T bp = libinternals.BlockPlacement(slice(shape[0])) nb = new_block_2d(arr, bp) return nb arr = np.empty(shape, dtype=first_dtype) if first_dtype == np.float64: take_func = libalgos.take_2d_axis0_float64_float64 else: take_func = libalgos.take_2d_axis0_float32_float32 start = 0 for (mgr, indexers) in mgrs_indexers: mgr_len = mgr.shape[1] end = start + mgr_len if 0 in indexers: take_func(mgr.blocks[0].values, indexers[0], arr[:, start:end]) else: arr[:, start:end] = mgr.blocks[0].values start += mgr_len bp = libinternals.BlockPlacement(slice(shape[0])) nb = new_block_2d(arr, bp) return nb def _get_combined_plan(mgrs: list[BlockManager]) -> Generator[tuple[BlockPlacement, list[JoinUnit]], None, None]: max_len = mgrs[0].shape[0] blknos_list = [mgr.blknos for mgr in mgrs] pairs = libinternals.get_concat_blkno_indexers(blknos_list) for (blknos, bp) in pairs: units_for_bp = [] for (k, mgr) in enumerate(mgrs): blkno = blknos[k] nb = _get_block_for_concat_plan(mgr, bp, blkno, max_len=max_len) unit = JoinUnit(nb) units_for_bp.append(unit) yield (bp, units_for_bp) def _get_block_for_concat_plan(mgr: BlockManager, bp: BlockPlacement, blkno: int, *, max_len: int) -> Block: blk = mgr.blocks[blkno] if len(bp) == len(blk.mgr_locs) and (blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1): nb = blk else: ax0_blk_indexer = mgr.blklocs[bp.indexer] slc = lib.maybe_indices_to_slice(ax0_blk_indexer, max_len) if isinstance(slc, slice): nb = blk.slice_block_columns(slc) else: nb = blk.take_block_columns(slc) return nb class JoinUnit: def __init__(self, block: Block) -> None: self.block = block def __repr__(self) -> str: return f'{type(self).__name__}({self.block!r})' def _is_valid_na_for(self, dtype: DtypeObj) -> bool: if not self.is_na: return False blk = self.block if blk.dtype.kind == 'V': return True if blk.dtype == object: values = blk.values return all((is_valid_na_for_dtype(x, dtype) for x in values.ravel(order='K'))) na_value = blk.fill_value if na_value is NaT and blk.dtype 
!= dtype: return False if na_value is NA and needs_i8_conversion(dtype): return False return is_valid_na_for_dtype(na_value, dtype) @cache_readonly def is_na(self) -> bool: blk = self.block if blk.dtype.kind == 'V': return True return False def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike: values: ArrayLike if upcasted_na is None and self.block.dtype.kind != 'V': return self.block.values else: fill_value = upcasted_na if self._is_valid_na_for(empty_dtype): blk_dtype = self.block.dtype if blk_dtype == np.dtype('object'): values = cast(np.ndarray, self.block.values) if values.size and values[0, 0] is None: fill_value = None return make_na_array(empty_dtype, self.block.shape, fill_value) return self.block.values def _concatenate_join_units(join_units: list[JoinUnit], copy: bool) -> ArrayLike: empty_dtype = _get_empty_dtype(join_units) has_none_blocks = any((unit.block.dtype.kind == 'V' for unit in join_units)) upcasted_na = _dtype_to_na_value(empty_dtype, has_none_blocks) to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype, upcasted_na=upcasted_na) for ju in join_units] if any((is_1d_only_ea_dtype(t.dtype) for t in to_concat)): to_concat = [t if is_1d_only_ea_dtype(t.dtype) else t[0, :] for t in to_concat] concat_values = concat_compat(to_concat, axis=0, ea_compat_axis=True) concat_values = ensure_block_shape(concat_values, 2) else: concat_values = concat_compat(to_concat, axis=1) return concat_values def _dtype_to_na_value(dtype: DtypeObj, has_none_blocks: bool): if isinstance(dtype, ExtensionDtype): return dtype.na_value elif dtype.kind in 'mM': return dtype.type('NaT') elif dtype.kind in 'fc': return dtype.type('NaN') elif dtype.kind == 'b': return None elif dtype.kind in 'iu': if not has_none_blocks: return None return np.nan elif dtype.kind == 'O': return np.nan raise NotImplementedError def _get_empty_dtype(join_units: Sequence[JoinUnit]) -> DtypeObj: if lib.dtypes_all_equal([ju.block.dtype for ju in join_units]): empty_dtype = join_units[0].block.dtype return empty_dtype has_none_blocks = any((unit.block.dtype.kind == 'V' for unit in join_units)) dtypes = [unit.block.dtype for unit in join_units if not unit.is_na] dtype = find_common_type(dtypes) if has_none_blocks: dtype = ensure_dtype_can_hold_na(dtype) return dtype def _is_uniform_join_units(join_units: list[JoinUnit]) -> bool: first = join_units[0].block if first.dtype.kind == 'V': return False return all((type(ju.block) is type(first) for ju in join_units)) and all((ju.block.dtype == first.dtype or ju.block.dtype.kind in 'iub' for ju in join_units)) and all((not ju.is_na or ju.block.is_extension for ju in join_units)) # File: pandas-main/pandas/core/internals/construction.py """""" from __future__ import annotations from collections import abc from typing import TYPE_CHECKING, Any import numpy as np from numpy import ma from pandas._config import using_string_dtype from pandas._libs import lib from pandas.core.dtypes.astype import astype_is_view from pandas.core.dtypes.cast import construct_1d_arraylike_from_scalar, dict_compat, maybe_cast_to_datetime, maybe_convert_platform, maybe_infer_to_datetimelike from pandas.core.dtypes.common import is_1d_only_ea_dtype, is_integer_dtype, is_list_like, is_named_tuple, is_object_dtype, is_scalar from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries from pandas.core.dtypes.missing import isna from pandas.core import algorithms, common as com from pandas.core.arrays import ExtensionArray from 
pandas.core.arrays.string_ import StringDtype from pandas.core.construction import array as pd_array, extract_array, range_to_ndarray, sanitize_array from pandas.core.indexes.api import DatetimeIndex, Index, TimedeltaIndex, default_index, ensure_index, get_objs_combined_axis, maybe_sequence_to_range, union_indexes from pandas.core.internals.blocks import BlockPlacement, ensure_block_shape, new_block, new_block_2d from pandas.core.internals.managers import create_block_manager_from_blocks, create_block_manager_from_column_arrays if TYPE_CHECKING: from collections.abc import Hashable, Sequence from pandas._typing import ArrayLike, DtypeObj, Manager, npt def arrays_to_mgr(arrays, columns: Index, index, *, dtype: DtypeObj | None=None, verify_integrity: bool=True, consolidate: bool=True) -> Manager: if verify_integrity: if index is None: index = _extract_index(arrays) else: index = ensure_index(index) (arrays, refs) = _homogenize(arrays, index, dtype) else: index = ensure_index(index) arrays = [extract_array(x, extract_numpy=True) for x in arrays] refs = [None] * len(arrays) for arr in arrays: if not isinstance(arr, (np.ndarray, ExtensionArray)) or arr.ndim != 1 or len(arr) != len(index): raise ValueError('Arrays must be 1-dimensional np.ndarray or ExtensionArray with length matching len(index)') columns = ensure_index(columns) if len(columns) != len(arrays): raise ValueError('len(arrays) must match len(columns)') axes = [columns, index] return create_block_manager_from_column_arrays(arrays, axes, consolidate=consolidate, refs=refs) def rec_array_to_mgr(data: np.rec.recarray | np.ndarray, index, columns, dtype: DtypeObj | None, copy: bool) -> Manager: fdata = ma.getdata(data) if index is None: index = default_index(len(fdata)) else: index = ensure_index(index) if columns is not None: columns = ensure_index(columns) (arrays, arr_columns) = to_arrays(fdata, columns) (arrays, arr_columns) = reorder_arrays(arrays, arr_columns, columns, len(index)) if columns is None: columns = arr_columns mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype) if copy: mgr = mgr.copy() return mgr def ndarray_to_mgr(values, index, columns, dtype: DtypeObj | None, copy: bool) -> Manager: infer_object = not isinstance(values, (ABCSeries, Index, ExtensionArray)) if isinstance(values, ABCSeries): if columns is None: if values.name is not None: columns = Index([values.name]) if index is None: index = values.index else: values = values.reindex(index) if not len(values) and columns is not None and len(columns): values = np.empty((0, 1), dtype=object) vdtype = getattr(values, 'dtype', None) refs = None if is_1d_only_ea_dtype(vdtype) or is_1d_only_ea_dtype(dtype): if isinstance(values, (np.ndarray, ExtensionArray)) and values.ndim > 1: values = [values[:, n] for n in range(values.shape[1])] else: values = [values] if columns is None: columns = Index(range(len(values))) else: columns = ensure_index(columns) return arrays_to_mgr(values, columns, index, dtype=dtype) elif isinstance(vdtype, ExtensionDtype): values = extract_array(values, extract_numpy=True) if copy: values = values.copy() if values.ndim == 1: values = values.reshape(-1, 1) elif isinstance(values, (ABCSeries, Index)): if not copy and (dtype is None or astype_is_view(values.dtype, dtype)): refs = values._references if copy: values = values._values.copy() else: values = values._values values = _ensure_2d(values) elif isinstance(values, (np.ndarray, ExtensionArray)): if copy and (dtype is None or astype_is_view(values.dtype, dtype)): values = np.array(values, 
copy=True, order='F') else: values = np.array(values, copy=False) values = _ensure_2d(values) else: values = _prep_ndarraylike(values, copy=copy) if dtype is not None and values.dtype != dtype: values = sanitize_array(values, None, dtype=dtype, copy=copy, allow_2d=True) (index, columns) = _get_axes(values.shape[0], values.shape[1], index=index, columns=columns) _check_values_indices_shape_match(values, index, columns) values = values.T if dtype is None and infer_object and is_object_dtype(values.dtype): obj_columns = list(values) maybe_datetime = [maybe_infer_to_datetimelike(x) for x in obj_columns] if any((x is not y for (x, y) in zip(obj_columns, maybe_datetime))): block_values = [new_block_2d(ensure_block_shape(dval, 2), placement=BlockPlacement(n)) for (n, dval) in enumerate(maybe_datetime)] else: bp = BlockPlacement(slice(len(columns))) nb = new_block_2d(values, placement=bp, refs=refs) block_values = [nb] elif dtype is None and values.dtype.kind == 'U' and using_string_dtype(): dtype = StringDtype(na_value=np.nan) obj_columns = list(values) block_values = [new_block(dtype.construct_array_type()._from_sequence(data, dtype=dtype), BlockPlacement(slice(i, i + 1)), ndim=2) for (i, data) in enumerate(obj_columns)] else: bp = BlockPlacement(slice(len(columns))) nb = new_block_2d(values, placement=bp, refs=refs) block_values = [nb] if len(columns) == 0: block_values = [] return create_block_manager_from_blocks(block_values, [columns, index], verify_integrity=False) def _check_values_indices_shape_match(values: np.ndarray, index: Index, columns: Index) -> None: if values.shape[1] != len(columns) or values.shape[0] != len(index): if values.shape[0] == 0 < len(index): raise ValueError('Empty data passed with indices specified.') passed = values.shape implied = (len(index), len(columns)) raise ValueError(f'Shape of passed values is {passed}, indices imply {implied}') def dict_to_mgr(data: dict, index, columns, *, dtype: DtypeObj | None=None, copy: bool=True) -> Manager: arrays: Sequence[Any] if columns is not None: columns = ensure_index(columns) arrays = [np.nan] * len(columns) midxs = set() data_keys = ensure_index(data.keys()) data_values = list(data.values()) for (i, column) in enumerate(columns): try: idx = data_keys.get_loc(column) except KeyError: midxs.add(i) continue array = data_values[idx] arrays[i] = array if is_scalar(array) and isna(array): midxs.add(i) if index is None: if midxs: index = _extract_index([array for (i, array) in enumerate(arrays) if i not in midxs]) else: index = _extract_index(arrays) else: index = ensure_index(index) if midxs and (not is_integer_dtype(dtype)): for i in midxs: arr = construct_1d_arraylike_from_scalar(arrays[i], len(index), dtype if dtype is not None else np.dtype('object')) arrays[i] = arr else: keys = maybe_sequence_to_range(list(data.keys())) columns = Index(keys) if keys else default_index(0) arrays = [com.maybe_iterable_to_list(data[k]) for k in keys] if copy: arrays = [x.copy() if isinstance(x, ExtensionArray) else x.copy(deep=True) if isinstance(x, Index) or (isinstance(x, ABCSeries) and is_1d_only_ea_dtype(x.dtype)) else x for x in arrays] return arrays_to_mgr(arrays, columns, index, dtype=dtype, consolidate=copy) def nested_data_to_arrays(data: Sequence, columns: Index | None, index: Index | None, dtype: DtypeObj | None) -> tuple[list[ArrayLike], Index, Index]: if is_named_tuple(data[0]) and columns is None: columns = ensure_index(data[0]._fields) (arrays, columns) = to_arrays(data, columns, dtype=dtype) columns = ensure_index(columns) if 
index is None: if isinstance(data[0], ABCSeries): index = _get_names_from_index(data) else: index = default_index(len(data)) return (arrays, columns, index) def treat_as_nested(data) -> bool: return len(data) > 0 and is_list_like(data[0]) and (getattr(data[0], 'ndim', 1) == 1) and (not (isinstance(data, ExtensionArray) and data.ndim == 2)) def _prep_ndarraylike(values, copy: bool=True) -> np.ndarray: if len(values) == 0: return np.empty((0, 0), dtype=object) elif isinstance(values, range): arr = range_to_ndarray(values) return arr[..., np.newaxis] def convert(v): if not is_list_like(v) or isinstance(v, ABCDataFrame): return v v = extract_array(v, extract_numpy=True) res = maybe_convert_platform(v) return res if is_list_like(values[0]): values = np.array([convert(v) for v in values]) elif isinstance(values[0], np.ndarray) and values[0].ndim == 0: values = np.array([convert(v) for v in values]) else: values = convert(values) return _ensure_2d(values) def _ensure_2d(values: np.ndarray) -> np.ndarray: if values.ndim == 1: values = values.reshape((values.shape[0], 1)) elif values.ndim != 2: raise ValueError(f'Must pass 2-d input. shape={values.shape}') return values def _homogenize(data, index: Index, dtype: DtypeObj | None) -> tuple[list[ArrayLike], list[Any]]: oindex = None homogenized = [] refs: list[Any] = [] for val in data: if isinstance(val, (ABCSeries, Index)): if dtype is not None: val = val.astype(dtype) if isinstance(val, ABCSeries) and val.index is not index: val = val.reindex(index) refs.append(val._references) val = val._values else: if isinstance(val, dict): if oindex is None: oindex = index.astype('O') if isinstance(index, (DatetimeIndex, TimedeltaIndex)): val = dict_compat(val) else: val = dict(val) val = lib.fast_multiget(val, oindex._values, default=np.nan) val = sanitize_array(val, index, dtype=dtype, copy=False) com.require_length_match(val, index) refs.append(None) homogenized.append(val) return (homogenized, refs) def _extract_index(data) -> Index: index: Index if len(data) == 0: return default_index(0) raw_lengths = set() indexes: list[list[Hashable] | Index] = [] have_raw_arrays = False have_series = False have_dicts = False for val in data: if isinstance(val, ABCSeries): have_series = True indexes.append(val.index) elif isinstance(val, dict): have_dicts = True indexes.append(list(val.keys())) elif is_list_like(val) and getattr(val, 'ndim', 1) == 1: have_raw_arrays = True raw_lengths.add(len(val)) elif isinstance(val, np.ndarray) and val.ndim > 1: raise ValueError('Per-column arrays must each be 1-dimensional') if not indexes and (not raw_lengths): raise ValueError('If using all scalar values, you must pass an index') if have_series: index = union_indexes(indexes) elif have_dicts: index = union_indexes(indexes, sort=False) if have_raw_arrays: if len(raw_lengths) > 1: raise ValueError('All arrays must be of the same length') if have_dicts: raise ValueError('Mixing dicts with non-Series may lead to ambiguous ordering.') raw_length = raw_lengths.pop() if have_series: if raw_length != len(index): msg = f'array length {raw_length} does not match index length {len(index)}' raise ValueError(msg) else: index = default_index(raw_length) return ensure_index(index) def reorder_arrays(arrays: list[ArrayLike], arr_columns: Index, columns: Index | None, length: int) -> tuple[list[ArrayLike], Index]: if columns is not None: if not columns.equals(arr_columns): new_arrays: list[ArrayLike] = [] indexer = arr_columns.get_indexer(columns) for (i, k) in enumerate(indexer): if k == -1: arr = 
np.empty(length, dtype=object) arr.fill(np.nan) else: arr = arrays[k] new_arrays.append(arr) arrays = new_arrays arr_columns = columns return (arrays, arr_columns) def _get_names_from_index(data) -> Index: has_some_name = any((getattr(s, 'name', None) is not None for s in data)) if not has_some_name: return default_index(len(data)) index: list[Hashable] = list(range(len(data))) count = 0 for (i, s) in enumerate(data): n = getattr(s, 'name', None) if n is not None: index[i] = n else: index[i] = f'Unnamed {count}' count += 1 return Index(index) def _get_axes(N: int, K: int, index: Index | None, columns: Index | None) -> tuple[Index, Index]: if index is None: index = default_index(N) else: index = ensure_index(index) if columns is None: columns = default_index(K) else: columns = ensure_index(columns) return (index, columns) def dataclasses_to_dicts(data): from dataclasses import asdict return list(map(asdict, data)) def to_arrays(data, columns: Index | None, dtype: DtypeObj | None=None) -> tuple[list[ArrayLike], Index]: if not len(data): if isinstance(data, np.ndarray): if data.dtype.names is not None: columns = ensure_index(data.dtype.names) arrays = [data[name] for name in columns] if len(data) == 0: for (i, arr) in enumerate(arrays): if arr.ndim == 2: arrays[i] = arr[:, 0] return (arrays, columns) return ([], ensure_index([])) elif isinstance(data, np.ndarray) and data.dtype.names is not None: columns = Index(list(data.dtype.names)) arrays = [data[k] for k in columns] return (arrays, columns) if isinstance(data[0], (list, tuple)): arr = _list_to_arrays(data) elif isinstance(data[0], abc.Mapping): (arr, columns) = _list_of_dict_to_arrays(data, columns) elif isinstance(data[0], ABCSeries): (arr, columns) = _list_of_series_to_arrays(data, columns) else: data = [tuple(x) for x in data] arr = _list_to_arrays(data) (content, columns) = _finalize_columns_and_data(arr, columns, dtype) return (content, columns) def _list_to_arrays(data: list[tuple | list]) -> np.ndarray: if isinstance(data[0], tuple): content = lib.to_object_array_tuples(data) else: content = lib.to_object_array(data) return content def _list_of_series_to_arrays(data: list, columns: Index | None) -> tuple[np.ndarray, Index]: if columns is None: pass_data = [x for x in data if isinstance(x, (ABCSeries, ABCDataFrame))] columns = get_objs_combined_axis(pass_data, sort=False) indexer_cache: dict[int, np.ndarray] = {} aligned_values = [] for s in data: index = getattr(s, 'index', None) if index is None: index = default_index(len(s)) if id(index) in indexer_cache: indexer = indexer_cache[id(index)] else: indexer = indexer_cache[id(index)] = index.get_indexer(columns) values = extract_array(s, extract_numpy=True) aligned_values.append(algorithms.take_nd(values, indexer)) content = np.vstack(aligned_values) return (content, columns) def _list_of_dict_to_arrays(data: list[dict], columns: Index | None) -> tuple[np.ndarray, Index]: if columns is None: gen = (list(x.keys()) for x in data) sort = not any((isinstance(d, dict) for d in data)) pre_cols = lib.fast_unique_multiple_list_gen(gen, sort=sort) columns = ensure_index(pre_cols) data = [d if type(d) is dict else dict(d) for d in data] content = lib.dicts_to_array(data, list(columns)) return (content, columns) def _finalize_columns_and_data(content: np.ndarray, columns: Index | None, dtype: DtypeObj | None) -> tuple[list[ArrayLike], Index]: contents = list(content.T) try: columns = _validate_or_indexify_columns(contents, columns) except AssertionError as err: raise ValueError(err) from err 
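# --- Illustrative sketch (editor's example, not part of the pandas source): the helpers
# above back DataFrame construction from nested Python data. A list of dicts goes through
# _list_of_dict_to_arrays (the columns are the union of the dict keys, in insertion order
# for plain dicts), and _validate_or_indexify_columns is what surfaces the
# "N columns passed" error when an explicit `columns` argument does not match the data width.
import pandas as pd

# Union of dict keys becomes the column index; missing cells are filled with NaN.
df = pd.DataFrame([{"a": 1, "b": 2}, {"b": 3, "c": 4}])
assert list(df.columns) == ["a", "b", "c"]

# A mismatched `columns` argument raises the ValueError that _finalize_columns_and_data
# builds from the AssertionError above.
try:
    pd.DataFrame([(1, 2, 3), (4, 5, 6)], columns=["x", "y"])
except ValueError as exc:
    print(exc)  # e.g. "2 columns passed, passed data had 3 columns"
# --- end of illustrative sketch ---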
if len(contents) and contents[0].dtype == np.object_: contents = convert_object_array(contents, dtype=dtype) return (contents, columns) def _validate_or_indexify_columns(content: list[np.ndarray], columns: Index | None) -> Index: if columns is None: columns = default_index(len(content)) else: is_mi_list = isinstance(columns, list) and all((isinstance(col, list) for col in columns)) if not is_mi_list and len(columns) != len(content): raise AssertionError(f'{len(columns)} columns passed, passed data had {len(content)} columns') if is_mi_list: if len({len(col) for col in columns}) > 1: raise ValueError('Length of columns passed for MultiIndex columns is different') if columns and len(columns[0]) != len(content): raise ValueError(f'{len(columns[0])} columns passed, passed data had {len(content)} columns') return columns def convert_object_array(content: list[npt.NDArray[np.object_]], dtype: DtypeObj | None, dtype_backend: str='numpy', coerce_float: bool=False) -> list[ArrayLike]: def convert(arr): if dtype != np.dtype('O'): arr = lib.maybe_convert_objects(arr, try_float=coerce_float, convert_to_nullable_dtype=dtype_backend != 'numpy') if dtype is None: if arr.dtype == np.dtype('O'): arr = maybe_infer_to_datetimelike(arr) if dtype_backend != 'numpy' and arr.dtype == np.dtype('O'): new_dtype = StringDtype() arr_cls = new_dtype.construct_array_type() arr = arr_cls._from_sequence(arr, dtype=new_dtype) elif dtype_backend != 'numpy' and isinstance(arr, np.ndarray): if arr.dtype.kind in 'iufb': arr = pd_array(arr, copy=False) elif isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() arr = cls._from_sequence(arr, dtype=dtype, copy=False) elif dtype.kind in 'mM': arr = maybe_cast_to_datetime(arr, dtype) return arr arrays = [convert(arr) for arr in content] return arrays # File: pandas-main/pandas/core/internals/managers.py from __future__ import annotations from collections.abc import Callable, Hashable, Sequence import itertools from typing import TYPE_CHECKING, Any, Literal, NoReturn, cast, final import warnings import numpy as np from pandas._config.config import get_option from pandas._libs import algos as libalgos, internals as libinternals, lib from pandas._libs.internals import BlockPlacement, BlockValuesRefs from pandas._libs.tslibs import Timestamp from pandas.errors import AbstractMethodError, PerformanceWarning from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.util._validators import validate_bool_kwarg from pandas.core.dtypes.cast import find_common_type, infer_dtype_from_scalar, np_can_hold_element from pandas.core.dtypes.common import ensure_platform_int, is_1d_only_ea_dtype, is_list_like from pandas.core.dtypes.dtypes import DatetimeTZDtype, ExtensionDtype, SparseDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries from pandas.core.dtypes.missing import array_equals, isna import pandas.core.algorithms as algos from pandas.core.arrays import DatetimeArray from pandas.core.arrays._mixins import NDArrayBackedExtensionArray from pandas.core.base import PandasObject from pandas.core.construction import ensure_wrapped_if_datetimelike, extract_array from pandas.core.indexers import maybe_convert_indices from pandas.core.indexes.api import Index, default_index, ensure_index from pandas.core.internals.blocks import Block, NumpyBlock, ensure_block_shape, extend_blocks, get_block_type, maybe_coerce_values, new_block, new_block_2d from pandas.core.internals.ops import blockwise_all, operate_blockwise 
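# --- Illustrative sketch (editor's example, not part of the pandas source): the
# BlockManager classes defined below are what a DataFrame holds internally. Columns that
# share a NumPy dtype are consolidated into one 2-D block, so a frame with two int64
# columns and one float64 column carries two blocks, not three. `_mgr` is a private
# attribute; peeking at it is for exploration only.
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4], "c": [1.5, 2.5]})
mgr = df._mgr                                  # BlockManager instance
print(mgr.nblocks)                             # 2: one int64 block (a, b), one float64 block (c)
print([str(blk.dtype) for blk in mgr.blocks])  # ['int64', 'float64'] (order may vary)
# --- end of illustrative sketch ---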
if TYPE_CHECKING: from collections.abc import Generator from pandas._typing import ArrayLike, AxisInt, DtypeObj, QuantileInterpolation, Self, Shape, npt from pandas.api.extensions import ExtensionArray def interleaved_dtype(dtypes: list[DtypeObj]) -> DtypeObj | None: if not len(dtypes): return None return find_common_type(dtypes) def ensure_np_dtype(dtype: DtypeObj) -> np.dtype: if isinstance(dtype, SparseDtype): dtype = dtype.subtype dtype = cast(np.dtype, dtype) elif isinstance(dtype, ExtensionDtype): dtype = np.dtype('object') elif dtype == np.dtype(str): dtype = np.dtype('object') return dtype class BaseBlockManager(PandasObject): __slots__ = () _blknos: npt.NDArray[np.intp] _blklocs: npt.NDArray[np.intp] blocks: tuple[Block, ...] axes: list[Index] @property def ndim(self) -> int: raise NotImplementedError _known_consolidated: bool _is_consolidated: bool def __init__(self, blocks, axes, verify_integrity: bool=True) -> None: raise NotImplementedError @final def __len__(self) -> int: return len(self.items) @property def shape(self) -> Shape: return tuple((len(ax) for ax in self.axes)) @classmethod def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> Self: raise NotImplementedError @property def blknos(self) -> npt.NDArray[np.intp]: if self._blknos is None: self._rebuild_blknos_and_blklocs() return self._blknos @property def blklocs(self) -> npt.NDArray[np.intp]: if self._blklocs is None: self._rebuild_blknos_and_blklocs() return self._blklocs def make_empty(self, axes=None) -> Self: if axes is None: axes = [default_index(0)] + self.axes[1:] if self.ndim == 1: assert isinstance(self, SingleBlockManager) blk = self.blocks[0] arr = blk.values[:0] bp = BlockPlacement(slice(0, 0)) nb = blk.make_block_same_class(arr, placement=bp) blocks = [nb] else: blocks = [] return type(self).from_blocks(blocks, axes) def __bool__(self) -> bool: return True def set_axis(self, axis: AxisInt, new_labels: Index) -> None: self._validate_set_axis(axis, new_labels) self.axes[axis] = new_labels @final def _validate_set_axis(self, axis: AxisInt, new_labels: Index) -> None: old_len = len(self.axes[axis]) new_len = len(new_labels) if axis == 1 and len(self.items) == 0: pass elif new_len != old_len: raise ValueError(f'Length mismatch: Expected axis has {old_len} elements, new values have {new_len} elements') @property def is_single_block(self) -> bool: return len(self.blocks) == 1 @property def items(self) -> Index: return self.axes[0] def _has_no_reference(self, i: int) -> bool: blkno = self.blknos[i] return self._has_no_reference_block(blkno) def _has_no_reference_block(self, blkno: int) -> bool: return not self.blocks[blkno].refs.has_reference() def add_references(self, mgr: BaseBlockManager) -> None: if len(self.blocks) != len(mgr.blocks): return for (i, blk) in enumerate(self.blocks): blk.refs = mgr.blocks[i].refs blk.refs.add_reference(blk) def references_same_values(self, mgr: BaseBlockManager, blkno: int) -> bool: blk = self.blocks[blkno] return any((blk is ref() for ref in mgr.blocks[blkno].refs.referenced_blocks)) def get_dtypes(self) -> npt.NDArray[np.object_]: dtypes = np.array([blk.dtype for blk in self.blocks], dtype=object) return dtypes.take(self.blknos) @property def arrays(self) -> list[ArrayLike]: return [blk.values for blk in self.blocks] def __repr__(self) -> str: output = type(self).__name__ for (i, ax) in enumerate(self.axes): if i == 0: output += f'\nItems: {ax}' else: output += f'\nAxis {i}: {ax}' for block in self.blocks: output += f'\n{block}' return output def 
_equal_values(self, other: Self) -> bool: raise AbstractMethodError(self) @final def equals(self, other: object) -> bool: if not isinstance(other, type(self)): return False (self_axes, other_axes) = (self.axes, other.axes) if len(self_axes) != len(other_axes): return False if not all((ax1.equals(ax2) for (ax1, ax2) in zip(self_axes, other_axes))): return False return self._equal_values(other) def apply(self, f, align_keys: list[str] | None=None, **kwargs) -> Self: assert 'filter' not in kwargs align_keys = align_keys or [] result_blocks: list[Block] = [] aligned_args = {k: kwargs[k] for k in align_keys} for b in self.blocks: if aligned_args: for (k, obj) in aligned_args.items(): if isinstance(obj, (ABCSeries, ABCDataFrame)): if obj.ndim == 1: kwargs[k] = obj.iloc[b.mgr_locs.indexer]._values else: kwargs[k] = obj.iloc[:, b.mgr_locs.indexer]._values else: kwargs[k] = obj[b.mgr_locs.indexer] if callable(f): applied = b.apply(f, **kwargs) else: applied = getattr(b, f)(**kwargs) result_blocks = extend_blocks(applied, result_blocks) out = type(self).from_blocks(result_blocks, self.axes) return out @final def isna(self, func) -> Self: return self.apply('apply', func=func) @final def fillna(self, value, limit: int | None, inplace: bool) -> Self: if limit is not None: limit = libalgos.validate_limit(None, limit=limit) return self.apply('fillna', value=value, limit=limit, inplace=inplace) @final def where(self, other, cond, align: bool) -> Self: if align: align_keys = ['other', 'cond'] else: align_keys = ['cond'] other = extract_array(other, extract_numpy=True) return self.apply('where', align_keys=align_keys, other=other, cond=cond) @final def putmask(self, mask, new, align: bool=True) -> Self: if align: align_keys = ['new', 'mask'] else: align_keys = ['mask'] new = extract_array(new, extract_numpy=True) return self.apply('putmask', align_keys=align_keys, mask=mask, new=new) @final def round(self, decimals: int) -> Self: return self.apply('round', decimals=decimals) @final def replace(self, to_replace, value, inplace: bool) -> Self: inplace = validate_bool_kwarg(inplace, 'inplace') assert not lib.is_list_like(to_replace) assert not lib.is_list_like(value) return self.apply('replace', to_replace=to_replace, value=value, inplace=inplace) @final def replace_regex(self, **kwargs) -> Self: return self.apply('_replace_regex', **kwargs) @final def replace_list(self, src_list: list[Any], dest_list: list[Any], inplace: bool=False, regex: bool=False) -> Self: inplace = validate_bool_kwarg(inplace, 'inplace') bm = self.apply('replace_list', src_list=src_list, dest_list=dest_list, inplace=inplace, regex=regex) bm._consolidate_inplace() return bm def interpolate(self, inplace: bool, **kwargs) -> Self: return self.apply('interpolate', inplace=inplace, **kwargs) def pad_or_backfill(self, inplace: bool, **kwargs) -> Self: return self.apply('pad_or_backfill', inplace=inplace, **kwargs) def shift(self, periods: int, fill_value) -> Self: if fill_value is lib.no_default: fill_value = None return self.apply('shift', periods=periods, fill_value=fill_value) def setitem(self, indexer, value) -> Self: if isinstance(indexer, np.ndarray) and indexer.ndim > self.ndim: raise ValueError(f'Cannot set values with ndim > {self.ndim}') if not self._has_no_reference(0): if self.ndim == 2 and isinstance(indexer, tuple): blk_loc = self.blklocs[indexer[1]] if is_list_like(blk_loc) and blk_loc.ndim == 2: blk_loc = np.squeeze(blk_loc, axis=0) elif not is_list_like(blk_loc): blk_loc = [blk_loc] if len(blk_loc) == 0: return 
self.copy(deep=False) values = self.blocks[0].values if values.ndim == 2: values = values[blk_loc] self._iset_split_block(0, blk_loc, values) self.blocks[0].setitem((indexer[0], np.arange(len(blk_loc))), value) return self self = self.copy() return self.apply('setitem', indexer=indexer, value=value) def diff(self, n: int) -> Self: return self.apply('diff', n=n) def astype(self, dtype, errors: str='raise') -> Self: return self.apply('astype', dtype=dtype, errors=errors) def convert(self) -> Self: return self.apply('convert') def convert_dtypes(self, **kwargs): return self.apply('convert_dtypes', **kwargs) def get_values_for_csv(self, *, float_format, date_format, decimal, na_rep: str='nan', quoting=None) -> Self: return self.apply('get_values_for_csv', na_rep=na_rep, quoting=quoting, float_format=float_format, date_format=date_format, decimal=decimal) @property def any_extension_types(self) -> bool: return any((block.is_extension for block in self.blocks)) @property def is_view(self) -> bool: if len(self.blocks) == 1: return self.blocks[0].is_view return False def _get_data_subset(self, predicate: Callable) -> Self: blocks = [blk for blk in self.blocks if predicate(blk.values)] return self._combine(blocks) def get_bool_data(self) -> Self: new_blocks = [] for blk in self.blocks: if blk.dtype == bool: new_blocks.append(blk) elif blk.is_object: new_blocks.extend((nb for nb in blk._split() if nb.is_bool)) return self._combine(new_blocks) def get_numeric_data(self) -> Self: numeric_blocks = [blk for blk in self.blocks if blk.is_numeric] if len(numeric_blocks) == len(self.blocks): return self return self._combine(numeric_blocks) def _combine(self, blocks: list[Block], index: Index | None=None) -> Self: if len(blocks) == 0: if self.ndim == 2: if index is not None: axes = [self.items[:0], index] else: axes = [self.items[:0]] + self.axes[1:] return self.make_empty(axes) return self.make_empty() indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks])) inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0]) new_blocks: list[Block] = [] for b in blocks: nb = b.copy(deep=False) nb.mgr_locs = BlockPlacement(inv_indexer[nb.mgr_locs.indexer]) new_blocks.append(nb) axes = list(self.axes) if index is not None: axes[-1] = index axes[0] = self.items.take(indexer) return type(self).from_blocks(new_blocks, axes) @property def nblocks(self) -> int: return len(self.blocks) def copy(self, deep: bool | Literal['all']=True) -> Self: if deep: def copy_func(ax): return ax.copy(deep=True) if deep == 'all' else ax.view() new_axes = [copy_func(ax) for ax in self.axes] else: new_axes = [ax.view() for ax in self.axes] res = self.apply('copy', deep=deep) res.axes = new_axes if self.ndim > 1: blknos = self._blknos if blknos is not None: res._blknos = blknos.copy() res._blklocs = self._blklocs.copy() if deep: res._consolidate_inplace() return res def is_consolidated(self) -> bool: return True def consolidate(self) -> Self: if self.is_consolidated(): return self bm = type(self)(self.blocks, self.axes, verify_integrity=False) bm._is_consolidated = False bm._consolidate_inplace() return bm def _consolidate_inplace(self) -> None: return @final def reindex_axis(self, new_index: Index, axis: AxisInt, fill_value=None, only_slice: bool=False) -> Self: (new_index, indexer) = self.axes[axis].reindex(new_index) return self.reindex_indexer(new_index, indexer, axis=axis, fill_value=fill_value, only_slice=only_slice) def reindex_indexer(self, new_axis: Index, indexer: npt.NDArray[np.intp] | None, axis: AxisInt, 
fill_value=None, allow_dups: bool=False, only_slice: bool=False, *, use_na_proxy: bool=False) -> Self: if indexer is None: if new_axis is self.axes[axis]: return self result = self.copy(deep=False) result.axes = list(self.axes) result.axes[axis] = new_axis return result assert isinstance(indexer, np.ndarray) if not allow_dups: self.axes[axis]._validate_can_reindex(indexer) if axis >= self.ndim: raise IndexError('Requested axis not found in manager') if axis == 0: new_blocks = list(self._slice_take_blocks_ax0(indexer, fill_value=fill_value, only_slice=only_slice, use_na_proxy=use_na_proxy)) else: new_blocks = [blk.take_nd(indexer, axis=1, fill_value=fill_value if fill_value is not None else blk.fill_value) for blk in self.blocks] new_axes = list(self.axes) new_axes[axis] = new_axis new_mgr = type(self).from_blocks(new_blocks, new_axes) if axis == 1: new_mgr._blknos = self.blknos.copy() new_mgr._blklocs = self.blklocs.copy() return new_mgr def _slice_take_blocks_ax0(self, slice_or_indexer: slice | np.ndarray, fill_value=lib.no_default, only_slice: bool=False, *, use_na_proxy: bool=False, ref_inplace_op: bool=False) -> Generator[Block, None, None]: allow_fill = fill_value is not lib.no_default (sl_type, slobj, sllen) = _preprocess_slice_or_indexer(slice_or_indexer, self.shape[0], allow_fill=allow_fill) if self.is_single_block: blk = self.blocks[0] if sl_type == 'slice': if sllen == 0: return bp = BlockPlacement(slice(0, sllen)) yield blk.getitem_block_columns(slobj, new_mgr_locs=bp) return elif not allow_fill or self.ndim == 1: if allow_fill and fill_value is None: fill_value = blk.fill_value if not allow_fill and only_slice: for (i, ml) in enumerate(slobj): yield blk.getitem_block_columns(slice(ml, ml + 1), new_mgr_locs=BlockPlacement(i), ref_inplace_op=ref_inplace_op) else: bp = BlockPlacement(slice(0, sllen)) yield blk.take_nd(slobj, axis=0, new_mgr_locs=bp, fill_value=fill_value) return if sl_type == 'slice': blknos = self.blknos[slobj] blklocs = self.blklocs[slobj] else: blknos = algos.take_nd(self.blknos, slobj, fill_value=-1, allow_fill=allow_fill) blklocs = algos.take_nd(self.blklocs, slobj, fill_value=-1, allow_fill=allow_fill) group = not only_slice for (blkno, mgr_locs) in libinternals.get_blkno_placements(blknos, group=group): if blkno == -1: yield self._make_na_block(placement=mgr_locs, fill_value=fill_value, use_na_proxy=use_na_proxy) else: blk = self.blocks[blkno] if not blk._can_consolidate and (not blk._validate_ndim): deep = False for mgr_loc in mgr_locs: newblk = blk.copy(deep=deep) newblk.mgr_locs = BlockPlacement(slice(mgr_loc, mgr_loc + 1)) yield newblk else: taker = blklocs[mgr_locs.indexer] max_len = max(len(mgr_locs), taker.max() + 1) taker = lib.maybe_indices_to_slice(taker, max_len) if isinstance(taker, slice): nb = blk.getitem_block_columns(taker, new_mgr_locs=mgr_locs) yield nb elif only_slice: for (i, ml) in zip(taker, mgr_locs): slc = slice(i, i + 1) bp = BlockPlacement(ml) nb = blk.getitem_block_columns(slc, new_mgr_locs=bp) yield nb else: nb = blk.take_nd(taker, axis=0, new_mgr_locs=mgr_locs) yield nb def _make_na_block(self, placement: BlockPlacement, fill_value=None, use_na_proxy: bool=False) -> Block: if use_na_proxy: assert fill_value is None shape = (len(placement), self.shape[1]) vals = np.empty(shape, dtype=np.void) nb = NumpyBlock(vals, placement, ndim=2) return nb if fill_value is None or fill_value is np.nan: fill_value = np.nan dtype = interleaved_dtype([blk.dtype for blk in self.blocks]) if dtype is not None and np.issubdtype(dtype.type, 
np.floating): fill_value = dtype.type(fill_value) shape = (len(placement), self.shape[1]) (dtype, fill_value) = infer_dtype_from_scalar(fill_value) block_values = make_na_array(dtype, shape, fill_value) return new_block_2d(block_values, placement=placement) def take(self, indexer: npt.NDArray[np.intp], axis: AxisInt=1, verify: bool=True) -> Self: n = self.shape[axis] indexer = maybe_convert_indices(indexer, n, verify=verify) new_labels = self.axes[axis].take(indexer) return self.reindex_indexer(new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True) class BlockManager(libinternals.BlockManager, BaseBlockManager): ndim = 2 def __init__(self, blocks: Sequence[Block], axes: Sequence[Index], verify_integrity: bool=True) -> None: if verify_integrity: for block in blocks: if self.ndim != block.ndim: raise AssertionError(f'Number of Block dimensions ({block.ndim}) must equal number of axes ({self.ndim})') self._verify_integrity() def _verify_integrity(self) -> None: mgr_shape = self.shape tot_items = sum((len(x.mgr_locs) for x in self.blocks)) for block in self.blocks: if block.shape[1:] != mgr_shape[1:]: raise_construction_error(tot_items, block.shape[1:], self.axes) if len(self.items) != tot_items: raise AssertionError(f'Number of manager items must equal union of block items\n# manager items: {len(self.items)}, # tot_items: {tot_items}') @classmethod def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> Self: return cls(blocks, axes, verify_integrity=False) def fast_xs(self, loc: int) -> SingleBlockManager: if len(self.blocks) == 1: result: np.ndarray | ExtensionArray = self.blocks[0].iget((slice(None), loc)) bp = BlockPlacement(slice(0, len(result))) block = new_block(result, placement=bp, ndim=1, refs=self.blocks[0].refs) return SingleBlockManager(block, self.axes[0]) dtype = interleaved_dtype([blk.dtype for blk in self.blocks]) n = len(self) if isinstance(dtype, ExtensionDtype): result = np.empty(n, dtype=object) else: result = np.empty(n, dtype=dtype) result = ensure_wrapped_if_datetimelike(result) for blk in self.blocks: for (i, rl) in enumerate(blk.mgr_locs): result[rl] = blk.iget((i, loc)) if isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() result = cls._from_sequence(result, dtype=dtype) bp = BlockPlacement(slice(0, len(result))) block = new_block(result, placement=bp, ndim=1) return SingleBlockManager(block, self.axes[0]) def iget(self, i: int, track_ref: bool=True) -> SingleBlockManager: block = self.blocks[self.blknos[i]] values = block.iget(self.blklocs[i]) bp = BlockPlacement(slice(0, len(values))) nb = type(block)(values, placement=bp, ndim=1, refs=block.refs if track_ref else None) return SingleBlockManager(nb, self.axes[1]) def iget_values(self, i: int) -> ArrayLike: block = self.blocks[self.blknos[i]] values = block.iget(self.blklocs[i]) return values @property def column_arrays(self) -> list[np.ndarray]: result: list[np.ndarray | None] = [None] * len(self.items) for blk in self.blocks: mgr_locs = blk._mgr_locs values = blk.array_values._values_for_json() if values.ndim == 1: result[mgr_locs[0]] = values else: for (i, loc) in enumerate(mgr_locs): result[loc] = values[i] return result def iset(self, loc: int | slice | np.ndarray, value: ArrayLike, inplace: bool=False, refs: BlockValuesRefs | None=None) -> None: if self._blklocs is None and self.ndim > 1: self._rebuild_blknos_and_blklocs() value_is_extension_type = is_1d_only_ea_dtype(value.dtype) if not value_is_extension_type: if value.ndim == 2: value = value.T else: value = 
ensure_block_shape(value, ndim=2) if value.shape[1:] != self.shape[1:]: raise AssertionError('Shape of new values must be compatible with manager shape') if lib.is_integer(loc): loc = cast(int, loc) blkno = self.blknos[loc] blk = self.blocks[blkno] if len(blk._mgr_locs) == 1: return self._iset_single(loc, value, inplace=inplace, blkno=blkno, blk=blk, refs=refs) loc = [loc] if value_is_extension_type: def value_getitem(placement): return value else: def value_getitem(placement): return value[placement.indexer] blknos = self.blknos[loc] blklocs = self.blklocs[loc].copy() unfit_mgr_locs = [] unfit_val_locs = [] removed_blknos = [] for (blkno_l, val_locs) in libinternals.get_blkno_placements(blknos, group=True): blk = self.blocks[blkno_l] blk_locs = blklocs[val_locs.indexer] if inplace and blk.should_store(value): if not self._has_no_reference_block(blkno_l): self._iset_split_block(blkno_l, blk_locs, value_getitem(val_locs), refs=refs) else: blk.set_inplace(blk_locs, value_getitem(val_locs)) continue else: unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs]) unfit_val_locs.append(val_locs) if len(val_locs) == len(blk.mgr_locs): removed_blknos.append(blkno_l) continue else: self._iset_split_block(blkno_l, blk_locs, refs=refs) if len(removed_blknos): is_deleted = np.zeros(self.nblocks, dtype=np.bool_) is_deleted[removed_blknos] = True new_blknos = np.empty(self.nblocks, dtype=np.intp) new_blknos.fill(-1) new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos)) self._blknos = new_blknos[self._blknos] self.blocks = tuple((blk for (i, blk) in enumerate(self.blocks) if i not in set(removed_blknos))) if unfit_val_locs: unfit_idxr = np.concatenate(unfit_mgr_locs) unfit_count = len(unfit_idxr) new_blocks: list[Block] = [] if value_is_extension_type: new_blocks.extend((new_block_2d(values=value, placement=BlockPlacement(slice(mgr_loc, mgr_loc + 1)), refs=refs) for mgr_loc in unfit_idxr)) self._blknos[unfit_idxr] = np.arange(unfit_count) + len(self.blocks) self._blklocs[unfit_idxr] = 0 else: unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:]) new_blocks.append(new_block_2d(values=value_getitem(unfit_val_items), placement=BlockPlacement(unfit_idxr), refs=refs)) self._blknos[unfit_idxr] = len(self.blocks) self._blklocs[unfit_idxr] = np.arange(unfit_count) self.blocks += tuple(new_blocks) self._known_consolidated = False def _iset_split_block(self, blkno_l: int, blk_locs: np.ndarray | list[int], value: ArrayLike | None=None, refs: BlockValuesRefs | None=None) -> None: blk = self.blocks[blkno_l] if self._blklocs is None: self._rebuild_blknos_and_blklocs() nbs_tup = tuple(blk.delete(blk_locs)) if value is not None: locs = blk.mgr_locs.as_array[blk_locs] first_nb = new_block_2d(value, BlockPlacement(locs), refs=refs) else: first_nb = nbs_tup[0] nbs_tup = tuple(nbs_tup[1:]) nr_blocks = len(self.blocks) blocks_tup = self.blocks[:blkno_l] + (first_nb,) + self.blocks[blkno_l + 1:] + nbs_tup self.blocks = blocks_tup if not nbs_tup and value is not None: return self._blklocs[first_nb.mgr_locs.indexer] = np.arange(len(first_nb)) for (i, nb) in enumerate(nbs_tup): self._blklocs[nb.mgr_locs.indexer] = np.arange(len(nb)) self._blknos[nb.mgr_locs.indexer] = i + nr_blocks def _iset_single(self, loc: int, value: ArrayLike, inplace: bool, blkno: int, blk: Block, refs: BlockValuesRefs | None=None) -> None: if inplace and blk.should_store(value): copy = not self._has_no_reference_block(blkno) iloc = self.blklocs[loc] blk.set_inplace(slice(iloc, iloc + 1), value, copy=copy) return nb = 
new_block_2d(value, placement=blk._mgr_locs, refs=refs) old_blocks = self.blocks new_blocks = old_blocks[:blkno] + (nb,) + old_blocks[blkno + 1:] self.blocks = new_blocks return def column_setitem(self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool=False) -> None: if not self._has_no_reference(loc): blkno = self.blknos[loc] blk_loc = self.blklocs[loc] values = self.blocks[blkno].values if values.ndim == 1: values = values.copy() else: values = values[[blk_loc]] self._iset_split_block(blkno, [blk_loc], values) col_mgr = self.iget(loc, track_ref=False) if inplace_only: col_mgr.setitem_inplace(idx, value) else: new_mgr = col_mgr.setitem((idx,), value) self.iset(loc, new_mgr._block.values, inplace=True) def insert(self, loc: int, item: Hashable, value: ArrayLike, refs=None) -> None: new_axis = self.items.insert(loc, item) if value.ndim == 2: value = value.T if len(value) > 1: raise ValueError(f'Expected a 1D array, got an array with shape {value.T.shape}') else: value = ensure_block_shape(value, ndim=self.ndim) bp = BlockPlacement(slice(loc, loc + 1)) block = new_block_2d(values=value, placement=bp, refs=refs) if not len(self.blocks): self._blklocs = np.array([0], dtype=np.intp) self._blknos = np.array([0], dtype=np.intp) else: self._insert_update_mgr_locs(loc) self._insert_update_blklocs_and_blknos(loc) self.axes[0] = new_axis self.blocks += (block,) self._known_consolidated = False if get_option('performance_warnings') and sum((not block.is_extension for block in self.blocks)) > 100: warnings.warn('DataFrame is highly fragmented. This is usually the result of calling `frame.insert` many times, which has poor performance. Consider joining all columns at once using pd.concat(axis=1) instead. To get a de-fragmented frame, use `newframe = frame.copy()`', PerformanceWarning, stacklevel=find_stack_level()) def _insert_update_mgr_locs(self, loc) -> None: blknos = np.bincount(self.blknos[loc:]).nonzero()[0] for blkno in blknos: blk = self.blocks[blkno] blk._mgr_locs = blk._mgr_locs.increment_above(loc) def _insert_update_blklocs_and_blknos(self, loc) -> None: if loc == self.blklocs.shape[0]: self._blklocs = np.append(self._blklocs, 0) self._blknos = np.append(self._blknos, len(self.blocks)) elif loc == 0: self._blklocs = np.concatenate([[0], self._blklocs]) self._blknos = np.concatenate([[len(self.blocks)], self._blknos]) else: (new_blklocs, new_blknos) = libinternals.update_blklocs_and_blknos(self.blklocs, self.blknos, loc, len(self.blocks)) self._blklocs = new_blklocs self._blknos = new_blknos def idelete(self, indexer) -> BlockManager: is_deleted = np.zeros(self.shape[0], dtype=np.bool_) is_deleted[indexer] = True taker = (~is_deleted).nonzero()[0] nbs = self._slice_take_blocks_ax0(taker, only_slice=True, ref_inplace_op=True) new_columns = self.items[~is_deleted] axes = [new_columns, self.axes[1]] return type(self)(tuple(nbs), axes, verify_integrity=False) def grouped_reduce(self, func: Callable) -> Self: result_blocks: list[Block] = [] for blk in self.blocks: if blk.is_object: for sb in blk._split(): applied = sb.apply(func) result_blocks = extend_blocks(applied, result_blocks) else: applied = blk.apply(func) result_blocks = extend_blocks(applied, result_blocks) if len(result_blocks) == 0: nrows = 0 else: nrows = result_blocks[0].values.shape[-1] index = default_index(nrows) return type(self).from_blocks(result_blocks, [self.axes[0], index]) def reduce(self, func: Callable) -> Self: assert self.ndim == 2 res_blocks: list[Block] = [] for blk in self.blocks: nbs = 
blk.reduce(func) res_blocks.extend(nbs) index = Index([None]) new_mgr = type(self).from_blocks(res_blocks, [self.items, index]) return new_mgr def operate_blockwise(self, other: BlockManager, array_op) -> BlockManager: return operate_blockwise(self, other, array_op) def _equal_values(self: BlockManager, other: BlockManager) -> bool: return blockwise_all(self, other, array_equals) def quantile(self, *, qs: Index, interpolation: QuantileInterpolation='linear') -> Self: assert self.ndim >= 2 assert is_list_like(qs) new_axes = list(self.axes) new_axes[1] = Index(qs, dtype=np.float64) blocks = [blk.quantile(qs=qs, interpolation=interpolation) for blk in self.blocks] return type(self)(blocks, new_axes) def unstack(self, unstacker, fill_value) -> BlockManager: new_columns = unstacker.get_new_columns(self.items) new_index = unstacker.new_index allow_fill = not unstacker.mask_all if allow_fill: new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape) needs_masking = new_mask2D.any(axis=0) else: needs_masking = np.zeros(unstacker.full_shape[1], dtype=bool) new_blocks: list[Block] = [] columns_mask: list[np.ndarray] = [] if len(self.items) == 0: factor = 1 else: fac = len(new_columns) / len(self.items) assert fac == int(fac) factor = int(fac) for blk in self.blocks: mgr_locs = blk.mgr_locs new_placement = mgr_locs.tile_for_unstack(factor) (blocks, mask) = blk._unstack(unstacker, fill_value, new_placement=new_placement, needs_masking=needs_masking) new_blocks.extend(blocks) columns_mask.extend(mask) assert mask.sum() == sum((len(nb._mgr_locs) for nb in blocks)) new_columns = new_columns[columns_mask] bm = BlockManager(new_blocks, [new_columns, new_index], verify_integrity=False) return bm def to_iter_dict(self) -> Generator[tuple[str, Self], None, None]: key = lambda block: str(block.dtype) for (dtype, blocks) in itertools.groupby(sorted(self.blocks, key=key), key=key): yield (dtype, self._combine(list(blocks))) def as_array(self, dtype: np.dtype | None=None, copy: bool=False, na_value: object=lib.no_default) -> np.ndarray: passed_nan = lib.is_float(na_value) and isna(na_value) if len(self.blocks) == 0: arr = np.empty(self.shape, dtype=float) return arr.transpose() if self.is_single_block: blk = self.blocks[0] if na_value is not lib.no_default: if lib.is_np_dtype(blk.dtype, 'f') and passed_nan: pass else: copy = True if blk.is_extension: arr = blk.values.to_numpy(dtype=dtype, na_value=na_value, copy=copy).reshape(blk.shape) elif not copy: arr = np.asarray(blk.values, dtype=dtype) else: arr = np.array(blk.values, dtype=dtype, copy=copy) if not copy: arr = arr.view() arr.flags.writeable = False else: arr = self._interleave(dtype=dtype, na_value=na_value) if na_value is lib.no_default: pass elif arr.dtype.kind == 'f' and passed_nan: pass else: arr[isna(arr)] = na_value return arr.transpose() def _interleave(self, dtype: np.dtype | None=None, na_value: object=lib.no_default) -> np.ndarray: if not dtype: dtype = interleaved_dtype([blk.dtype for blk in self.blocks]) dtype = ensure_np_dtype(dtype) result = np.empty(self.shape, dtype=dtype) itemmask = np.zeros(self.shape[0]) if dtype == np.dtype('object') and na_value is lib.no_default: for blk in self.blocks: rl = blk.mgr_locs arr = blk.get_values(dtype) result[rl.indexer] = arr itemmask[rl.indexer] = 1 return result for blk in self.blocks: rl = blk.mgr_locs if blk.is_extension: arr = blk.values.to_numpy(dtype=dtype, na_value=na_value) else: arr = blk.get_values(dtype) result[rl.indexer] = arr itemmask[rl.indexer] = 1 if not itemmask.all(): raise 
AssertionError('Some items were not contained in blocks') return result def is_consolidated(self) -> bool: if not self._known_consolidated: self._consolidate_check() return self._is_consolidated def _consolidate_check(self) -> None: if len(self.blocks) == 1: self._is_consolidated = True self._known_consolidated = True return dtypes = [blk.dtype for blk in self.blocks if blk._can_consolidate] self._is_consolidated = len(dtypes) == len(set(dtypes)) self._known_consolidated = True def _consolidate_inplace(self) -> None: if not self.is_consolidated(): self.blocks = _consolidate(self.blocks) self._is_consolidated = True self._known_consolidated = True self._rebuild_blknos_and_blklocs() @classmethod def concat_horizontal(cls, mgrs: list[Self], axes: list[Index]) -> Self: offset = 0 blocks: list[Block] = [] for mgr in mgrs: for blk in mgr.blocks: nb = blk.slice_block_columns(slice(None)) nb._mgr_locs = nb._mgr_locs.add(offset) blocks.append(nb) offset += len(mgr.items) new_mgr = cls(tuple(blocks), axes) return new_mgr @classmethod def concat_vertical(cls, mgrs: list[Self], axes: list[Index]) -> Self: raise NotImplementedError('This logic lives (for now) in internals.concat') class SingleBlockManager(BaseBlockManager): @property def ndim(self) -> Literal[1]: return 1 _is_consolidated = True _known_consolidated = True __slots__ = () is_single_block = True def __init__(self, block: Block, axis: Index, verify_integrity: bool=False) -> None: self.axes = [axis] self.blocks = (block,) @classmethod def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> Self: assert len(blocks) == 1 assert len(axes) == 1 return cls(blocks[0], axes[0], verify_integrity=False) @classmethod def from_array(cls, array: ArrayLike, index: Index, refs: BlockValuesRefs | None=None) -> SingleBlockManager: array = maybe_coerce_values(array) bp = BlockPlacement(slice(0, len(index))) block = new_block(array, placement=bp, ndim=1, refs=refs) return cls(block, index) def to_2d_mgr(self, columns: Index) -> BlockManager: blk = self.blocks[0] arr = ensure_block_shape(blk.values, ndim=2) bp = BlockPlacement(0) new_blk = type(blk)(arr, placement=bp, ndim=2, refs=blk.refs) axes = [columns, self.axes[0]] return BlockManager([new_blk], axes=axes, verify_integrity=False) def _has_no_reference(self, i: int=0) -> bool: return not self.blocks[0].refs.has_reference() def __getstate__(self): block_values = [b.values for b in self.blocks] block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks] axes_array = list(self.axes) extra_state = {'0.14.1': {'axes': axes_array, 'blocks': [{'values': b.values, 'mgr_locs': b.mgr_locs.indexer} for b in self.blocks]}} return (axes_array, block_values, block_items, extra_state) def __setstate__(self, state) -> None: def unpickle_block(values, mgr_locs, ndim: int) -> Block: values = extract_array(values, extract_numpy=True) if not isinstance(mgr_locs, BlockPlacement): mgr_locs = BlockPlacement(mgr_locs) values = maybe_coerce_values(values) return new_block(values, placement=mgr_locs, ndim=ndim) if isinstance(state, tuple) and len(state) >= 4 and ('0.14.1' in state[3]): state = state[3]['0.14.1'] self.axes = [ensure_index(ax) for ax in state['axes']] ndim = len(self.axes) self.blocks = tuple((unpickle_block(b['values'], b['mgr_locs'], ndim=ndim) for b in state['blocks'])) else: raise NotImplementedError('pre-0.14.1 pickles are no longer supported') self._post_setstate() def _post_setstate(self) -> None: pass @cache_readonly def _block(self) -> Block: return self.blocks[0] @final @property def 
array(self) -> ArrayLike: return self.blocks[0].values @property def _blknos(self) -> None: return None @property def _blklocs(self) -> None: return None def get_rows_with_mask(self, indexer: npt.NDArray[np.bool_]) -> Self: blk = self._block if len(indexer) > 0 and indexer.all(): return type(self)(blk.copy(deep=False), self.index) array = blk.values[indexer] if isinstance(indexer, np.ndarray) and indexer.dtype.kind == 'b': refs = None else: refs = blk.refs bp = BlockPlacement(slice(0, len(array))) block = type(blk)(array, placement=bp, ndim=1, refs=refs) new_idx = self.index[indexer] return type(self)(block, new_idx) def get_slice(self, slobj: slice, axis: AxisInt=0) -> SingleBlockManager: if axis >= self.ndim: raise IndexError('Requested axis not found in manager') blk = self._block array = blk.values[slobj] bp = BlockPlacement(slice(0, len(array))) block = type(blk)(array, placement=bp, ndim=1, refs=blk.refs) new_index = self.index._getitem_slice(slobj) return type(self)(block, new_index) @property def index(self) -> Index: return self.axes[0] @property def dtype(self) -> DtypeObj: return self._block.dtype def get_dtypes(self) -> npt.NDArray[np.object_]: return np.array([self._block.dtype], dtype=object) def external_values(self): return self._block.external_values() def internal_values(self): return self._block.values def array_values(self) -> ExtensionArray: return self._block.array_values def get_numeric_data(self) -> Self: if self._block.is_numeric: return self.copy(deep=False) return self.make_empty() @property def _can_hold_na(self) -> bool: return self._block._can_hold_na def setitem_inplace(self, indexer, value) -> None: if not self._has_no_reference(0): self.blocks = (self._block.copy(),) self._reset_cache() arr = self.array if isinstance(arr, np.ndarray): value = np_can_hold_element(arr.dtype, value) if isinstance(value, np.ndarray) and value.ndim == 1 and (len(value) == 1): value = value[0, ...] 
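# --- Illustrative sketch (editor's example, not part of the pandas source): the
# reference check a few statements above is what produces copy-on-write's user-visible
# behaviour. Under copy-on-write (the default in pandas 3.x; opt-in on 2.x via the
# "mode.copy_on_write" option), a Series taken from a DataFrame shares the parent's
# block until it is written to; the manager then detects the shared reference and
# copies the block before writing, so the parent stays untouched.
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})
s = df["a"]             # shares the underlying block with df (refs tracked on the block)
s.iloc[0] = 100         # shared reference detected -> block copied before the write
print(df["a"].iloc[0])  # under copy-on-write this prints 1: df is unchanged
# --- end of illustrative sketch ---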
arr[indexer] = value def idelete(self, indexer) -> SingleBlockManager: nb = self._block.delete(indexer)[0] self.blocks = (nb,) self.axes[0] = self.axes[0].delete(indexer) self._reset_cache() return self def fast_xs(self, loc): raise NotImplementedError('Use series._values[loc] instead') def set_values(self, values: ArrayLike) -> None: self.blocks[0].values = values self.blocks[0]._mgr_locs = BlockPlacement(slice(len(values))) def _equal_values(self, other: Self) -> bool: if other.ndim != 1: return False left = self.blocks[0].values right = other.blocks[0].values return array_equals(left, right) def grouped_reduce(self, func): arr = self.array res = func(arr) index = default_index(len(res)) mgr = type(self).from_array(res, index) return mgr def create_block_manager_from_blocks(blocks: list[Block], axes: list[Index], consolidate: bool=True, verify_integrity: bool=True) -> BlockManager: try: mgr = BlockManager(blocks, axes, verify_integrity=verify_integrity) except ValueError as err: arrays = [blk.values for blk in blocks] tot_items = sum((arr.shape[0] for arr in arrays)) raise_construction_error(tot_items, arrays[0].shape[1:], axes, err) if consolidate: mgr._consolidate_inplace() return mgr def create_block_manager_from_column_arrays(arrays: list[ArrayLike], axes: list[Index], consolidate: bool, refs: list) -> BlockManager: try: blocks = _form_blocks(arrays, consolidate, refs) mgr = BlockManager(blocks, axes, verify_integrity=False) except ValueError as e: raise_construction_error(len(arrays), arrays[0].shape, axes, e) if consolidate: mgr._consolidate_inplace() return mgr def raise_construction_error(tot_items: int, block_shape: Shape, axes: list[Index], e: ValueError | None=None) -> NoReturn: passed = tuple(map(int, [tot_items] + list(block_shape))) if len(passed) <= 2: passed = passed[::-1] implied = tuple((len(ax) for ax in axes)) if len(implied) <= 2: implied = implied[::-1] if passed == implied and e is not None: raise e if block_shape[0] == 0: raise ValueError('Empty data passed with indices specified.') raise ValueError(f'Shape of passed values is {passed}, indices imply {implied}') def _grouping_func(tup: tuple[int, ArrayLike]) -> tuple[int, DtypeObj]: dtype = tup[1].dtype if is_1d_only_ea_dtype(dtype): sep = id(dtype) else: sep = 0 return (sep, dtype) def _form_blocks(arrays: list[ArrayLike], consolidate: bool, refs: list) -> list[Block]: tuples = enumerate(arrays) if not consolidate: return _tuples_to_blocks_no_consolidate(tuples, refs) grouper = itertools.groupby(tuples, _grouping_func) nbs: list[Block] = [] for ((_, dtype), tup_block) in grouper: block_type = get_block_type(dtype) if isinstance(dtype, np.dtype): is_dtlike = dtype.kind in 'mM' if issubclass(dtype.type, (str, bytes)): dtype = np.dtype(object) (values, placement) = _stack_arrays(tup_block, dtype) if is_dtlike: values = ensure_wrapped_if_datetimelike(values) blk = block_type(values, placement=BlockPlacement(placement), ndim=2) nbs.append(blk) elif is_1d_only_ea_dtype(dtype): dtype_blocks = [block_type(x[1], placement=BlockPlacement(x[0]), ndim=2) for x in tup_block] nbs.extend(dtype_blocks) else: dtype_blocks = [block_type(ensure_block_shape(x[1], 2), placement=BlockPlacement(x[0]), ndim=2) for x in tup_block] nbs.extend(dtype_blocks) return nbs def _tuples_to_blocks_no_consolidate(tuples, refs) -> list[Block]: return [new_block_2d(ensure_block_shape(arr, ndim=2), placement=BlockPlacement(i), refs=ref) for ((i, arr), ref) in zip(tuples, refs)] def _stack_arrays(tuples, dtype: np.dtype): (placement, arrays) = 
zip(*tuples) first = arrays[0] shape = (len(arrays),) + first.shape stacked = np.empty(shape, dtype=dtype) for (i, arr) in enumerate(arrays): stacked[i] = arr return (stacked, placement) def _consolidate(blocks: tuple[Block, ...]) -> tuple[Block, ...]: gkey = lambda x: x._consolidate_key grouper = itertools.groupby(sorted(blocks, key=gkey), gkey) new_blocks: list[Block] = [] for ((_can_consolidate, dtype), group_blocks) in grouper: (merged_blocks, _) = _merge_blocks(list(group_blocks), dtype=dtype, can_consolidate=_can_consolidate) new_blocks = extend_blocks(merged_blocks, new_blocks) return tuple(new_blocks) def _merge_blocks(blocks: list[Block], dtype: DtypeObj, can_consolidate: bool) -> tuple[list[Block], bool]: if len(blocks) == 1: return (blocks, False) if can_consolidate: new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks]) new_values: ArrayLike if isinstance(blocks[0].dtype, np.dtype): new_values = np.vstack([b.values for b in blocks]) else: bvals = [blk.values for blk in blocks] bvals2 = cast(Sequence[NDArrayBackedExtensionArray], bvals) new_values = bvals2[0]._concat_same_type(bvals2, axis=0) argsort = np.argsort(new_mgr_locs) new_values = new_values[argsort] new_mgr_locs = new_mgr_locs[argsort] bp = BlockPlacement(new_mgr_locs) return ([new_block_2d(new_values, placement=bp)], True) return (blocks, False) def _preprocess_slice_or_indexer(slice_or_indexer: slice | np.ndarray, length: int, allow_fill: bool): if isinstance(slice_or_indexer, slice): return ('slice', slice_or_indexer, libinternals.slice_len(slice_or_indexer, length)) else: if not isinstance(slice_or_indexer, np.ndarray) or slice_or_indexer.dtype.kind != 'i': dtype = getattr(slice_or_indexer, 'dtype', None) raise TypeError(type(slice_or_indexer), dtype) indexer = ensure_platform_int(slice_or_indexer) if not allow_fill: indexer = maybe_convert_indices(indexer, length) return ('fancy', indexer, len(indexer)) def make_na_array(dtype: DtypeObj, shape: Shape, fill_value) -> ArrayLike: if isinstance(dtype, DatetimeTZDtype): ts = Timestamp(fill_value).as_unit(dtype.unit) i8values = np.full(shape, ts._value) dt64values = i8values.view(f'M8[{dtype.unit}]') return DatetimeArray._simple_new(dt64values, dtype=dtype) elif is_1d_only_ea_dtype(dtype): dtype = cast(ExtensionDtype, dtype) cls = dtype.construct_array_type() missing_arr = cls._from_sequence([], dtype=dtype) (ncols, nrows) = shape assert ncols == 1, ncols empty_arr = -1 * np.ones((nrows,), dtype=np.intp) return missing_arr.take(empty_arr, allow_fill=True, fill_value=fill_value) elif isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() missing_arr = cls._empty(shape=shape, dtype=dtype) missing_arr[:] = fill_value return missing_arr else: missing_arr_np = np.empty(shape, dtype=dtype) missing_arr_np.fill(fill_value) if dtype.kind in 'mM': missing_arr_np = ensure_wrapped_if_datetimelike(missing_arr_np) return missing_arr_np # File: pandas-main/pandas/core/internals/ops.py from __future__ import annotations from typing import TYPE_CHECKING, NamedTuple from pandas.core.dtypes.common import is_1d_only_ea_dtype if TYPE_CHECKING: from collections.abc import Iterator from pandas._libs.internals import BlockPlacement from pandas._typing import ArrayLike from pandas.core.internals.blocks import Block from pandas.core.internals.managers import BlockManager class BlockPairInfo(NamedTuple): lvals: ArrayLike rvals: ArrayLike locs: BlockPlacement left_ea: bool right_ea: bool rblk: Block def _iter_block_pairs(left: BlockManager, right: BlockManager) -> 
Iterator[BlockPairInfo]: for blk in left.blocks: locs = blk.mgr_locs blk_vals = blk.values left_ea = blk_vals.ndim == 1 rblks = right._slice_take_blocks_ax0(locs.indexer, only_slice=True) for rblk in rblks: right_ea = rblk.values.ndim == 1 (lvals, rvals) = _get_same_shape_values(blk, rblk, left_ea, right_ea) info = BlockPairInfo(lvals, rvals, locs, left_ea, right_ea, rblk) yield info def operate_blockwise(left: BlockManager, right: BlockManager, array_op) -> BlockManager: res_blks: list[Block] = [] for (lvals, rvals, locs, left_ea, right_ea, rblk) in _iter_block_pairs(left, right): res_values = array_op(lvals, rvals) if left_ea and (not right_ea) and hasattr(res_values, 'reshape') and (not is_1d_only_ea_dtype(res_values.dtype)): res_values = res_values.reshape(1, -1) nbs = rblk._split_op_result(res_values) _reset_block_mgr_locs(nbs, locs) res_blks.extend(nbs) new_mgr = type(right)(tuple(res_blks), axes=right.axes, verify_integrity=False) return new_mgr def _reset_block_mgr_locs(nbs: list[Block], locs) -> None: for nb in nbs: nblocs = locs[nb.mgr_locs.indexer] nb.mgr_locs = nblocs def _get_same_shape_values(lblk: Block, rblk: Block, left_ea: bool, right_ea: bool) -> tuple[ArrayLike, ArrayLike]: lvals = lblk.values rvals = rblk.values assert rblk.mgr_locs.is_slice_like, rblk.mgr_locs if not (left_ea or right_ea): lvals = lvals[rblk.mgr_locs.indexer, :] assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape) elif left_ea and right_ea: assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape) elif right_ea: lvals = lvals[rblk.mgr_locs.indexer, :] assert lvals.shape[0] == 1, lvals.shape lvals = lvals[0, :] else: assert rvals.shape[0] == 1, rvals.shape rvals = rvals[0, :] return (lvals, rvals) def blockwise_all(left: BlockManager, right: BlockManager, op) -> bool: for info in _iter_block_pairs(left, right): res = op(info.lvals, info.rvals) if not res: return False return True # File: pandas-main/pandas/core/methods/describe.py """""" from __future__ import annotations from abc import ABC, abstractmethod from typing import TYPE_CHECKING, cast import numpy as np from pandas._typing import DtypeObj, NDFrameT, npt from pandas.util._validators import validate_percentile from pandas.core.dtypes.common import is_bool_dtype, is_numeric_dtype from pandas.core.dtypes.dtypes import ArrowDtype, DatetimeTZDtype, ExtensionDtype from pandas.core.arrays.floating import Float64Dtype from pandas.core.reshape.concat import concat from pandas.io.formats.format import format_percentiles if TYPE_CHECKING: from collections.abc import Callable, Hashable, Sequence from pandas import DataFrame, Series def describe_ndframe(*, obj: NDFrameT, include: str | Sequence[str] | None, exclude: str | Sequence[str] | None, percentiles: Sequence[float] | np.ndarray | None) -> NDFrameT: percentiles = _refine_percentiles(percentiles) describer: NDFrameDescriberAbstract if obj.ndim == 1: describer = SeriesDescriber(obj=cast('Series', obj)) else: describer = DataFrameDescriber(obj=cast('DataFrame', obj), include=include, exclude=exclude) result = describer.describe(percentiles=percentiles) return cast(NDFrameT, result) class NDFrameDescriberAbstract(ABC): def __init__(self, obj: DataFrame | Series) -> None: self.obj = obj @abstractmethod def describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame | Series: class SeriesDescriber(NDFrameDescriberAbstract): obj: Series def describe(self, percentiles: Sequence[float] | np.ndarray) -> Series: describe_func = select_describe_func(self.obj) return 
describe_func(self.obj, percentiles) class DataFrameDescriber(NDFrameDescriberAbstract): obj: DataFrame def __init__(self, obj: DataFrame, *, include: str | Sequence[str] | None, exclude: str | Sequence[str] | None) -> None: self.include = include self.exclude = exclude if obj.ndim == 2 and obj.columns.size == 0: raise ValueError('Cannot describe a DataFrame without columns') super().__init__(obj) def describe(self, percentiles: Sequence[float] | np.ndarray) -> DataFrame: data = self._select_data() ldesc: list[Series] = [] for (_, series) in data.items(): describe_func = select_describe_func(series) ldesc.append(describe_func(series, percentiles)) col_names = reorder_columns(ldesc) d = concat([x.reindex(col_names) for x in ldesc], axis=1, ignore_index=True, sort=False) d.columns = data.columns.copy() return d def _select_data(self) -> DataFrame: if self.include is None and self.exclude is None: default_include: list[npt.DTypeLike] = [np.number, 'datetime'] data = self.obj.select_dtypes(include=default_include) if len(data.columns) == 0: data = self.obj elif self.include == 'all': if self.exclude is not None: msg = "exclude must be None when include is 'all'" raise ValueError(msg) data = self.obj else: data = self.obj.select_dtypes(include=self.include, exclude=self.exclude) return data def reorder_columns(ldesc: Sequence[Series]) -> list[Hashable]: names: list[Hashable] = [] seen_names: set[Hashable] = set() ldesc_indexes = sorted((x.index for x in ldesc), key=len) for idxnames in ldesc_indexes: for name in idxnames: if name not in seen_names: seen_names.add(name) names.append(name) return names def describe_numeric_1d(series: Series, percentiles: Sequence[float]) -> Series: from pandas import Series formatted_percentiles = format_percentiles(percentiles) stat_index = ['count', 'mean', 'std', 'min'] + formatted_percentiles + ['max'] d = [series.count(), series.mean(), series.std(), series.min()] + series.quantile(percentiles).tolist() + [series.max()] dtype: DtypeObj | None if isinstance(series.dtype, ExtensionDtype): if isinstance(series.dtype, ArrowDtype): if series.dtype.kind == 'm': dtype = None else: import pyarrow as pa dtype = ArrowDtype(pa.float64()) else: dtype = Float64Dtype() elif series.dtype.kind in 'iufb': dtype = np.dtype('float') else: dtype = None return Series(d, index=stat_index, name=series.name, dtype=dtype) def describe_categorical_1d(data: Series, percentiles_ignored: Sequence[float]) -> Series: names = ['count', 'unique', 'top', 'freq'] objcounts = data.value_counts() count_unique = len(objcounts[objcounts != 0]) if count_unique > 0: (top, freq) = (objcounts.index[0], objcounts.iloc[0]) dtype = None else: (top, freq) = (np.nan, np.nan) dtype = 'object' result = [data.count(), count_unique, top, freq] from pandas import Series return Series(result, index=names, name=data.name, dtype=dtype) def describe_timestamp_1d(data: Series, percentiles: Sequence[float]) -> Series: from pandas import Series formatted_percentiles = format_percentiles(percentiles) stat_index = ['count', 'mean', 'min'] + formatted_percentiles + ['max'] d = [data.count(), data.mean(), data.min()] + data.quantile(percentiles).tolist() + [data.max()] return Series(d, index=stat_index, name=data.name) def select_describe_func(data: Series) -> Callable: if is_bool_dtype(data.dtype): return describe_categorical_1d elif is_numeric_dtype(data): return describe_numeric_1d elif data.dtype.kind == 'M' or isinstance(data.dtype, DatetimeTZDtype): return describe_timestamp_1d elif data.dtype.kind == 'm': return 
describe_numeric_1d else: return describe_categorical_1d def _refine_percentiles(percentiles: Sequence[float] | np.ndarray | None) -> npt.NDArray[np.float64]: if percentiles is None: return np.array([0.25, 0.5, 0.75]) percentiles = list(percentiles) validate_percentile(percentiles) if 0.5 not in percentiles: percentiles.append(0.5) percentiles = np.asarray(percentiles) unique_pcts = np.unique(percentiles) assert percentiles is not None if len(unique_pcts) < len(percentiles): raise ValueError('percentiles cannot contain duplicates') return unique_pcts # File: pandas-main/pandas/core/methods/selectn.py """""" from __future__ import annotations from collections.abc import Hashable, Sequence from typing import TYPE_CHECKING, Generic, cast, final import numpy as np from pandas._libs import algos as libalgos from pandas.core.dtypes.common import is_bool_dtype, is_complex_dtype, is_integer_dtype, is_list_like, is_numeric_dtype, needs_i8_conversion from pandas.core.dtypes.dtypes import BaseMaskedDtype from pandas.core.indexes.api import default_index if TYPE_CHECKING: from pandas._typing import DtypeObj, IndexLabel, NDFrameT from pandas import DataFrame, Index, Series else: from pandas._typing import T NDFrameT = T DataFrame = T Series = T class SelectN(Generic[NDFrameT]): def __init__(self, obj: NDFrameT, n: int, keep: str) -> None: self.obj = obj self.n = n self.keep = keep if self.keep not in ('first', 'last', 'all'): raise ValueError('keep must be either "first", "last" or "all"') def compute(self, method: str) -> NDFrameT: raise NotImplementedError @final def nlargest(self) -> NDFrameT: return self.compute('nlargest') @final def nsmallest(self) -> NDFrameT: return self.compute('nsmallest') @final @staticmethod def is_valid_dtype_n_method(dtype: DtypeObj) -> bool: if is_numeric_dtype(dtype): return not is_complex_dtype(dtype) return needs_i8_conversion(dtype) class SelectNSeries(SelectN[Series]): def compute(self, method: str) -> Series: from pandas.core.reshape.concat import concat n = self.n dtype = self.obj.dtype if not self.is_valid_dtype_n_method(dtype): raise TypeError(f"Cannot use method '{method}' with dtype {dtype}") if n <= 0: return self.obj[[]] dropped = self.obj.dropna() nan_index = self.obj.drop(dropped.index) if n >= len(self.obj): ascending = method == 'nsmallest' return self.obj.sort_values(ascending=ascending).head(n) new_dtype = dropped.dtype arr = dropped._values if needs_i8_conversion(arr.dtype): arr = arr.view('i8') elif isinstance(arr.dtype, BaseMaskedDtype): arr = arr._data else: arr = np.asarray(arr) if arr.dtype.kind == 'b': arr = arr.view(np.uint8) if method == 'nlargest': arr = -arr if is_integer_dtype(new_dtype): arr -= 1 elif is_bool_dtype(new_dtype): arr = 1 - -arr if self.keep == 'last': arr = arr[::-1] nbase = n narr = len(arr) n = min(n, narr) if len(arr) > 0: kth_val = libalgos.kth_smallest(arr.copy(order='C'), n - 1) else: kth_val = np.nan (ns,) = np.nonzero(arr <= kth_val) inds = ns[arr[ns].argsort(kind='mergesort')] if self.keep != 'all': inds = inds[:n] findex = nbase elif len(inds) < nbase <= len(nan_index) + len(inds): findex = len(nan_index) + len(inds) else: findex = len(inds) if self.keep == 'last': inds = narr - 1 - inds return concat([dropped.iloc[inds], nan_index]).iloc[:findex] class SelectNFrame(SelectN[DataFrame]): def __init__(self, obj: DataFrame, n: int, keep: str, columns: IndexLabel) -> None: super().__init__(obj, n, keep) if not is_list_like(columns) or isinstance(columns, tuple): columns = [columns] columns = cast(Sequence[Hashable], 
columns) columns = list(columns) self.columns = columns def compute(self, method: str) -> DataFrame: n = self.n frame = self.obj columns = self.columns for column in columns: dtype = frame[column].dtype if not self.is_valid_dtype_n_method(dtype): raise TypeError(f'Column {column!r} has dtype {dtype}, cannot use method {method!r} with this dtype') def get_indexer(current_indexer: Index, other_indexer: Index) -> Index: if method == 'nsmallest': return current_indexer.append(other_indexer) else: return other_indexer.append(current_indexer) original_index = frame.index cur_frame = frame = frame.reset_index(drop=True) cur_n = n indexer: Index = default_index(0) for (i, column) in enumerate(columns): series = cur_frame[column] is_last_column = len(columns) - 1 == i values = getattr(series, method)(cur_n, keep=self.keep if is_last_column else 'all') if is_last_column or len(values) <= cur_n: indexer = get_indexer(indexer, values.index) break border_value = values == values[values.index[-1]] unsafe_values = values[border_value] safe_values = values[~border_value] indexer = get_indexer(indexer, safe_values.index) cur_frame = cur_frame.loc[unsafe_values.index] cur_n = n - len(indexer) frame = frame.take(indexer) frame.index = original_index.take(indexer) if len(columns) == 1: return frame ascending = method == 'nsmallest' return frame.sort_values(columns, ascending=ascending, kind='mergesort') # File: pandas-main/pandas/core/methods/to_dict.py from __future__ import annotations from typing import TYPE_CHECKING, Literal, overload import warnings import numpy as np from pandas._libs import lib, missing as libmissing from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import maybe_box_native from pandas.core.dtypes.dtypes import BaseMaskedDtype, ExtensionDtype from pandas.core import common as com if TYPE_CHECKING: from collections.abc import Generator from pandas._typing import MutableMappingT from pandas import DataFrame def create_data_for_split(df: DataFrame, are_all_object_dtype_cols: bool, object_dtype_indices: list[int]) -> Generator[list, None, None]: if are_all_object_dtype_cols: for tup in df.itertuples(index=False, name=None): yield list(map(maybe_box_native, tup)) else: for tup in df.itertuples(index=False, name=None): data = list(tup) if object_dtype_indices: for i in object_dtype_indices: data[i] = maybe_box_native(data[i]) yield data @overload def to_dict(df: DataFrame, orient: Literal['dict', 'list', 'series', 'split', 'tight', 'index']=..., *, into: type[MutableMappingT] | MutableMappingT, index: bool=...) -> MutableMappingT: ... @overload def to_dict(df: DataFrame, orient: Literal['records'], *, into: type[MutableMappingT] | MutableMappingT, index: bool=...) -> list[MutableMappingT]: ... @overload def to_dict(df: DataFrame, orient: Literal['dict', 'list', 'series', 'split', 'tight', 'index']=..., *, into: type[dict]=..., index: bool=...) -> dict: ... @overload def to_dict(df: DataFrame, orient: Literal['records'], *, into: type[dict]=..., index: bool=...) -> list[dict]: ... 
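# (Explanatory note added for exposition; not part of the pandas source.) The four
# @overload stubs above only refine the static return types; the implementation that
# follows dispatches on the ``orient`` string at runtime. In user code this path is
# normally reached through the public DataFrame.to_dict method. A minimal, hypothetical
# usage sketch:
#
#   import pandas as pd
#
#   df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
#   df.to_dict(orient="dict")     # {'a': {0: 1, 1: 2}, 'b': {0: 'x', 1: 'y'}}
#   df.to_dict(orient="records")  # one dict per row
#   df.to_dict(orient="split")    # {'index': [...], 'columns': [...], 'data': [...]}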
def to_dict(df: DataFrame, orient: Literal['dict', 'list', 'series', 'split', 'tight', 'records', 'index']='dict', *, into: type[MutableMappingT] | MutableMappingT=dict, index: bool=True) -> MutableMappingT | list[MutableMappingT]: if orient != 'tight' and (not df.columns.is_unique): warnings.warn('DataFrame columns are not unique, some columns will be omitted.', UserWarning, stacklevel=find_stack_level()) into_c = com.standardize_mapping(into) orient = orient.lower() if not index and orient not in ['split', 'tight']: raise ValueError("'index=False' is only valid when 'orient' is 'split' or 'tight'") if orient == 'series': return into_c(((k, v) for (k, v) in df.items())) if orient == 'dict': return into_c(((k, v.to_dict(into=into)) for (k, v) in df.items())) box_native_indices = [i for (i, col_dtype) in enumerate(df.dtypes.values) if col_dtype == np.dtype(object) or isinstance(col_dtype, ExtensionDtype)] are_all_object_dtype_cols = len(box_native_indices) == len(df.dtypes) if orient == 'list': object_dtype_indices_as_set: set[int] = set(box_native_indices) box_na_values = (lib.no_default if not isinstance(col_dtype, BaseMaskedDtype) else libmissing.NA for col_dtype in df.dtypes.values) return into_c(((k, list(map(maybe_box_native, v.to_numpy(na_value=box_na_value))) if i in object_dtype_indices_as_set else list(map(maybe_box_native, v.to_numpy()))) for (i, (box_na_value, (k, v))) in enumerate(zip(box_na_values, df.items())))) elif orient == 'split': data = list(create_data_for_split(df, are_all_object_dtype_cols, box_native_indices)) return into_c(((('index', df.index.tolist()),) if index else ()) + (('columns', df.columns.tolist()), ('data', data))) elif orient == 'tight': return into_c(((('index', df.index.tolist()),) if index else ()) + (('columns', df.columns.tolist()), ('data', [list(map(maybe_box_native, t)) for t in df.itertuples(index=False, name=None)])) + ((('index_names', list(df.index.names)),) if index else ()) + (('column_names', list(df.columns.names)),)) elif orient == 'records': columns = df.columns.tolist() if are_all_object_dtype_cols: return [into_c(zip(columns, map(maybe_box_native, row))) for row in df.itertuples(index=False, name=None)] else: data = [into_c(zip(columns, t)) for t in df.itertuples(index=False, name=None)] if box_native_indices: object_dtype_indices_as_set = set(box_native_indices) object_dtype_cols = {col for (i, col) in enumerate(df.columns) if i in object_dtype_indices_as_set} for row in data: for col in object_dtype_cols: row[col] = maybe_box_native(row[col]) return data elif orient == 'index': if not df.index.is_unique: raise ValueError("DataFrame index must be unique for orient='index'.") columns = df.columns.tolist() if are_all_object_dtype_cols: return into_c(((t[0], dict(zip(df.columns, map(maybe_box_native, t[1:])))) for t in df.itertuples(name=None))) elif box_native_indices: object_dtype_indices_as_set = set(box_native_indices) return into_c(((t[0], {column: maybe_box_native(v) if i in object_dtype_indices_as_set else v for (i, (column, v)) in enumerate(zip(columns, t[1:]))}) for t in df.itertuples(name=None))) else: return into_c(((t[0], dict(zip(columns, t[1:]))) for t in df.itertuples(name=None))) else: raise ValueError(f"orient '{orient}' not understood") # File: pandas-main/pandas/core/missing.py """""" from __future__ import annotations from functools import wraps from typing import TYPE_CHECKING, Any, Literal, cast, overload import numpy as np from pandas._libs import NaT, algos, lib from pandas._typing import ArrayLike, AxisInt, F, 
ReindexMethod, npt from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.cast import infer_dtype_from from pandas.core.dtypes.common import is_array_like, is_bool_dtype, is_numeric_dtype, is_numeric_v_string_like, is_object_dtype, needs_i8_conversion from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.missing import is_valid_na_for_dtype, isna, na_value_for_dtype if TYPE_CHECKING: from pandas import Index def check_value_size(value, mask: npt.NDArray[np.bool_], length: int): if is_array_like(value): if len(value) != length: raise ValueError(f"Length of 'value' does not match. Got ({len(value)}) expected {length}") value = value[mask] return value def mask_missing(arr: ArrayLike, values_to_mask) -> npt.NDArray[np.bool_]: (dtype, values_to_mask) = infer_dtype_from(values_to_mask) if isinstance(dtype, np.dtype): values_to_mask = np.array(values_to_mask, dtype=dtype) else: cls = dtype.construct_array_type() if not lib.is_list_like(values_to_mask): values_to_mask = [values_to_mask] values_to_mask = cls._from_sequence(values_to_mask, dtype=dtype, copy=False) potential_na = False if is_object_dtype(arr.dtype): potential_na = True arr_mask = ~isna(arr) na_mask = isna(values_to_mask) nonna = values_to_mask[~na_mask] mask = np.zeros(arr.shape, dtype=bool) if is_numeric_dtype(arr.dtype) and (not is_bool_dtype(arr.dtype)) and is_bool_dtype(nonna.dtype): pass elif is_bool_dtype(arr.dtype) and is_numeric_dtype(nonna.dtype) and (not is_bool_dtype(nonna.dtype)): pass else: for x in nonna: if is_numeric_v_string_like(arr, x): pass else: if potential_na: new_mask = np.zeros(arr.shape, dtype=np.bool_) new_mask[arr_mask] = arr[arr_mask] == x else: new_mask = arr == x if not isinstance(new_mask, np.ndarray): new_mask = new_mask.to_numpy(dtype=bool, na_value=False) mask |= new_mask if na_mask.any(): mask |= isna(arr) return mask @overload def clean_fill_method(method: Literal['ffill', 'pad', 'bfill', 'backfill'], *, allow_nearest: Literal[False]=...) -> Literal['pad', 'backfill']: ... @overload def clean_fill_method(method: Literal['ffill', 'pad', 'bfill', 'backfill', 'nearest'], *, allow_nearest: Literal[True]) -> Literal['pad', 'backfill', 'nearest']: ... def clean_fill_method(method: Literal['ffill', 'pad', 'bfill', 'backfill', 'nearest'], *, allow_nearest: bool=False) -> Literal['pad', 'backfill', 'nearest']: if isinstance(method, str): method = method.lower() if method == 'ffill': method = 'pad' elif method == 'bfill': method = 'backfill' valid_methods = ['pad', 'backfill'] expecting = 'pad (ffill) or backfill (bfill)' if allow_nearest: valid_methods.append('nearest') expecting = 'pad (ffill), backfill (bfill) or nearest' if method not in valid_methods: raise ValueError(f'Invalid fill method. Expecting {expecting}. Got {method}') return method NP_METHODS = ['linear', 'time', 'index', 'values'] SP_METHODS = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh', 'spline', 'polynomial', 'from_derivatives', 'piecewise_polynomial', 'pchip', 'akima', 'cubicspline'] def clean_interp_method(method: str, index: Index, **kwargs) -> str: order = kwargs.get('order') if method in ('spline', 'polynomial') and order is None: raise ValueError('You must specify the order of the spline or polynomial.') valid = NP_METHODS + SP_METHODS if method not in valid: raise ValueError(f"method must be one of {valid}. 
Got '{method}' instead.") if method in ('krogh', 'piecewise_polynomial', 'pchip'): if not index.is_monotonic_increasing: raise ValueError(f'{method} interpolation requires that the index be monotonic.') return method def find_valid_index(how: str, is_valid: npt.NDArray[np.bool_]) -> int | None: assert how in ['first', 'last'] if len(is_valid) == 0: return None if is_valid.ndim == 2: is_valid = is_valid.any(axis=1) if how == 'first': idxpos = is_valid[:].argmax() elif how == 'last': idxpos = len(is_valid) - 1 - is_valid[::-1].argmax() chk_notna = is_valid[idxpos] if not chk_notna: return None return idxpos def validate_limit_direction(limit_direction: str) -> Literal['forward', 'backward', 'both']: valid_limit_directions = ['forward', 'backward', 'both'] limit_direction = limit_direction.lower() if limit_direction not in valid_limit_directions: raise ValueError(f"Invalid limit_direction: expecting one of {valid_limit_directions}, got '{limit_direction}'.") return limit_direction def validate_limit_area(limit_area: str | None) -> Literal['inside', 'outside'] | None: if limit_area is not None: valid_limit_areas = ['inside', 'outside'] limit_area = limit_area.lower() if limit_area not in valid_limit_areas: raise ValueError(f'Invalid limit_area: expecting one of {valid_limit_areas}, got {limit_area}.') return limit_area def infer_limit_direction(limit_direction: Literal['backward', 'forward', 'both'] | None, method: str) -> Literal['backward', 'forward', 'both']: if limit_direction is None: if method in ('backfill', 'bfill'): limit_direction = 'backward' else: limit_direction = 'forward' else: if method in ('pad', 'ffill') and limit_direction != 'forward': raise ValueError(f"`limit_direction` must be 'forward' for method `{method}`") if method in ('backfill', 'bfill') and limit_direction != 'backward': raise ValueError(f"`limit_direction` must be 'backward' for method `{method}`") return limit_direction def get_interp_index(method, index: Index) -> Index: if method == 'linear': from pandas import Index if isinstance(index.dtype, DatetimeTZDtype) or lib.is_np_dtype(index.dtype, 'mM'): index = Index(index.view('i8')) elif not is_numeric_dtype(index.dtype): index = Index(range(len(index))) else: methods = {'index', 'values', 'nearest', 'time'} is_numeric_or_datetime = is_numeric_dtype(index.dtype) or isinstance(index.dtype, DatetimeTZDtype) or lib.is_np_dtype(index.dtype, 'mM') valid = NP_METHODS + SP_METHODS if method in valid: if method not in methods and (not is_numeric_or_datetime): raise ValueError(f'Index column must be numeric or datetime type when using {method} method other than linear. Try setting a numeric or datetime index column before interpolating.') else: raise ValueError(f'Can not interpolate with method={method}.') if isna(index).any(): raise NotImplementedError('Interpolation with NaNs in the index has not been implemented. 
Try filling those NaNs before interpolating.') return index def interpolate_2d_inplace(data: np.ndarray, index: Index, axis: AxisInt, method: str='linear', limit: int | None=None, limit_direction: str='forward', limit_area: str | None=None, fill_value: Any | None=None, mask=None, **kwargs) -> None: clean_interp_method(method, index, **kwargs) if is_valid_na_for_dtype(fill_value, data.dtype): fill_value = na_value_for_dtype(data.dtype, compat=False) if method == 'time': if not needs_i8_conversion(index.dtype): raise ValueError('time-weighted interpolation only works on Series or DataFrames with a DatetimeIndex') method = 'values' limit_direction = validate_limit_direction(limit_direction) limit_area_validated = validate_limit_area(limit_area) limit = algos.validate_limit(nobs=None, limit=limit) indices = _index_to_interp_indices(index, method) def func(yvalues: np.ndarray) -> None: _interpolate_1d(indices=indices, yvalues=yvalues, method=method, limit=limit, limit_direction=limit_direction, limit_area=limit_area_validated, fill_value=fill_value, bounds_error=False, mask=mask, **kwargs) np.apply_along_axis(func, axis, data) def _index_to_interp_indices(index: Index, method: str) -> np.ndarray: xarr = index._values if needs_i8_conversion(xarr.dtype): xarr = xarr.view('i8') if method == 'linear': inds = xarr inds = cast(np.ndarray, inds) else: inds = np.asarray(xarr) if method in ('values', 'index'): if inds.dtype == np.object_: inds = lib.maybe_convert_objects(inds) return inds def _interpolate_1d(indices: np.ndarray, yvalues: np.ndarray, method: str='linear', limit: int | None=None, limit_direction: str='forward', limit_area: Literal['inside', 'outside'] | None=None, fill_value: Any | None=None, bounds_error: bool=False, order: int | None=None, mask=None, **kwargs) -> None: if mask is not None: invalid = mask else: invalid = isna(yvalues) valid = ~invalid if not valid.any(): return if valid.all(): return all_nans = np.flatnonzero(invalid) first_valid_index = find_valid_index(how='first', is_valid=valid) if first_valid_index is None: first_valid_index = 0 start_nans = np.arange(first_valid_index) last_valid_index = find_valid_index(how='last', is_valid=valid) if last_valid_index is None: last_valid_index = len(yvalues) end_nans = np.arange(1 + last_valid_index, len(valid)) if limit_direction == 'forward': preserve_nans = np.union1d(start_nans, _interp_limit(invalid, limit, 0)) elif limit_direction == 'backward': preserve_nans = np.union1d(end_nans, _interp_limit(invalid, 0, limit)) else: preserve_nans = np.unique(_interp_limit(invalid, limit, limit)) if limit_area == 'inside': preserve_nans = np.union1d(preserve_nans, start_nans) preserve_nans = np.union1d(preserve_nans, end_nans) elif limit_area == 'outside': mid_nans = np.setdiff1d(all_nans, start_nans, assume_unique=True) mid_nans = np.setdiff1d(mid_nans, end_nans, assume_unique=True) preserve_nans = np.union1d(preserve_nans, mid_nans) is_datetimelike = yvalues.dtype.kind in 'mM' if is_datetimelike: yvalues = yvalues.view('i8') if method in NP_METHODS: indexer = np.argsort(indices[valid]) yvalues[invalid] = np.interp(indices[invalid], indices[valid][indexer], yvalues[valid][indexer]) else: yvalues[invalid] = _interpolate_scipy_wrapper(indices[valid], yvalues[valid], indices[invalid], method=method, fill_value=fill_value, bounds_error=bounds_error, order=order, **kwargs) if mask is not None: mask[:] = False mask[preserve_nans] = True elif is_datetimelike: yvalues[preserve_nans] = NaT.value else: yvalues[preserve_nans] = np.nan return def 
_interpolate_scipy_wrapper(x: np.ndarray, y: np.ndarray, new_x: np.ndarray, method: str, fill_value=None, bounds_error: bool=False, order=None, **kwargs): extra = f'{method} interpolation requires SciPy.' import_optional_dependency('scipy', extra=extra) from scipy import interpolate new_x = np.asarray(new_x) alt_methods = {'barycentric': interpolate.barycentric_interpolate, 'krogh': interpolate.krogh_interpolate, 'from_derivatives': _from_derivatives, 'piecewise_polynomial': _from_derivatives, 'cubicspline': _cubicspline_interpolate, 'akima': _akima_interpolate, 'pchip': interpolate.pchip_interpolate} interp1d_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'polynomial'] if method in interp1d_methods: if method == 'polynomial': kind = order else: kind = method terp = interpolate.interp1d(x, y, kind=kind, fill_value=fill_value, bounds_error=bounds_error) new_y = terp(new_x) elif method == 'spline': if isna(order) or order <= 0: raise ValueError(f'order needs to be specified and greater than 0; got order: {order}') terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs) new_y = terp(new_x) else: if not x.flags.writeable: x = x.copy() if not y.flags.writeable: y = y.copy() if not new_x.flags.writeable: new_x = new_x.copy() terp = alt_methods.get(method, None) if terp is None: raise ValueError(f'Can not interpolate with method={method}.') kwargs.pop('downcast', None) new_y = terp(x, y, new_x, **kwargs) return new_y def _from_derivatives(xi: np.ndarray, yi: np.ndarray, x: np.ndarray, order=None, der: int | list[int] | None=0, extrapolate: bool=False): from scipy import interpolate method = interpolate.BPoly.from_derivatives m = method(xi, yi.reshape(-1, 1), orders=order, extrapolate=extrapolate) return m(x) def _akima_interpolate(xi: np.ndarray, yi: np.ndarray, x: np.ndarray, der: int | list[int] | None=0, axis: AxisInt=0): from scipy import interpolate P = interpolate.Akima1DInterpolator(xi, yi, axis=axis) return P(x, nu=der) def _cubicspline_interpolate(xi: np.ndarray, yi: np.ndarray, x: np.ndarray, axis: AxisInt=0, bc_type: str | tuple[Any, Any]='not-a-knot', extrapolate=None): from scipy import interpolate P = interpolate.CubicSpline(xi, yi, axis=axis, bc_type=bc_type, extrapolate=extrapolate) return P(x) def pad_or_backfill_inplace(values: np.ndarray, method: Literal['pad', 'backfill']='pad', axis: AxisInt=0, limit: int | None=None, limit_area: Literal['inside', 'outside'] | None=None) -> None: transf = (lambda x: x) if axis == 0 else lambda x: x.T if values.ndim == 1: if axis != 0: raise AssertionError('cannot interpolate on a ndim == 1 with axis != 0') values = values.reshape(tuple((1,) + values.shape)) method = clean_fill_method(method) tvalues = transf(values) func = get_fill_func(method, ndim=2) func(tvalues, limit=limit, limit_area=limit_area) def _fillna_prep(values, mask: npt.NDArray[np.bool_] | None=None) -> npt.NDArray[np.bool_]: if mask is None: mask = isna(values) return mask def _datetimelike_compat(func: F) -> F: @wraps(func) def new_func(values, limit: int | None=None, limit_area: Literal['inside', 'outside'] | None=None, mask=None): if needs_i8_conversion(values.dtype): if mask is None: mask = isna(values) (result, mask) = func(values.view('i8'), limit=limit, limit_area=limit_area, mask=mask) return (result.view(values.dtype), mask) return func(values, limit=limit, limit_area=limit_area, mask=mask) return cast(F, new_func) @_datetimelike_compat def _pad_1d(values: np.ndarray, limit: int | None=None, limit_area: Literal['inside', 'outside'] | None=None, 
mask: npt.NDArray[np.bool_] | None=None) -> tuple[np.ndarray, npt.NDArray[np.bool_]]: mask = _fillna_prep(values, mask) if limit_area is not None and (not mask.all()): _fill_limit_area_1d(mask, limit_area) algos.pad_inplace(values, mask, limit=limit) return (values, mask) @_datetimelike_compat def _backfill_1d(values: np.ndarray, limit: int | None=None, limit_area: Literal['inside', 'outside'] | None=None, mask: npt.NDArray[np.bool_] | None=None) -> tuple[np.ndarray, npt.NDArray[np.bool_]]: mask = _fillna_prep(values, mask) if limit_area is not None and (not mask.all()): _fill_limit_area_1d(mask, limit_area) algos.backfill_inplace(values, mask, limit=limit) return (values, mask) @_datetimelike_compat def _pad_2d(values: np.ndarray, limit: int | None=None, limit_area: Literal['inside', 'outside'] | None=None, mask: npt.NDArray[np.bool_] | None=None) -> tuple[np.ndarray, npt.NDArray[np.bool_]]: mask = _fillna_prep(values, mask) if limit_area is not None: _fill_limit_area_2d(mask, limit_area) if values.size: algos.pad_2d_inplace(values, mask, limit=limit) return (values, mask) @_datetimelike_compat def _backfill_2d(values, limit: int | None=None, limit_area: Literal['inside', 'outside'] | None=None, mask: npt.NDArray[np.bool_] | None=None): mask = _fillna_prep(values, mask) if limit_area is not None: _fill_limit_area_2d(mask, limit_area) if values.size: algos.backfill_2d_inplace(values, mask, limit=limit) else: pass return (values, mask) def _fill_limit_area_1d(mask: npt.NDArray[np.bool_], limit_area: Literal['outside', 'inside']) -> None: neg_mask = ~mask first = neg_mask.argmax() last = len(neg_mask) - neg_mask[::-1].argmax() - 1 if limit_area == 'inside': mask[:first] = False mask[last + 1:] = False elif limit_area == 'outside': mask[first + 1:last] = False def _fill_limit_area_2d(mask: npt.NDArray[np.bool_], limit_area: Literal['outside', 'inside']) -> None: neg_mask = ~mask.T if limit_area == 'outside': la_mask = np.maximum.accumulate(neg_mask, axis=0) & np.maximum.accumulate(neg_mask[::-1], axis=0)[::-1] else: la_mask = ~np.maximum.accumulate(neg_mask, axis=0) | ~np.maximum.accumulate(neg_mask[::-1], axis=0)[::-1] mask[la_mask.T] = False _fill_methods = {'pad': _pad_1d, 'backfill': _backfill_1d} def get_fill_func(method, ndim: int=1): method = clean_fill_method(method) if ndim == 1: return _fill_methods[method] return {'pad': _pad_2d, 'backfill': _backfill_2d}[method] def clean_reindex_fill_method(method) -> ReindexMethod | None: if method is None: return None return clean_fill_method(method, allow_nearest=True) def _interp_limit(invalid: npt.NDArray[np.bool_], fw_limit: int | None, bw_limit: int | None) -> np.ndarray: N = len(invalid) f_idx = np.array([], dtype=np.int64) b_idx = np.array([], dtype=np.int64) assume_unique = True def inner(invalid, limit: int): limit = min(limit, N) windowed = np.lib.stride_tricks.sliding_window_view(invalid, limit + 1).all(1) idx = np.union1d(np.where(windowed)[0] + limit, np.where((~invalid[:limit + 1]).cumsum() == 0)[0]) return idx if fw_limit is not None: if fw_limit == 0: f_idx = np.where(invalid)[0] assume_unique = False else: f_idx = inner(invalid, fw_limit) if bw_limit is not None: if bw_limit == 0: return f_idx else: b_idx = N - 1 - inner(invalid[::-1], bw_limit) if fw_limit == 0: return b_idx return np.intersect1d(f_idx, b_idx, assume_unique=assume_unique) # File: pandas-main/pandas/core/nanops.py from __future__ import annotations import functools import itertools from typing import TYPE_CHECKING, Any, cast import warnings import numpy as np 
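# (Explanatory sketch added for exposition; not part of the pandas source.) The nan*
# reductions defined below share a common pattern: build a NaN mask, replace masked
# entries with a neutral fill value, run the plain NumPy reduction, then null out
# results that do not meet ``min_count`` (cf. _maybe_null_out / check_below_min_count).
# A minimal standalone version of that pattern, under the assumption of a 1-D float
# input, might look like:
#
#   import numpy as np
#
#   def toy_nansum(values, min_count=0):
#       mask = np.isnan(values)
#       filled = np.where(mask, 0.0, values)   # 0 is the neutral element for sum
#       result = filled.sum()
#       if (~mask).sum() < min_count:          # too few non-NA observations
#           return np.nan
#       return result
#
#   toy_nansum(np.array([1.0, np.nan, 2.0]))             # 3.0
#   toy_nansum(np.array([np.nan, np.nan]), min_count=1)  # nan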
from pandas._config import get_option from pandas._libs import NaT, NaTType, iNaT, lib from pandas._typing import ArrayLike, AxisInt, CorrelationMethod, Dtype, DtypeObj, F, Scalar, Shape, npt from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.common import is_complex, is_float, is_float_dtype, is_integer, is_numeric_dtype, is_object_dtype, needs_i8_conversion, pandas_dtype from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna if TYPE_CHECKING: from collections.abc import Callable bn = import_optional_dependency('bottleneck', errors='warn') _BOTTLENECK_INSTALLED = bn is not None _USE_BOTTLENECK = False def set_use_bottleneck(v: bool=True) -> None: global _USE_BOTTLENECK if _BOTTLENECK_INSTALLED: _USE_BOTTLENECK = v set_use_bottleneck(get_option('compute.use_bottleneck')) class disallow: def __init__(self, *dtypes: Dtype) -> None: super().__init__() self.dtypes = tuple((pandas_dtype(dtype).type for dtype in dtypes)) def check(self, obj) -> bool: return hasattr(obj, 'dtype') and issubclass(obj.dtype.type, self.dtypes) def __call__(self, f: F) -> F: @functools.wraps(f) def _f(*args, **kwargs): obj_iter = itertools.chain(args, kwargs.values()) if any((self.check(obj) for obj in obj_iter)): f_name = f.__name__.replace('nan', '') raise TypeError(f"reduction operation '{f_name}' not allowed for this dtype") try: return f(*args, **kwargs) except ValueError as e: if is_object_dtype(args[0]): raise TypeError(e) from e raise return cast(F, _f) class bottleneck_switch: def __init__(self, name=None, **kwargs) -> None: self.name = name self.kwargs = kwargs def __call__(self, alt: F) -> F: bn_name = self.name or alt.__name__ try: bn_func = getattr(bn, bn_name) except (AttributeError, NameError): bn_func = None @functools.wraps(alt) def f(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, **kwds): if len(self.kwargs) > 0: for (k, v) in self.kwargs.items(): if k not in kwds: kwds[k] = v if values.size == 0 and kwds.get('min_count') is None: return _na_for_min_count(values, axis) if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name): if kwds.get('mask', None) is None: kwds.pop('mask', None) result = bn_func(values, axis=axis, **kwds) if _has_infs(result): result = alt(values, axis=axis, skipna=skipna, **kwds) else: result = alt(values, axis=axis, skipna=skipna, **kwds) else: result = alt(values, axis=axis, skipna=skipna, **kwds) return result return cast(F, f) def _bn_ok_dtype(dtype: DtypeObj, name: str) -> bool: if dtype != object and (not needs_i8_conversion(dtype)): return name not in ['nansum', 'nanprod', 'nanmean'] return False def _has_infs(result) -> bool: if isinstance(result, np.ndarray): if result.dtype in ('f8', 'f4'): return lib.has_infs(result.ravel('K')) try: return np.isinf(result).any() except (TypeError, NotImplementedError): return False def _get_fill_value(dtype: DtypeObj, fill_value: Scalar | None=None, fill_value_typ=None): if fill_value is not None: return fill_value if _na_ok_dtype(dtype): if fill_value_typ is None: return np.nan elif fill_value_typ == '+inf': return np.inf else: return -np.inf elif fill_value_typ == '+inf': return lib.i8max else: return iNaT def _maybe_get_mask(values: np.ndarray, skipna: bool, mask: npt.NDArray[np.bool_] | None) -> npt.NDArray[np.bool_] | None: if mask is None: if values.dtype.kind in 'biu': return None if skipna or values.dtype.kind in 'mM': mask = isna(values) return mask def _get_values(values: np.ndarray, skipna: bool, fill_value: Any=None, fill_value_typ: str 
| None=None, mask: npt.NDArray[np.bool_] | None=None) -> tuple[np.ndarray, npt.NDArray[np.bool_] | None]: mask = _maybe_get_mask(values, skipna, mask) dtype = values.dtype datetimelike = False if values.dtype.kind in 'mM': values = np.asarray(values.view('i8')) datetimelike = True if skipna and mask is not None: fill_value = _get_fill_value(dtype, fill_value=fill_value, fill_value_typ=fill_value_typ) if fill_value is not None: if mask.any(): if datetimelike or _na_ok_dtype(dtype): values = values.copy() np.putmask(values, mask, fill_value) else: values = np.where(~mask, values, fill_value) return (values, mask) def _get_dtype_max(dtype: np.dtype) -> np.dtype: dtype_max = dtype if dtype.kind in 'bi': dtype_max = np.dtype(np.int64) elif dtype.kind == 'u': dtype_max = np.dtype(np.uint64) elif dtype.kind == 'f': dtype_max = np.dtype(np.float64) return dtype_max def _na_ok_dtype(dtype: DtypeObj) -> bool: if needs_i8_conversion(dtype): return False return not issubclass(dtype.type, np.integer) def _wrap_results(result, dtype: np.dtype, fill_value=None): if result is NaT: pass elif dtype.kind == 'M': if fill_value is None: fill_value = iNaT if not isinstance(result, np.ndarray): assert not isna(fill_value), 'Expected non-null fill_value' if result == fill_value: result = np.nan if isna(result): result = np.datetime64('NaT', 'ns').astype(dtype) else: result = np.int64(result).view(dtype) result = result.astype(dtype, copy=False) else: result = result.astype(dtype) elif dtype.kind == 'm': if not isinstance(result, np.ndarray): if result == fill_value or np.isnan(result): result = np.timedelta64('NaT').astype(dtype) elif np.fabs(result) > lib.i8max: raise ValueError('overflow in timedelta operation') else: result = np.int64(result).astype(dtype, copy=False) else: result = result.astype('m8[ns]').view(dtype) return result def _datetimelike_compat(func: F) -> F: @functools.wraps(func) def new_func(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, mask: npt.NDArray[np.bool_] | None=None, **kwargs): orig_values = values datetimelike = values.dtype.kind in 'mM' if datetimelike and mask is None: mask = isna(values) result = func(values, axis=axis, skipna=skipna, mask=mask, **kwargs) if datetimelike: result = _wrap_results(result, orig_values.dtype, fill_value=iNaT) if not skipna: assert mask is not None result = _mask_datetimelike_result(result, axis, mask, orig_values) return result return cast(F, new_func) def _na_for_min_count(values: np.ndarray, axis: AxisInt | None) -> Scalar | np.ndarray: if values.dtype.kind in 'iufcb': values = values.astype('float64') fill_value = na_value_for_dtype(values.dtype) if values.ndim == 1: return fill_value elif axis is None: return fill_value else: result_shape = values.shape[:axis] + values.shape[axis + 1:] return np.full(result_shape, fill_value, dtype=values.dtype) def maybe_operate_rowwise(func: F) -> F: @functools.wraps(func) def newfunc(values: np.ndarray, *, axis: AxisInt | None=None, **kwargs): if axis == 1 and values.ndim == 2 and values.flags['C_CONTIGUOUS'] and (values.shape[1] / 1000 > values.shape[0]) and (values.dtype != object) and (values.dtype != bool): arrs = list(values) if kwargs.get('mask') is not None: mask = kwargs.pop('mask') results = [func(arrs[i], mask=mask[i], **kwargs) for i in range(len(arrs))] else: results = [func(x, **kwargs) for x in arrs] return np.array(results) return func(values, axis=axis, **kwargs) return cast(F, newfunc) def nanany(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, mask: 
npt.NDArray[np.bool_] | None=None) -> bool: if values.dtype.kind in 'iub' and mask is None: return values.any(axis) if values.dtype.kind == 'M': raise TypeError("datetime64 type does not support operation 'any'") (values, _) = _get_values(values, skipna, fill_value=False, mask=mask) if values.dtype == object: values = values.astype(bool) return values.any(axis) def nanall(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, mask: npt.NDArray[np.bool_] | None=None) -> bool: if values.dtype.kind in 'iub' and mask is None: return values.all(axis) if values.dtype.kind == 'M': raise TypeError("datetime64 type does not support operation 'all'") (values, _) = _get_values(values, skipna, fill_value=True, mask=mask) if values.dtype == object: values = values.astype(bool) return values.all(axis) @disallow('M8') @_datetimelike_compat @maybe_operate_rowwise def nansum(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, min_count: int=0, mask: npt.NDArray[np.bool_] | None=None) -> float: dtype = values.dtype (values, mask) = _get_values(values, skipna, fill_value=0, mask=mask) dtype_sum = _get_dtype_max(dtype) if dtype.kind == 'f': dtype_sum = dtype elif dtype.kind == 'm': dtype_sum = np.dtype(np.float64) the_sum = values.sum(axis, dtype=dtype_sum) the_sum = _maybe_null_out(the_sum, axis, mask, values.shape, min_count=min_count) return the_sum def _mask_datetimelike_result(result: np.ndarray | np.datetime64 | np.timedelta64, axis: AxisInt | None, mask: npt.NDArray[np.bool_], orig_values: np.ndarray) -> np.ndarray | np.datetime64 | np.timedelta64 | NaTType: if isinstance(result, np.ndarray): result = result.astype('i8').view(orig_values.dtype) axis_mask = mask.any(axis=axis) result[axis_mask] = iNaT elif mask.any(): return np.int64(iNaT).view(orig_values.dtype) return result @bottleneck_switch() @_datetimelike_compat def nanmean(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, mask: npt.NDArray[np.bool_] | None=None) -> float: dtype = values.dtype (values, mask) = _get_values(values, skipna, fill_value=0, mask=mask) dtype_sum = _get_dtype_max(dtype) dtype_count = np.dtype(np.float64) if dtype.kind in 'mM': dtype_sum = np.dtype(np.float64) elif dtype.kind in 'iu': dtype_sum = np.dtype(np.float64) elif dtype.kind == 'f': dtype_sum = dtype dtype_count = dtype count = _get_counts(values.shape, mask, axis, dtype=dtype_count) the_sum = values.sum(axis, dtype=dtype_sum) the_sum = _ensure_numeric(the_sum) if axis is not None and getattr(the_sum, 'ndim', False): count = cast(np.ndarray, count) with np.errstate(all='ignore'): the_mean = the_sum / count ct_mask = count == 0 if ct_mask.any(): the_mean[ct_mask] = np.nan else: the_mean = the_sum / count if count > 0 else np.nan return the_mean @bottleneck_switch() def nanmedian(values, *, axis: AxisInt | None=None, skipna: bool=True, mask=None): using_nan_sentinel = values.dtype.kind == 'f' and mask is None def get_median(x, _mask=None): if _mask is None: _mask = notna(x) else: _mask = ~_mask if not skipna and (not _mask.all()): return np.nan with warnings.catch_warnings(): warnings.filterwarnings('ignore', 'All-NaN slice encountered', RuntimeWarning) warnings.filterwarnings('ignore', 'Mean of empty slice', RuntimeWarning) res = np.nanmedian(x[_mask]) return res dtype = values.dtype (values, mask) = _get_values(values, skipna, mask=mask, fill_value=None) if values.dtype.kind != 'f': if values.dtype == object: inferred = lib.infer_dtype(values) if inferred in ['string', 'mixed']: raise TypeError(f'Cannot convert 
{values} to numeric') try: values = values.astype('f8') except ValueError as err: raise TypeError(str(err)) from err if not using_nan_sentinel and mask is not None: if not values.flags.writeable: values = values.copy() values[mask] = np.nan notempty = values.size if values.ndim > 1 and axis is not None: if notempty: if not skipna: res = np.apply_along_axis(get_median, axis, values) else: with warnings.catch_warnings(): warnings.filterwarnings('ignore', 'All-NaN slice encountered', RuntimeWarning) if values.shape[1] == 1 and axis == 0 or (values.shape[0] == 1 and axis == 1): res = np.nanmedian(np.squeeze(values), keepdims=True) else: res = np.nanmedian(values, axis=axis) else: res = _get_empty_reduction_result(values.shape, axis) else: res = get_median(values, mask) if notempty else np.nan return _wrap_results(res, dtype) def _get_empty_reduction_result(shape: Shape, axis: AxisInt) -> np.ndarray: shp = np.array(shape) dims = np.arange(len(shape)) ret = np.empty(shp[dims != axis], dtype=np.float64) ret.fill(np.nan) return ret def _get_counts_nanvar(values_shape: Shape, mask: npt.NDArray[np.bool_] | None, axis: AxisInt | None, ddof: int, dtype: np.dtype=np.dtype(np.float64)) -> tuple[float | np.ndarray, float | np.ndarray]: count = _get_counts(values_shape, mask, axis, dtype=dtype) d = count - dtype.type(ddof) if is_float(count): if count <= ddof: count = np.nan d = np.nan else: count = cast(np.ndarray, count) mask = count <= ddof if mask.any(): np.putmask(d, mask, np.nan) np.putmask(count, mask, np.nan) return (count, d) @bottleneck_switch(ddof=1) def nanstd(values, *, axis: AxisInt | None=None, skipna: bool=True, ddof: int=1, mask=None): if values.dtype == 'M8[ns]': values = values.view('m8[ns]') orig_dtype = values.dtype (values, mask) = _get_values(values, skipna, mask=mask) result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)) return _wrap_results(result, orig_dtype) @disallow('M8', 'm8') @bottleneck_switch(ddof=1) def nanvar(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, ddof: int=1, mask=None): dtype = values.dtype mask = _maybe_get_mask(values, skipna, mask) if dtype.kind in 'iu': values = values.astype('f8') if mask is not None: values[mask] = np.nan if values.dtype.kind == 'f': (count, d) = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype) else: (count, d) = _get_counts_nanvar(values.shape, mask, axis, ddof) if skipna and mask is not None: values = values.copy() np.putmask(values, mask, 0) avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count if axis is not None: avg = np.expand_dims(avg, axis) sqr = _ensure_numeric((avg - values) ** 2) if mask is not None: np.putmask(sqr, mask, 0) result = sqr.sum(axis=axis, dtype=np.float64) / d if dtype.kind == 'f': result = result.astype(dtype, copy=False) return result @disallow('M8', 'm8') def nansem(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, ddof: int=1, mask: npt.NDArray[np.bool_] | None=None) -> float: nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask) mask = _maybe_get_mask(values, skipna, mask) if values.dtype.kind != 'f': values = values.astype('f8') if not skipna and mask is not None and mask.any(): return np.nan (count, _) = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype) var = nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask) return np.sqrt(var) / np.sqrt(count) def _nanminmax(meth, fill_value_typ): @bottleneck_switch(name=f'nan{meth}') @_datetimelike_compat def reduction(values: 
np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, mask: npt.NDArray[np.bool_] | None=None): if values.size == 0: return _na_for_min_count(values, axis) (values, mask) = _get_values(values, skipna, fill_value_typ=fill_value_typ, mask=mask) result = getattr(values, meth)(axis) result = _maybe_null_out(result, axis, mask, values.shape) return result return reduction nanmin = _nanminmax('min', fill_value_typ='+inf') nanmax = _nanminmax('max', fill_value_typ='-inf') def nanargmax(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, mask: npt.NDArray[np.bool_] | None=None) -> int | np.ndarray: (values, mask) = _get_values(values, True, fill_value_typ='-inf', mask=mask) result = values.argmax(axis) result = _maybe_arg_null_out(result, axis, mask, skipna) return result def nanargmin(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, mask: npt.NDArray[np.bool_] | None=None) -> int | np.ndarray: (values, mask) = _get_values(values, True, fill_value_typ='+inf', mask=mask) result = values.argmin(axis) result = _maybe_arg_null_out(result, axis, mask, skipna) return result @disallow('M8', 'm8') @maybe_operate_rowwise def nanskew(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, mask: npt.NDArray[np.bool_] | None=None) -> float: mask = _maybe_get_mask(values, skipna, mask) if values.dtype.kind != 'f': values = values.astype('f8') count = _get_counts(values.shape, mask, axis) else: count = _get_counts(values.shape, mask, axis, dtype=values.dtype) if skipna and mask is not None: values = values.copy() np.putmask(values, mask, 0) elif not skipna and mask is not None and mask.any(): return np.nan with np.errstate(invalid='ignore', divide='ignore'): mean = values.sum(axis, dtype=np.float64) / count if axis is not None: mean = np.expand_dims(mean, axis) adjusted = values - mean if skipna and mask is not None: np.putmask(adjusted, mask, 0) adjusted2 = adjusted ** 2 adjusted3 = adjusted2 * adjusted m2 = adjusted2.sum(axis, dtype=np.float64) m3 = adjusted3.sum(axis, dtype=np.float64) m2 = _zero_out_fperr(m2) m3 = _zero_out_fperr(m3) with np.errstate(invalid='ignore', divide='ignore'): result = count * (count - 1) ** 0.5 / (count - 2) * (m3 / m2 ** 1.5) dtype = values.dtype if dtype.kind == 'f': result = result.astype(dtype, copy=False) if isinstance(result, np.ndarray): result = np.where(m2 == 0, 0, result) result[count < 3] = np.nan else: result = dtype.type(0) if m2 == 0 else result if count < 3: return np.nan return result @disallow('M8', 'm8') @maybe_operate_rowwise def nankurt(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, mask: npt.NDArray[np.bool_] | None=None) -> float: mask = _maybe_get_mask(values, skipna, mask) if values.dtype.kind != 'f': values = values.astype('f8') count = _get_counts(values.shape, mask, axis) else: count = _get_counts(values.shape, mask, axis, dtype=values.dtype) if skipna and mask is not None: values = values.copy() np.putmask(values, mask, 0) elif not skipna and mask is not None and mask.any(): return np.nan with np.errstate(invalid='ignore', divide='ignore'): mean = values.sum(axis, dtype=np.float64) / count if axis is not None: mean = np.expand_dims(mean, axis) adjusted = values - mean if skipna and mask is not None: np.putmask(adjusted, mask, 0) adjusted2 = adjusted ** 2 adjusted4 = adjusted2 ** 2 m2 = adjusted2.sum(axis, dtype=np.float64) m4 = adjusted4.sum(axis, dtype=np.float64) with np.errstate(invalid='ignore', divide='ignore'): adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3)) 
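# (Comment added for exposition; not in the pandas source.) The statements above and
# below assemble the adjusted Fisher-Pearson estimator of excess kurtosis, where
# m2 = sum((x - mean)**2) and m4 = sum((x - mean)**4):
#
#     G2 = n*(n+1)*(n-1)*m4 / ((n-2)*(n-3)*m2**2) - 3*(n-1)**2 / ((n-2)*(n-3))
#
# ``adj`` is the trailing 3*(n-1)**2/((n-2)*(n-3)) term, and the result is set to NaN
# whenever fewer than four observations are available (count < 4).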
numerator = count * (count + 1) * (count - 1) * m4 denominator = (count - 2) * (count - 3) * m2 ** 2 numerator = _zero_out_fperr(numerator) denominator = _zero_out_fperr(denominator) if not isinstance(denominator, np.ndarray): if count < 4: return np.nan if denominator == 0: return values.dtype.type(0) with np.errstate(invalid='ignore', divide='ignore'): result = numerator / denominator - adj dtype = values.dtype if dtype.kind == 'f': result = result.astype(dtype, copy=False) if isinstance(result, np.ndarray): result = np.where(denominator == 0, 0, result) result[count < 4] = np.nan return result @disallow('M8', 'm8') @maybe_operate_rowwise def nanprod(values: np.ndarray, *, axis: AxisInt | None=None, skipna: bool=True, min_count: int=0, mask: npt.NDArray[np.bool_] | None=None) -> float: mask = _maybe_get_mask(values, skipna, mask) if skipna and mask is not None: values = values.copy() values[mask] = 1 result = values.prod(axis) return _maybe_null_out(result, axis, mask, values.shape, min_count=min_count) def _maybe_arg_null_out(result: np.ndarray, axis: AxisInt | None, mask: npt.NDArray[np.bool_] | None, skipna: bool) -> np.ndarray | int: if mask is None: return result if axis is None or not getattr(result, 'ndim', False): if skipna and mask.all(): raise ValueError('Encountered all NA values') elif not skipna and mask.any(): raise ValueError('Encountered an NA value with skipna=False') elif skipna and mask.all(axis).any(): raise ValueError('Encountered all NA values') elif not skipna and mask.any(axis).any(): raise ValueError('Encountered an NA value with skipna=False') return result def _get_counts(values_shape: Shape, mask: npt.NDArray[np.bool_] | None, axis: AxisInt | None, dtype: np.dtype[np.floating]=np.dtype(np.float64)) -> np.floating | npt.NDArray[np.floating]: if axis is None: if mask is not None: n = mask.size - mask.sum() else: n = np.prod(values_shape) return dtype.type(n) if mask is not None: count = mask.shape[axis] - mask.sum(axis) else: count = values_shape[axis] if is_integer(count): return dtype.type(count) return count.astype(dtype, copy=False) def _maybe_null_out(result: np.ndarray | float | NaTType, axis: AxisInt | None, mask: npt.NDArray[np.bool_] | None, shape: tuple[int, ...], min_count: int=1) -> np.ndarray | float | NaTType: if mask is None and min_count == 0: return result if axis is not None and isinstance(result, np.ndarray): if mask is not None: null_mask = mask.shape[axis] - mask.sum(axis) - min_count < 0 else: below_count = shape[axis] - min_count < 0 new_shape = shape[:axis] + shape[axis + 1:] null_mask = np.broadcast_to(below_count, new_shape) if np.any(null_mask): if is_numeric_dtype(result): if np.iscomplexobj(result): result = result.astype('c16') elif not is_float_dtype(result): result = result.astype('f8', copy=False) result[null_mask] = np.nan else: result[null_mask] = None elif result is not NaT: if check_below_min_count(shape, mask, min_count): result_dtype = getattr(result, 'dtype', None) if is_float_dtype(result_dtype): result = result_dtype.type('nan') else: result = np.nan return result def check_below_min_count(shape: tuple[int, ...], mask: npt.NDArray[np.bool_] | None, min_count: int) -> bool: if min_count > 0: if mask is None: non_nulls = np.prod(shape) else: non_nulls = mask.size - mask.sum() if non_nulls < min_count: return True return False def _zero_out_fperr(arg): if isinstance(arg, np.ndarray): return np.where(np.abs(arg) < 1e-14, 0, arg) else: return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg @disallow('M8', 'm8') def 
nancorr(a: np.ndarray, b: np.ndarray, *, method: CorrelationMethod='pearson', min_periods: int | None=None) -> float: if len(a) != len(b): raise AssertionError('Operands to nancorr must have same size') if min_periods is None: min_periods = 1 valid = notna(a) & notna(b) if not valid.all(): a = a[valid] b = b[valid] if len(a) < min_periods: return np.nan a = _ensure_numeric(a) b = _ensure_numeric(b) f = get_corr_func(method) return f(a, b) def get_corr_func(method: CorrelationMethod) -> Callable[[np.ndarray, np.ndarray], float]: if method == 'kendall': from scipy.stats import kendalltau def func(a, b): return kendalltau(a, b)[0] return func elif method == 'spearman': from scipy.stats import spearmanr def func(a, b): return spearmanr(a, b)[0] return func elif method == 'pearson': def func(a, b): return np.corrcoef(a, b)[0, 1] return func elif callable(method): return method raise ValueError(f"Unknown method '{method}', expected one of 'kendall', 'spearman', 'pearson', or callable") @disallow('M8', 'm8') def nancov(a: np.ndarray, b: np.ndarray, *, min_periods: int | None=None, ddof: int | None=1) -> float: if len(a) != len(b): raise AssertionError('Operands to nancov must have same size') if min_periods is None: min_periods = 1 valid = notna(a) & notna(b) if not valid.all(): a = a[valid] b = b[valid] if len(a) < min_periods: return np.nan a = _ensure_numeric(a) b = _ensure_numeric(b) return np.cov(a, b, ddof=ddof)[0, 1] def _ensure_numeric(x): if isinstance(x, np.ndarray): if x.dtype.kind in 'biu': x = x.astype(np.float64) elif x.dtype == object: inferred = lib.infer_dtype(x) if inferred in ['string', 'mixed']: raise TypeError(f'Could not convert {x} to numeric') try: x = x.astype(np.complex128) except (TypeError, ValueError): try: x = x.astype(np.float64) except ValueError as err: raise TypeError(f'Could not convert {x} to numeric') from err else: if not np.any(np.imag(x)): x = x.real elif not (is_float(x) or is_integer(x) or is_complex(x)): if isinstance(x, str): raise TypeError(f"Could not convert string '{x}' to numeric") try: x = float(x) except (TypeError, ValueError): try: x = complex(x) except ValueError as err: raise TypeError(f'Could not convert {x} to numeric') from err return x def na_accum_func(values: ArrayLike, accum_func, *, skipna: bool) -> ArrayLike: (mask_a, mask_b) = {np.cumprod: (1.0, np.nan), np.maximum.accumulate: (-np.inf, np.nan), np.cumsum: (0.0, np.nan), np.minimum.accumulate: (np.inf, np.nan)}[accum_func] assert values.dtype.kind not in 'mM' if skipna and (not issubclass(values.dtype.type, (np.integer, np.bool_))): vals = values.copy() mask = isna(vals) vals[mask] = mask_a result = accum_func(vals, axis=0) result[mask] = mask_b else: result = accum_func(values, axis=0) return result # File: pandas-main/pandas/core/ops/__init__.py """""" from __future__ import annotations from pandas.core.ops.array_ops import arithmetic_op, comp_method_OBJECT_ARRAY, comparison_op, fill_binop, get_array_op, logical_op, maybe_prepare_scalar_for_op from pandas.core.ops.common import get_op_result_name, unpack_zerodim_and_defer from pandas.core.ops.docstrings import make_flex_doc from pandas.core.ops.invalid import invalid_comparison from pandas.core.ops.mask_ops import kleene_and, kleene_or, kleene_xor from pandas.core.roperator import radd, rand_, rdiv, rdivmod, rfloordiv, rmod, rmul, ror_, rpow, rsub, rtruediv, rxor ARITHMETIC_BINOPS: set[str] = {'add', 'sub', 'mul', 'pow', 'mod', 'floordiv', 'truediv', 'divmod', 'radd', 'rsub', 'rmul', 'rpow', 'rmod', 'rfloordiv', 'rtruediv', 
'rdivmod'} __all__ = ['ARITHMETIC_BINOPS', 'arithmetic_op', 'comparison_op', 'comp_method_OBJECT_ARRAY', 'invalid_comparison', 'fill_binop', 'kleene_and', 'kleene_or', 'kleene_xor', 'logical_op', 'make_flex_doc', 'radd', 'rand_', 'rdiv', 'rdivmod', 'rfloordiv', 'rmod', 'rmul', 'ror_', 'rpow', 'rsub', 'rtruediv', 'rxor', 'unpack_zerodim_and_defer', 'get_op_result_name', 'maybe_prepare_scalar_for_op', 'get_array_op'] # File: pandas-main/pandas/core/ops/array_ops.py """""" from __future__ import annotations import datetime from functools import partial import operator from typing import TYPE_CHECKING, Any import numpy as np from pandas._libs import NaT, Timedelta, Timestamp, lib, ops as libops from pandas._libs.tslibs import BaseOffset, get_supported_dtype, is_supported_dtype, is_unitless from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike, find_common_type from pandas.core.dtypes.common import ensure_object, is_bool_dtype, is_list_like, is_numeric_v_string_like, is_object_dtype, is_scalar from pandas.core.dtypes.generic import ABCExtensionArray, ABCIndex, ABCSeries from pandas.core.dtypes.missing import isna, notna from pandas.core import roperator from pandas.core.computation import expressions from pandas.core.construction import ensure_wrapped_if_datetimelike from pandas.core.ops import missing from pandas.core.ops.dispatch import should_extension_dispatch from pandas.core.ops.invalid import invalid_comparison if TYPE_CHECKING: from pandas._typing import ArrayLike, Shape def fill_binop(left, right, fill_value): if fill_value is not None: left_mask = isna(left) right_mask = isna(right) mask = left_mask ^ right_mask if left_mask.any(): left = left.copy() left[left_mask & mask] = fill_value if right_mask.any(): right = right.copy() right[right_mask & mask] = fill_value return (left, right) def comp_method_OBJECT_ARRAY(op, x, y): if isinstance(y, list): y = construct_1d_object_array_from_listlike(y) if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)): if not is_object_dtype(y.dtype): y = y.astype(np.object_) if isinstance(y, (ABCSeries, ABCIndex)): y = y._values if x.shape != y.shape: raise ValueError('Shapes must match', x.shape, y.shape) result = libops.vec_compare(x.ravel(), y.ravel(), op) else: result = libops.scalar_compare(x.ravel(), y, op) return result.reshape(x.shape) def _masked_arith_op(x: np.ndarray, y, op) -> np.ndarray: xrav = x.ravel() if isinstance(y, np.ndarray): dtype = find_common_type([x.dtype, y.dtype]) result = np.empty(x.size, dtype=dtype) if len(x) != len(y): raise ValueError(x.shape, y.shape) ymask = notna(y) yrav = y.ravel() mask = notna(xrav) & ymask.ravel() if mask.any(): result[mask] = op(xrav[mask], yrav[mask]) else: if not is_scalar(y): raise TypeError(f'Cannot broadcast np.ndarray with operand of type {type(y)}') result = np.empty(x.size, dtype=x.dtype) mask = notna(xrav) if op is pow: mask = np.where(x == 1, False, mask) elif op is roperator.rpow: mask = np.where(y == 1, False, mask) if mask.any(): result[mask] = op(xrav[mask], y) np.putmask(result, ~mask, np.nan) result = result.reshape(x.shape) return result def _na_arithmetic_op(left: np.ndarray, right, op, is_cmp: bool=False): if isinstance(right, str): func = op else: func = partial(expressions.evaluate, op) try: result = func(left, right) except TypeError: if not is_cmp and (left.dtype == object or getattr(right, 'dtype', None) == object): result = _masked_arith_op(left, right, op) else: raise if is_cmp and (is_scalar(result) or result is NotImplemented): return 
invalid_comparison(left, right, op) return missing.dispatch_fill_zeros(op, left, right, result) def arithmetic_op(left: ArrayLike, right: Any, op): if should_extension_dispatch(left, right) or isinstance(right, (Timedelta, BaseOffset, Timestamp)) or right is NaT: res_values = op(left, right) else: _bool_arith_check(op, left, right) res_values = _na_arithmetic_op(left, right, op) return res_values def comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike: lvalues = ensure_wrapped_if_datetimelike(left) rvalues = ensure_wrapped_if_datetimelike(right) rvalues = lib.item_from_zerodim(rvalues) if isinstance(rvalues, list): rvalues = np.asarray(rvalues) if isinstance(rvalues, (np.ndarray, ABCExtensionArray)): if len(lvalues) != len(rvalues): raise ValueError('Lengths must match to compare', lvalues.shape, rvalues.shape) if should_extension_dispatch(lvalues, rvalues) or ((isinstance(rvalues, (Timedelta, BaseOffset, Timestamp)) or right is NaT) and lvalues.dtype != object): res_values = op(lvalues, rvalues) elif is_scalar(rvalues) and isna(rvalues): if op is operator.ne: res_values = np.ones(lvalues.shape, dtype=bool) else: res_values = np.zeros(lvalues.shape, dtype=bool) elif is_numeric_v_string_like(lvalues, rvalues): return invalid_comparison(lvalues, rvalues, op) elif lvalues.dtype == object or isinstance(rvalues, str): res_values = comp_method_OBJECT_ARRAY(op, lvalues, rvalues) else: res_values = _na_arithmetic_op(lvalues, rvalues, op, is_cmp=True) return res_values def na_logical_op(x: np.ndarray, y, op): try: result = op(x, y) except TypeError: if isinstance(y, np.ndarray): assert not (x.dtype.kind == 'b' and y.dtype.kind == 'b') x = ensure_object(x) y = ensure_object(y) result = libops.vec_binop(x.ravel(), y.ravel(), op) else: assert lib.is_scalar(y) if not isna(y): y = bool(y) try: result = libops.scalar_binop(x, y, op) except (TypeError, ValueError, AttributeError, OverflowError, NotImplementedError) as err: typ = type(y).__name__ raise TypeError(f"Cannot perform '{op.__name__}' with a dtyped [{x.dtype}] array and scalar of type [{typ}]") from err return result.reshape(x.shape) def logical_op(left: ArrayLike, right: Any, op) -> ArrayLike: def fill_bool(x, left=None): if x.dtype.kind in 'cfO': mask = isna(x) if mask.any(): x = x.astype(object) x[mask] = False if left is None or left.dtype.kind == 'b': x = x.astype(bool) return x right = lib.item_from_zerodim(right) if is_list_like(right) and (not hasattr(right, 'dtype')): raise TypeError('Logical ops (and, or, xor) between Pandas objects and dtype-less sequences (e.g. list, tuple) are no longer supported. 
Wrap the object in a Series, Index, or np.array before operating instead.') lvalues = ensure_wrapped_if_datetimelike(left) rvalues = right if should_extension_dispatch(lvalues, rvalues): res_values = op(lvalues, rvalues) else: if isinstance(rvalues, np.ndarray): is_other_int_dtype = rvalues.dtype.kind in 'iu' if not is_other_int_dtype: rvalues = fill_bool(rvalues, lvalues) else: is_other_int_dtype = lib.is_integer(rvalues) res_values = na_logical_op(lvalues, rvalues, op) if not (left.dtype.kind in 'iu' and is_other_int_dtype): res_values = fill_bool(res_values) return res_values def get_array_op(op): if isinstance(op, partial): return op op_name = op.__name__.strip('_').lstrip('r') if op_name == 'arith_op': return op if op_name in {'eq', 'ne', 'lt', 'le', 'gt', 'ge'}: return partial(comparison_op, op=op) elif op_name in {'and', 'or', 'xor', 'rand', 'ror', 'rxor'}: return partial(logical_op, op=op) elif op_name in {'add', 'sub', 'mul', 'truediv', 'floordiv', 'mod', 'divmod', 'pow'}: return partial(arithmetic_op, op=op) else: raise NotImplementedError(op_name) def maybe_prepare_scalar_for_op(obj, shape: Shape): if type(obj) is datetime.timedelta: return Timedelta(obj) elif type(obj) is datetime.datetime: return Timestamp(obj) elif isinstance(obj, np.datetime64): if isna(obj): from pandas.core.arrays import DatetimeArray if is_unitless(obj.dtype): obj = obj.astype('datetime64[ns]') elif not is_supported_dtype(obj.dtype): new_dtype = get_supported_dtype(obj.dtype) obj = obj.astype(new_dtype) right = np.broadcast_to(obj, shape) return DatetimeArray._simple_new(right, dtype=right.dtype) return Timestamp(obj) elif isinstance(obj, np.timedelta64): if isna(obj): from pandas.core.arrays import TimedeltaArray if is_unitless(obj.dtype): obj = obj.astype('timedelta64[ns]') elif not is_supported_dtype(obj.dtype): new_dtype = get_supported_dtype(obj.dtype) obj = obj.astype(new_dtype) right = np.broadcast_to(obj, shape) return TimedeltaArray._simple_new(right, dtype=right.dtype) return Timedelta(obj) elif isinstance(obj, np.integer): return int(obj) elif isinstance(obj, np.floating): return float(obj) return obj _BOOL_OP_NOT_ALLOWED = {operator.truediv, roperator.rtruediv, operator.floordiv, roperator.rfloordiv, operator.pow, roperator.rpow} def _bool_arith_check(op, a: np.ndarray, b) -> None: if op in _BOOL_OP_NOT_ALLOWED: if a.dtype.kind == 'b' and (is_bool_dtype(b) or lib.is_bool(b)): op_name = op.__name__.strip('_').lstrip('r') raise NotImplementedError(f"operator '{op_name}' not implemented for bool dtypes") # File: pandas-main/pandas/core/ops/common.py """""" from __future__ import annotations from functools import wraps from typing import TYPE_CHECKING from pandas._libs.lib import item_from_zerodim from pandas._libs.missing import is_matching_na from pandas.core.dtypes.generic import ABCIndex, ABCSeries if TYPE_CHECKING: from collections.abc import Callable from pandas._typing import F def unpack_zerodim_and_defer(name: str) -> Callable[[F], F]: def wrapper(method: F) -> F: return _unpack_zerodim_and_defer(method, name) return wrapper def _unpack_zerodim_and_defer(method: F, name: str) -> F: stripped_name = name.removeprefix('__').removesuffix('__') is_cmp = stripped_name in {'eq', 'ne', 'lt', 'le', 'gt', 'ge'} @wraps(method) def new_method(self, other): if is_cmp and isinstance(self, ABCIndex) and isinstance(other, ABCSeries): pass else: prio = getattr(other, '__pandas_priority__', None) if prio is not None: if prio > self.__pandas_priority__: return NotImplemented other = item_from_zerodim(other) 
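# --- Illustrative sketch (not part of the pandas source above) ---
# new_method returns NotImplemented when `other` advertises a higher
# __pandas_priority__ than self, so Python falls back to other's reflected op.
# The value 5000 below is an assumption chosen to exceed the Series default.
import pandas as pd

class Wrapper:
    __pandas_priority__ = 5000  # assumed high enough to out-rank a Series

    def __radd__(self, other):
        return "Wrapper wins"

print(pd.Series([1, 2]) + Wrapper())  # Series defers -> "Wrapper wins"
# --- end sketch ---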
return method(self, other) return new_method def get_op_result_name(left, right): if isinstance(right, (ABCSeries, ABCIndex)): name = _maybe_match_name(left, right) else: name = left.name return name def _maybe_match_name(a, b): a_has = hasattr(a, 'name') b_has = hasattr(b, 'name') if a_has and b_has: try: if a.name == b.name: return a.name elif is_matching_na(a.name, b.name): return a.name else: return None except TypeError: if is_matching_na(a.name, b.name): return a.name return None except ValueError: return None elif a_has: return a.name elif b_has: return b.name return None # File: pandas-main/pandas/core/ops/dispatch.py """""" from __future__ import annotations from typing import TYPE_CHECKING, Any from pandas.core.dtypes.generic import ABCExtensionArray if TYPE_CHECKING: from pandas._typing import ArrayLike def should_extension_dispatch(left: ArrayLike, right: Any) -> bool: return isinstance(left, ABCExtensionArray) or isinstance(right, ABCExtensionArray) # File: pandas-main/pandas/core/ops/docstrings.py """""" from __future__ import annotations def make_flex_doc(op_name: str, typ: str) -> str: op_name = op_name.replace('__', '') op_desc = _op_descriptions[op_name] op_desc_op = op_desc['op'] assert op_desc_op is not None if op_name.startswith('r'): equiv = f'other {op_desc_op} {typ}' elif op_name == 'divmod': equiv = f'{op_name}({typ}, other)' else: equiv = f'{typ} {op_desc_op} other' if typ == 'series': base_doc = _flex_doc_SERIES if op_desc['reverse']: base_doc += _see_also_reverse_SERIES.format(reverse=op_desc['reverse'], see_also_desc=op_desc['see_also_desc']) doc_no_examples = base_doc.format(desc=op_desc['desc'], op_name=op_name, equiv=equiv, series_returns=op_desc['series_returns']) ser_example = op_desc['series_examples'] if ser_example: doc = doc_no_examples + ser_example else: doc = doc_no_examples elif typ == 'dataframe': if op_name in ['eq', 'ne', 'le', 'lt', 'ge', 'gt']: base_doc = _flex_comp_doc_FRAME doc = _flex_comp_doc_FRAME.format(op_name=op_name, desc=op_desc['desc']) else: base_doc = _flex_doc_FRAME doc = base_doc.format(desc=op_desc['desc'], op_name=op_name, equiv=equiv, reverse=op_desc['reverse']) else: raise AssertionError('Invalid typ argument.') return doc _common_examples_algebra_SERIES = "\nExamples\n--------\n>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd'])\n>>> a\na 1.0\nb 1.0\nc 1.0\nd NaN\ndtype: float64\n>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e'])\n>>> b\na 1.0\nb NaN\nd 1.0\ne NaN\ndtype: float64" _common_examples_comparison_SERIES = "\nExamples\n--------\n>>> a = pd.Series([1, 1, 1, np.nan, 1], index=['a', 'b', 'c', 'd', 'e'])\n>>> a\na 1.0\nb 1.0\nc 1.0\nd NaN\ne 1.0\ndtype: float64\n>>> b = pd.Series([0, 1, 2, np.nan, 1], index=['a', 'b', 'c', 'd', 'f'])\n>>> b\na 0.0\nb 1.0\nc 2.0\nd NaN\nf 1.0\ndtype: float64" _add_example_SERIES = _common_examples_algebra_SERIES + '\n>>> a.add(b, fill_value=0)\na 2.0\nb 1.0\nc 1.0\nd 1.0\ne NaN\ndtype: float64\n' _sub_example_SERIES = _common_examples_algebra_SERIES + '\n>>> a.subtract(b, fill_value=0)\na 0.0\nb 1.0\nc 1.0\nd -1.0\ne NaN\ndtype: float64\n' _mul_example_SERIES = _common_examples_algebra_SERIES + '\n>>> a.multiply(b, fill_value=0)\na 1.0\nb 0.0\nc 0.0\nd 0.0\ne NaN\ndtype: float64\n' _div_example_SERIES = _common_examples_algebra_SERIES + '\n>>> a.divide(b, fill_value=0)\na 1.0\nb inf\nc inf\nd 0.0\ne NaN\ndtype: float64\n' _floordiv_example_SERIES = _common_examples_algebra_SERIES + '\n>>> a.floordiv(b, fill_value=0)\na 1.0\nb inf\nc inf\nd 0.0\ne 
NaN\ndtype: float64\n' _divmod_example_SERIES = _common_examples_algebra_SERIES + '\n>>> a.divmod(b, fill_value=0)\n(a 1.0\n b inf\n c inf\n d 0.0\n e NaN\n dtype: float64,\n a 0.0\n b NaN\n c NaN\n d 0.0\n e NaN\n dtype: float64)\n' _mod_example_SERIES = _common_examples_algebra_SERIES + '\n>>> a.mod(b, fill_value=0)\na 0.0\nb NaN\nc NaN\nd 0.0\ne NaN\ndtype: float64\n' _pow_example_SERIES = _common_examples_algebra_SERIES + '\n>>> a.pow(b, fill_value=0)\na 1.0\nb 1.0\nc 1.0\nd 0.0\ne NaN\ndtype: float64\n' _ne_example_SERIES = _common_examples_algebra_SERIES + '\n>>> a.ne(b, fill_value=0)\na False\nb True\nc True\nd True\ne True\ndtype: bool\n' _eq_example_SERIES = _common_examples_algebra_SERIES + '\n>>> a.eq(b, fill_value=0)\na True\nb False\nc False\nd False\ne False\ndtype: bool\n' _lt_example_SERIES = _common_examples_comparison_SERIES + '\n>>> a.lt(b, fill_value=0)\na False\nb False\nc True\nd False\ne False\nf True\ndtype: bool\n' _le_example_SERIES = _common_examples_comparison_SERIES + '\n>>> a.le(b, fill_value=0)\na False\nb True\nc True\nd False\ne False\nf True\ndtype: bool\n' _gt_example_SERIES = _common_examples_comparison_SERIES + '\n>>> a.gt(b, fill_value=0)\na True\nb False\nc False\nd False\ne True\nf False\ndtype: bool\n' _ge_example_SERIES = _common_examples_comparison_SERIES + '\n>>> a.ge(b, fill_value=0)\na True\nb True\nc False\nd False\ne True\nf False\ndtype: bool\n' _returns_series = 'Series\n The result of the operation.' _returns_tuple = '2-Tuple of Series\n The result of the operation.' _op_descriptions: dict[str, dict[str, str | None]] = {'add': {'op': '+', 'desc': 'Addition', 'reverse': 'radd', 'series_examples': _add_example_SERIES, 'series_returns': _returns_series}, 'sub': {'op': '-', 'desc': 'Subtraction', 'reverse': 'rsub', 'series_examples': _sub_example_SERIES, 'series_returns': _returns_series}, 'mul': {'op': '*', 'desc': 'Multiplication', 'reverse': 'rmul', 'series_examples': _mul_example_SERIES, 'series_returns': _returns_series, 'df_examples': None}, 'mod': {'op': '%', 'desc': 'Modulo', 'reverse': 'rmod', 'series_examples': _mod_example_SERIES, 'series_returns': _returns_series}, 'pow': {'op': '**', 'desc': 'Exponential power', 'reverse': 'rpow', 'series_examples': _pow_example_SERIES, 'series_returns': _returns_series, 'df_examples': None}, 'truediv': {'op': '/', 'desc': 'Floating division', 'reverse': 'rtruediv', 'series_examples': _div_example_SERIES, 'series_returns': _returns_series, 'df_examples': None}, 'floordiv': {'op': '//', 'desc': 'Integer division', 'reverse': 'rfloordiv', 'series_examples': _floordiv_example_SERIES, 'series_returns': _returns_series, 'df_examples': None}, 'divmod': {'op': 'divmod', 'desc': 'Integer division and modulo', 'reverse': 'rdivmod', 'series_examples': _divmod_example_SERIES, 'series_returns': _returns_tuple, 'df_examples': None}, 'eq': {'op': '==', 'desc': 'Equal to', 'reverse': None, 'series_examples': _eq_example_SERIES, 'series_returns': _returns_series}, 'ne': {'op': '!=', 'desc': 'Not equal to', 'reverse': 'eq', 'series_examples': _ne_example_SERIES, 'series_returns': _returns_series}, 'lt': {'op': '<', 'desc': 'Less than', 'reverse': None, 'series_examples': _lt_example_SERIES, 'series_returns': _returns_series}, 'le': {'op': '<=', 'desc': 'Less than or equal to', 'reverse': None, 'series_examples': _le_example_SERIES, 'series_returns': _returns_series}, 'gt': {'op': '>', 'desc': 'Greater than', 'reverse': 'lt', 'series_examples': _gt_example_SERIES, 'series_returns': _returns_series}, 'ge': {'op': 
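# --- Illustrative sketch (not part of the pandas source above) ---
# Two details the descriptions above encode: fill_value never rescues a position
# that is missing in *both* operands, and divmod returns a 2-tuple of Series
# (quotient, remainder) rather than a single object.
import numpy as np
import pandas as pd

a = pd.Series([7.0, np.nan, np.nan], index=list("abc"))
b = pd.Series([2.0, 3.0, np.nan], index=list("abc"))

print(a.add(b, fill_value=0))        # 'c' stays NaN: missing on both sides
quot, rem = a.divmod(b, fill_value=1)
print(quot)                          # divmod(7, 2) -> 3, divmod(1, 3) -> 0, 'c' NaN
print(rem)                           # 7 % 2 -> 1,      1 % 3 -> 1,      'c' NaN
# --- end sketch ---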
'>=', 'desc': 'Greater than or equal to', 'reverse': 'le', 'series_examples': _ge_example_SERIES, 'series_returns': _returns_series}} _py_num_ref = 'see\n `Python documentation\n `_\n for more details' _op_names = list(_op_descriptions.keys()) for key in _op_names: reverse_op = _op_descriptions[key]['reverse'] if reverse_op is not None: _op_descriptions[reverse_op] = _op_descriptions[key].copy() _op_descriptions[reverse_op]['reverse'] = key _op_descriptions[key]['see_also_desc'] = f"Reverse of the {_op_descriptions[key]['desc']} operator, {_py_num_ref}" _op_descriptions[reverse_op]['see_also_desc'] = f"Element-wise {_op_descriptions[key]['desc']}, {_py_num_ref}" _flex_doc_SERIES = "\nReturn {desc} of series and other, element-wise (binary operator `{op_name}`).\n\nEquivalent to ``{equiv}``, but with support to substitute a fill_value for\nmissing data in either one of the inputs.\n\nParameters\n----------\nother : Series or scalar value\n The second operand in this operation.\nlevel : int or name\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\nfill_value : None or float value, default None (NaN)\n Fill existing missing (NaN) values, and any new element needed for\n successful Series alignment, with this value before computation.\n If data in both corresponding Series locations is missing\n the result of filling (at that location) will be missing.\naxis : {{0 or 'index'}}\n Unused. Parameter needed for compatibility with DataFrame.\n\nReturns\n-------\n{series_returns}\n" _see_also_reverse_SERIES = '\nSee Also\n--------\nSeries.{reverse} : {see_also_desc}.\n' _flex_doc_FRAME = "\nGet {desc} of dataframe and other, element-wise (binary operator `{op_name}`).\n\nEquivalent to ``{equiv}``, but with support to substitute a fill_value\nfor missing data in one of the inputs. With reverse version, `{reverse}`.\n\nAmong flexible wrappers (`add`, `sub`, `mul`, `div`, `floordiv`, `mod`, `pow`) to\narithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`.\n\nParameters\n----------\nother : scalar, sequence, Series, dict or DataFrame\n Any single or multiple element data structure, or list-like object.\naxis : {{0 or 'index', 1 or 'columns'}}\n Whether to compare by the index (0 or 'index') or columns.\n (1 or 'columns'). For Series input, axis to match Series index on.\nlevel : int or label\n Broadcast across a level, matching Index values on the\n passed MultiIndex level.\nfill_value : float or None, default None\n Fill existing missing (NaN) values, and any new element needed for\n successful DataFrame alignment, with this value before computation.\n If data in both corresponding DataFrame locations is missing\n the result will be missing.\n\nReturns\n-------\nDataFrame\n Result of the arithmetic operation.\n\nSee Also\n--------\nDataFrame.add : Add DataFrames.\nDataFrame.sub : Subtract DataFrames.\nDataFrame.mul : Multiply DataFrames.\nDataFrame.div : Divide DataFrames (float division).\nDataFrame.truediv : Divide DataFrames (float division).\nDataFrame.floordiv : Divide DataFrames (integer division).\nDataFrame.mod : Calculate modulo (remainder after division).\nDataFrame.pow : Calculate exponential power.\n\nNotes\n-----\nMismatched indices will be unioned together.\n\nExamples\n--------\n>>> df = pd.DataFrame({{'angles': [0, 3, 4],\n... 'degrees': [360, 180, 360]}},\n... 
index=['circle', 'triangle', 'rectangle'])\n>>> df\n angles degrees\ncircle 0 360\ntriangle 3 180\nrectangle 4 360\n\nAdd a scalar with operator version which return the same\nresults.\n\n>>> df + 1\n angles degrees\ncircle 1 361\ntriangle 4 181\nrectangle 5 361\n\n>>> df.add(1)\n angles degrees\ncircle 1 361\ntriangle 4 181\nrectangle 5 361\n\nDivide by constant with reverse version.\n\n>>> df.div(10)\n angles degrees\ncircle 0.0 36.0\ntriangle 0.3 18.0\nrectangle 0.4 36.0\n\n>>> df.rdiv(10)\n angles degrees\ncircle inf 0.027778\ntriangle 3.333333 0.055556\nrectangle 2.500000 0.027778\n\nSubtract a list and Series by axis with operator version.\n\n>>> df - [1, 2]\n angles degrees\ncircle -1 358\ntriangle 2 178\nrectangle 3 358\n\n>>> df.sub([1, 2], axis='columns')\n angles degrees\ncircle -1 358\ntriangle 2 178\nrectangle 3 358\n\n>>> df.sub(pd.Series([1, 1, 1], index=['circle', 'triangle', 'rectangle']),\n... axis='index')\n angles degrees\ncircle -1 359\ntriangle 2 179\nrectangle 3 359\n\nMultiply a dictionary by axis.\n\n>>> df.mul({{'angles': 0, 'degrees': 2}})\n angles degrees\ncircle 0 720\ntriangle 0 360\nrectangle 0 720\n\n>>> df.mul({{'circle': 0, 'triangle': 2, 'rectangle': 3}}, axis='index')\n angles degrees\ncircle 0 0\ntriangle 6 360\nrectangle 12 1080\n\nMultiply a DataFrame of different shape with operator version.\n\n>>> other = pd.DataFrame({{'angles': [0, 3, 4]}},\n... index=['circle', 'triangle', 'rectangle'])\n>>> other\n angles\ncircle 0\ntriangle 3\nrectangle 4\n\n>>> df * other\n angles degrees\ncircle 0 NaN\ntriangle 9 NaN\nrectangle 16 NaN\n\n>>> df.mul(other, fill_value=0)\n angles degrees\ncircle 0 0.0\ntriangle 9 0.0\nrectangle 16 0.0\n\nDivide by a MultiIndex by level.\n\n>>> df_multindex = pd.DataFrame({{'angles': [0, 3, 4, 4, 5, 6],\n... 'degrees': [360, 180, 360, 360, 540, 720]}},\n... index=[['A', 'A', 'A', 'B', 'B', 'B'],\n... ['circle', 'triangle', 'rectangle',\n... 'square', 'pentagon', 'hexagon']])\n>>> df_multindex\n angles degrees\nA circle 0 360\n triangle 3 180\n rectangle 4 360\nB square 4 360\n pentagon 5 540\n hexagon 6 720\n\n>>> df.div(df_multindex, level=1, fill_value=0)\n angles degrees\nA circle NaN 1.0\n triangle 1.0 1.0\n rectangle 1.0 1.0\nB square 0.0 0.0\n pentagon 0.0 0.0\n hexagon 0.0 0.0\n\n>>> df_pow = pd.DataFrame({{'A': [2, 3, 4, 5],\n... 
'B': [6, 7, 8, 9]}})\n>>> df_pow.pow(2)\n A B\n0 4 36\n1 9 49\n2 16 64\n3 25 81\n" _flex_comp_doc_FRAME = '\nGet {desc} of dataframe and other, element-wise (binary operator `{op_name}`).\n\nAmong flexible wrappers (`eq`, `ne`, `le`, `lt`, `ge`, `gt`) to comparison\noperators.\n\nEquivalent to `==`, `!=`, `<=`, `<`, `>=`, `>` with support to choose axis\n(rows or columns) and level for comparison.\n\nParameters\n----------\nother : scalar, sequence, Series, or DataFrame\n Any single or multiple element data structure, or list-like object.\naxis : {{0 or \'index\', 1 or \'columns\'}}, default \'columns\'\n Whether to compare by the index (0 or \'index\') or columns\n (1 or \'columns\').\nlevel : int or label\n Broadcast across a level, matching Index values on the passed\n MultiIndex level.\n\nReturns\n-------\nDataFrame of bool\n Result of the comparison.\n\nSee Also\n--------\nDataFrame.eq : Compare DataFrames for equality elementwise.\nDataFrame.ne : Compare DataFrames for inequality elementwise.\nDataFrame.le : Compare DataFrames for less than inequality\n or equality elementwise.\nDataFrame.lt : Compare DataFrames for strictly less than\n inequality elementwise.\nDataFrame.ge : Compare DataFrames for greater than inequality\n or equality elementwise.\nDataFrame.gt : Compare DataFrames for strictly greater than\n inequality elementwise.\n\nNotes\n-----\nMismatched indices will be unioned together.\n`NaN` values are considered different (i.e. `NaN` != `NaN`).\n\nExamples\n--------\n>>> df = pd.DataFrame({{\'cost\': [250, 150, 100],\n... \'revenue\': [100, 250, 300]}},\n... index=[\'A\', \'B\', \'C\'])\n>>> df\n cost revenue\nA 250 100\nB 150 250\nC 100 300\n\nComparison with a scalar, using either the operator or method:\n\n>>> df == 100\n cost revenue\nA False True\nB False False\nC True False\n\n>>> df.eq(100)\n cost revenue\nA False True\nB False False\nC True False\n\nWhen `other` is a :class:`Series`, the columns of a DataFrame are aligned\nwith the index of `other` and broadcast:\n\n>>> df != pd.Series([100, 250], index=["cost", "revenue"])\n cost revenue\nA True True\nB True False\nC False True\n\nUse the method to control the broadcast axis:\n\n>>> df.ne(pd.Series([100, 300], index=["A", "D"]), axis=\'index\')\n cost revenue\nA True False\nB True True\nC True True\nD True True\n\nWhen comparing to an arbitrary sequence, the number of columns must\nmatch the number elements in `other`:\n\n>>> df == [250, 100]\n cost revenue\nA True True\nB False False\nC False False\n\nUse the method to control the axis:\n\n>>> df.eq([250, 250, 100], axis=\'index\')\n cost revenue\nA True False\nB False True\nC True False\n\nCompare to a DataFrame of different shape.\n\n>>> other = pd.DataFrame({{\'revenue\': [300, 250, 100, 150]}},\n... index=[\'A\', \'B\', \'C\', \'D\'])\n>>> other\n revenue\nA 300\nB 250\nC 100\nD 150\n\n>>> df.gt(other)\n cost revenue\nA False False\nB False False\nC False True\nD False False\n\nCompare to a MultiIndex by level.\n\n>>> df_multindex = pd.DataFrame({{\'cost\': [250, 150, 100, 150, 300, 220],\n... \'revenue\': [100, 250, 300, 200, 175, 225]}},\n... index=[[\'Q1\', \'Q1\', \'Q1\', \'Q2\', \'Q2\', \'Q2\'],\n... 
[\'A\', \'B\', \'C\', \'A\', \'B\', \'C\']])\n>>> df_multindex\n cost revenue\nQ1 A 250 100\n B 150 250\n C 100 300\nQ2 A 150 200\n B 300 175\n C 220 225\n\n>>> df.le(df_multindex, level=1)\n cost revenue\nQ1 A True True\n B True True\n C True True\nQ2 A False True\n B True False\n C True False\n' # File: pandas-main/pandas/core/ops/invalid.py """""" from __future__ import annotations import operator from typing import TYPE_CHECKING, Any, NoReturn import numpy as np if TYPE_CHECKING: from collections.abc import Callable from pandas._typing import ArrayLike, Scalar, npt def invalid_comparison(left: ArrayLike, right: ArrayLike | Scalar, op: Callable[[Any, Any], bool]) -> npt.NDArray[np.bool_]: if op is operator.eq: res_values = np.zeros(left.shape, dtype=bool) elif op is operator.ne: res_values = np.ones(left.shape, dtype=bool) else: typ = type(right).__name__ raise TypeError(f'Invalid comparison between dtype={left.dtype} and {typ}') return res_values def make_invalid_op(name: str) -> Callable[..., NoReturn]: def invalid_op(self: object, other: object=None) -> NoReturn: typ = type(self).__name__ raise TypeError(f'cannot perform {name} with this index type: {typ}') invalid_op.__name__ = name return invalid_op # File: pandas-main/pandas/core/ops/mask_ops.py """""" from __future__ import annotations from typing import TYPE_CHECKING import numpy as np from pandas._libs import lib, missing as libmissing if TYPE_CHECKING: from pandas._typing import npt def kleene_or(left: bool | np.ndarray | libmissing.NAType, right: bool | np.ndarray | libmissing.NAType, left_mask: np.ndarray | None, right_mask: np.ndarray | None) -> tuple[npt.NDArray[np.bool_], npt.NDArray[np.bool_]]: if left_mask is None: return kleene_or(right, left, right_mask, left_mask) if not isinstance(left, np.ndarray): raise TypeError('Either `left` or `right` need to be a np.ndarray.') raise_for_nan(right, method='or') if right is libmissing.NA: result = left.copy() else: result = left | right if right_mask is not None: left_false = ~(left | left_mask) right_false = ~(right | right_mask) mask = left_false & right_mask | right_false & left_mask | left_mask & right_mask elif right is True: mask = np.zeros_like(left_mask) elif right is libmissing.NA: mask = ~left & ~left_mask | left_mask else: mask = left_mask.copy() return (result, mask) def kleene_xor(left: bool | np.ndarray | libmissing.NAType, right: bool | np.ndarray | libmissing.NAType, left_mask: np.ndarray | None, right_mask: np.ndarray | None) -> tuple[npt.NDArray[np.bool_], npt.NDArray[np.bool_]]: if left_mask is None: return kleene_xor(right, left, right_mask, left_mask) if not isinstance(left, np.ndarray): raise TypeError('Either `left` or `right` need to be a np.ndarray.') raise_for_nan(right, method='xor') if right is libmissing.NA: result = np.zeros_like(left) else: result = left ^ right if right_mask is None: if right is libmissing.NA: mask = np.ones_like(left_mask) else: mask = left_mask.copy() else: mask = left_mask | right_mask return (result, mask) def kleene_and(left: bool | libmissing.NAType | np.ndarray, right: bool | libmissing.NAType | np.ndarray, left_mask: np.ndarray | None, right_mask: np.ndarray | None) -> tuple[npt.NDArray[np.bool_], npt.NDArray[np.bool_]]: if left_mask is None: return kleene_and(right, left, right_mask, left_mask) if not isinstance(left, np.ndarray): raise TypeError('Either `left` or `right` need to be a np.ndarray.') raise_for_nan(right, method='and') if right is libmissing.NA: result = np.zeros_like(left) else: result = left & right if 
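# --- Illustrative sketch (not part of the pandas source above) ---
# The kleene_* helpers implement three-valued (Kleene) logic for masked boolean
# arrays: NA | True is True, NA & False is False, and NA propagates otherwise.
# The nullable "boolean" dtype shows the same truth table through the public API.
import pandas as pd

x = pd.array([True, False, None], dtype="boolean")

print(x | True)   # [True, True, True]    : True wins regardless of NA
print(x & False)  # [False, False, False] : False wins regardless of NA
print(x | False)  # [True, False, <NA>]   : NA propagates
print(x ^ True)   # [False, True, <NA>]   : xor with NA stays NA
# --- end sketch ---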
right_mask is None: if right is libmissing.NA: mask = left & ~left_mask | left_mask else: mask = left_mask.copy() if right is False: mask[:] = False else: left_false = ~(left | left_mask) right_false = ~(right | right_mask) mask = left_mask & ~right_false | right_mask & ~left_false return (result, mask) def raise_for_nan(value: object, method: str) -> None: if lib.is_float(value) and np.isnan(value): raise ValueError(f"Cannot perform logical '{method}' with floating NaN") # File: pandas-main/pandas/core/ops/missing.py """""" from __future__ import annotations import operator import numpy as np from pandas.core import roperator def _fill_zeros(result: np.ndarray, x, y) -> np.ndarray: if result.dtype.kind == 'f': return result is_variable_type = hasattr(y, 'dtype') is_scalar_type = not isinstance(y, np.ndarray) if not is_variable_type and (not is_scalar_type): return result if is_scalar_type: y = np.array(y) if y.dtype.kind in 'iu': ymask = y == 0 if ymask.any(): mask = ymask & ~np.isnan(result) result = result.astype('float64', copy=False) np.putmask(result, mask, np.nan) return result def mask_zero_div_zero(x, y, result: np.ndarray) -> np.ndarray: if not hasattr(y, 'dtype'): y = np.array(y) if not hasattr(x, 'dtype'): x = np.array(x) zmask = y == 0 if zmask.any(): zneg_mask = zmask & np.signbit(y) zpos_mask = zmask & ~zneg_mask x_lt0 = x < 0 x_gt0 = x > 0 nan_mask = zmask & (x == 0) neginf_mask = zpos_mask & x_lt0 | zneg_mask & x_gt0 posinf_mask = zpos_mask & x_gt0 | zneg_mask & x_lt0 if nan_mask.any() or neginf_mask.any() or posinf_mask.any(): result = result.astype('float64', copy=False) result[nan_mask] = np.nan result[posinf_mask] = np.inf result[neginf_mask] = -np.inf return result def dispatch_fill_zeros(op, left, right, result): if op is divmod: result = (mask_zero_div_zero(left, right, result[0]), _fill_zeros(result[1], left, right)) elif op is roperator.rdivmod: result = (mask_zero_div_zero(right, left, result[0]), _fill_zeros(result[1], right, left)) elif op is operator.floordiv: result = mask_zero_div_zero(left, right, result) elif op is roperator.rfloordiv: result = mask_zero_div_zero(right, left, result) elif op is operator.mod: result = _fill_zeros(result, left, right) elif op is roperator.rmod: result = _fill_zeros(result, right, left) return result # File: pandas-main/pandas/core/resample.py from __future__ import annotations import copy from textwrap import dedent from typing import TYPE_CHECKING, Literal, cast, final, no_type_check, overload import warnings import numpy as np from pandas._libs import lib from pandas._libs.tslibs import BaseOffset, IncompatibleFrequency, NaT, Period, Timedelta, Timestamp, to_offset from pandas._typing import NDFrameT from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender, Substitution, doc from pandas.util._exceptions import find_stack_level, rewrite_warning from pandas.core.dtypes.dtypes import ArrowDtype, PeriodDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries import pandas.core.algorithms as algos from pandas.core.apply import ResamplerWindowApply from pandas.core.arrays import ArrowExtensionArray from pandas.core.base import PandasObject, SelectionMixin from pandas.core.generic import NDFrame, _shared_docs from pandas.core.groupby.groupby import BaseGroupBy, GroupBy, _apply_groupings_depr, _pipe_template, get_groupby from pandas.core.groupby.grouper import Grouper from pandas.core.groupby.ops import BinGrouper from pandas.core.indexes.api import MultiIndex from 
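# --- Illustrative sketch (not part of the pandas source above) ---
# mask_zero_div_zero and _fill_zeros patch integer floor-division and modulo by
# zero so integer inputs follow float semantics: 0 // 0 -> NaN, positive // 0 ->
# inf, negative // 0 -> -inf, and x % 0 -> NaN, rather than the plain-NumPy
# integer result.
import pandas as pd

s = pd.Series([1, 0, -1])

print(s // 0)  # [inf, NaN, -inf] (float64 result)
print(s % 0)   # [NaN, NaN, NaN]
# --- end sketch ---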
pandas.core.indexes.base import Index from pandas.core.indexes.datetimes import DatetimeIndex, date_range from pandas.core.indexes.period import PeriodIndex, period_range from pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range from pandas.core.reshape.concat import concat from pandas.tseries.frequencies import is_subperiod, is_superperiod from pandas.tseries.offsets import Day, Tick if TYPE_CHECKING: from collections.abc import Callable, Hashable from pandas._typing import Any, AnyArrayLike, Axis, Concatenate, FreqIndexT, Frequency, IndexLabel, InterpolateOptions, P, Self, T, TimedeltaConvertibleTypes, TimeGrouperOrigin, TimestampConvertibleTypes, npt from pandas import DataFrame, Series _shared_docs_kwargs: dict[str, str] = {} class Resampler(BaseGroupBy, PandasObject): _grouper: BinGrouper _timegrouper: TimeGrouper binner: DatetimeIndex | TimedeltaIndex | PeriodIndex exclusions: frozenset[Hashable] = frozenset() _internal_names_set = set({'obj', 'ax', '_indexer'}) _attributes = ['freq', 'closed', 'label', 'convention', 'origin', 'offset'] def __init__(self, obj: NDFrame, timegrouper: TimeGrouper, *, gpr_index: Index, group_keys: bool=False, selection=None, include_groups: bool=True) -> None: self._timegrouper = timegrouper self.keys = None self.sort = True self.group_keys = group_keys self.as_index = True self.include_groups = include_groups (self.obj, self.ax, self._indexer) = self._timegrouper._set_grouper(self._convert_obj(obj), sort=True, gpr_index=gpr_index) (self.binner, self._grouper) = self._get_binner() self._selection = selection if self._timegrouper.key is not None: self.exclusions = frozenset([self._timegrouper.key]) else: self.exclusions = frozenset() @final def __str__(self) -> str: attrs = (f'{k}={getattr(self._timegrouper, k)}' for k in self._attributes if getattr(self._timegrouper, k, None) is not None) return f"{type(self).__name__} [{', '.join(attrs)}]" @final def __getattr__(self, attr: str): if attr in self._internal_names_set: return object.__getattribute__(self, attr) if attr in self._attributes: return getattr(self._timegrouper, attr) if attr in self.obj: return self[attr] return object.__getattribute__(self, attr) @final @property def _from_selection(self) -> bool: return self._timegrouper is not None and (self._timegrouper.key is not None or self._timegrouper.level is not None) def _convert_obj(self, obj: NDFrameT) -> NDFrameT: return obj._consolidate() def _get_binner_for_time(self): raise AbstractMethodError(self) @final def _get_binner(self): (binner, bins, binlabels) = self._get_binner_for_time() assert len(bins) == len(binlabels) bin_grouper = BinGrouper(bins, binlabels, indexer=self._indexer) return (binner, bin_grouper) @overload def pipe(self, func: Callable[Concatenate[Self, P], T], *args: P.args, **kwargs: P.kwargs) -> T: ... @overload def pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T: ... @final @Substitution(klass='Resampler', examples="\n >>> df = pd.DataFrame({'A': [1, 2, 3, 4]},\n ... 
index=pd.date_range('2012-08-02', periods=4))\n >>> df\n A\n 2012-08-02 1\n 2012-08-03 2\n 2012-08-04 3\n 2012-08-05 4\n\n To get the difference between each 2-day period's maximum and minimum\n value in one pass, you can do\n\n >>> df.resample('2D').pipe(lambda x: x.max() - x.min())\n A\n 2012-08-02 1\n 2012-08-04 1") @Appender(_pipe_template) def pipe(self, func: Callable[Concatenate[Self, P], T] | tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T: return super().pipe(func, *args, **kwargs) _agg_see_also_doc = dedent('\n See Also\n --------\n DataFrame.groupby.aggregate : Aggregate using callable, string, dict,\n or list of string/callables.\n DataFrame.resample.transform : Transforms the Series on each group\n based on the given function.\n DataFrame.aggregate: Aggregate using one or more\n operations over the specified axis.\n ') _agg_examples_doc = dedent('\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4, 5],\n ... index=pd.date_range(\'20130101\', periods=5, freq=\'s\'))\n >>> s\n 2013-01-01 00:00:00 1\n 2013-01-01 00:00:01 2\n 2013-01-01 00:00:02 3\n 2013-01-01 00:00:03 4\n 2013-01-01 00:00:04 5\n Freq: s, dtype: int64\n\n >>> r = s.resample(\'2s\')\n\n >>> r.agg("sum")\n 2013-01-01 00:00:00 3\n 2013-01-01 00:00:02 7\n 2013-01-01 00:00:04 5\n Freq: 2s, dtype: int64\n\n >>> r.agg([\'sum\', \'mean\', \'max\'])\n sum mean max\n 2013-01-01 00:00:00 3 1.5 2\n 2013-01-01 00:00:02 7 3.5 4\n 2013-01-01 00:00:04 5 5.0 5\n\n >>> r.agg({\'result\': lambda x: x.mean() / x.std(),\n ... \'total\': "sum"})\n result total\n 2013-01-01 00:00:00 2.121320 3\n 2013-01-01 00:00:02 4.949747 7\n 2013-01-01 00:00:04 NaN 5\n\n >>> r.agg(average="mean", total="sum")\n average total\n 2013-01-01 00:00:00 1.5 3\n 2013-01-01 00:00:02 3.5 7\n 2013-01-01 00:00:04 5.0 5\n ') @final @doc(_shared_docs['aggregate'], see_also=_agg_see_also_doc, examples=_agg_examples_doc, klass='DataFrame', axis='') def aggregate(self, func=None, *args, **kwargs): result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg() if result is None: how = func result = self._groupby_and_aggregate(how, *args, **kwargs) return result agg = aggregate apply = aggregate @final def transform(self, arg, *args, **kwargs): return self._selected_obj.groupby(self._timegrouper).transform(arg, *args, **kwargs) def _downsample(self, f, **kwargs): raise AbstractMethodError(self) def _upsample(self, f, limit: int | None=None, fill_value=None): raise AbstractMethodError(self) def _gotitem(self, key, ndim: int, subset=None): grouper = self._grouper if subset is None: subset = self.obj if key is not None: subset = subset[key] else: assert subset.ndim == 1 if ndim == 1: assert subset.ndim == 1 grouped = get_groupby(subset, by=None, grouper=grouper, group_keys=self.group_keys) return grouped def _groupby_and_aggregate(self, how, *args, **kwargs): grouper = self._grouper obj = self._obj_with_exclusions grouped = get_groupby(obj, by=None, grouper=grouper, group_keys=self.group_keys) try: if callable(how): func = lambda x: how(x, *args, **kwargs) result = grouped.aggregate(func) else: result = grouped.aggregate(how, *args, **kwargs) except (AttributeError, KeyError): result = _apply(grouped, how, *args, include_groups=self.include_groups, **kwargs) except ValueError as err: if 'Must produce aggregated value' in str(err): pass else: raise result = _apply(grouped, how, *args, include_groups=self.include_groups, **kwargs) return self._wrap_result(result) @final def _get_resampler_for_grouping(self, groupby: GroupBy, key, include_groups: 
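# --- Illustrative sketch (not part of the pandas source above) ---
# aggregate/agg reduce each time bin to one row, while transform groups by the
# TimeGrouper and returns a result aligned with the *original* index, broadcasting
# the per-bin value back.
import pandas as pd

s = pd.Series([1, 2, 3, 4],
              index=pd.date_range("2023-01-01", periods=4, freq="s"))

print(s.resample("2s").agg(["sum", "mean"]))  # one row per 2-second bin
print(s.resample("2s").transform("sum"))      # same length as s: [3, 3, 7, 7]
# --- end sketch ---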
bool=True): return self._resampler_for_grouping(groupby=groupby, key=key, parent=self, include_groups=include_groups) def _wrap_result(self, result): obj = self.obj if isinstance(result, ABCDataFrame) and len(result) == 0 and (not isinstance(result.index, PeriodIndex)): result = result.set_index(_asfreq_compat(obj.index[:0], freq=self.freq), append=True) if isinstance(result, ABCSeries) and self._selection is not None: result.name = self._selection if isinstance(result, ABCSeries) and result.empty: result.index = _asfreq_compat(obj.index[:0], freq=self.freq) result.name = getattr(obj, 'name', None) if self._timegrouper._arrow_dtype is not None: result.index = result.index.astype(self._timegrouper._arrow_dtype) return result @final def ffill(self, limit: int | None=None): return self._upsample('ffill', limit=limit) @final def nearest(self, limit: int | None=None): return self._upsample('nearest', limit=limit) @final def bfill(self, limit: int | None=None): return self._upsample('bfill', limit=limit) @final def interpolate(self, method: InterpolateOptions='linear', *, axis: Axis=0, limit: int | None=None, inplace: bool=False, limit_direction: Literal['forward', 'backward', 'both']='forward', limit_area=None, downcast=lib.no_default, **kwargs): assert downcast is lib.no_default result = self._upsample('asfreq') obj = self._selected_obj is_period_index = isinstance(obj.index, PeriodIndex) if not is_period_index: final_index = result.index if isinstance(final_index, MultiIndex): raise NotImplementedError('Direct interpolation of MultiIndex data frames is not supported. If you tried to resample and interpolate on a grouped data frame, please use:\n`df.groupby(...).apply(lambda x: x.resample(...).interpolate(...), include_groups=False)`\ninstead, as resampling and interpolation has to be performed for each group independently.') missing_data_points_index = obj.index.difference(final_index) if len(missing_data_points_index) > 0: result = concat([result, obj.loc[missing_data_points_index]]).sort_index() result_interpolated = result.interpolate(method=method, axis=axis, limit=limit, inplace=inplace, limit_direction=limit_direction, limit_area=limit_area, downcast=downcast, **kwargs) if is_period_index: return result_interpolated result_interpolated = result_interpolated.loc[final_index] result_interpolated.index = final_index return result_interpolated @final def asfreq(self, fill_value=None): return self._upsample('asfreq', fill_value=fill_value) @final def sum(self, numeric_only: bool=False, min_count: int=0): return self._downsample('sum', numeric_only=numeric_only, min_count=min_count) @final def prod(self, numeric_only: bool=False, min_count: int=0): return self._downsample('prod', numeric_only=numeric_only, min_count=min_count) @final def min(self, numeric_only: bool=False, min_count: int=0): return self._downsample('min', numeric_only=numeric_only, min_count=min_count) @final def max(self, numeric_only: bool=False, min_count: int=0): return self._downsample('max', numeric_only=numeric_only, min_count=min_count) @final @doc(GroupBy.first) def first(self, numeric_only: bool=False, min_count: int=0, skipna: bool=True): return self._downsample('first', numeric_only=numeric_only, min_count=min_count, skipna=skipna) @final @doc(GroupBy.last) def last(self, numeric_only: bool=False, min_count: int=0, skipna: bool=True): return self._downsample('last', numeric_only=numeric_only, min_count=min_count, skipna=skipna) @final @doc(GroupBy.median) def median(self, numeric_only: bool=False): return 
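# --- Illustrative sketch (not part of the pandas source above) ---
# ffill/bfill/nearest/asfreq are thin wrappers around _upsample: they reindex onto
# the finer grid and then fill (or not); interpolate first upsamples with 'asfreq'
# and then interpolates over the combined index.
import pandas as pd

s = pd.Series([1.0, 4.0],
              index=pd.date_range("2023-01-01", periods=2, freq="2h"))

print(s.resample("1h").asfreq())       # new slots are NaN
print(s.resample("1h").ffill())        # new slots carry the previous value
print(s.resample("1h").interpolate())  # new slots are linearly interpolated
# --- end sketch ---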
self._downsample('median', numeric_only=numeric_only) @final def mean(self, numeric_only: bool=False): return self._downsample('mean', numeric_only=numeric_only) @final def std(self, ddof: int=1, numeric_only: bool=False): return self._downsample('std', ddof=ddof, numeric_only=numeric_only) @final def var(self, ddof: int=1, numeric_only: bool=False): return self._downsample('var', ddof=ddof, numeric_only=numeric_only) @final @doc(GroupBy.sem) def sem(self, ddof: int=1, numeric_only: bool=False): return self._downsample('sem', ddof=ddof, numeric_only=numeric_only) @final @doc(GroupBy.ohlc) def ohlc(self): ax = self.ax obj = self._obj_with_exclusions if len(ax) == 0: obj = obj.copy() obj.index = _asfreq_compat(obj.index, self.freq) if obj.ndim == 1: obj = obj.to_frame() obj = obj.reindex(['open', 'high', 'low', 'close'], axis=1) else: mi = MultiIndex.from_product([obj.columns, ['open', 'high', 'low', 'close']]) obj = obj.reindex(mi, axis=1) return obj return self._downsample('ohlc') @final def nunique(self): return self._downsample('nunique') @final @doc(GroupBy.size) def size(self): result = self._downsample('size') if isinstance(result, ABCDataFrame) and (not result.empty): result = result.stack() if not len(self.ax): from pandas import Series if self._selected_obj.ndim == 1: name = self._selected_obj.name else: name = None result = Series([], index=result.index, dtype='int64', name=name) return result @final @doc(GroupBy.count) def count(self): result = self._downsample('count') if not len(self.ax): if self._selected_obj.ndim == 1: result = type(self._selected_obj)([], index=result.index, dtype='int64', name=self._selected_obj.name) else: from pandas import DataFrame result = DataFrame([], index=result.index, columns=result.columns, dtype='int64') return result @final def quantile(self, q: float | list[float] | AnyArrayLike=0.5, **kwargs): return self._downsample('quantile', q=q, **kwargs) class _GroupByMixin(PandasObject, SelectionMixin): _attributes: list[str] _selection: IndexLabel | None = None _groupby: GroupBy _timegrouper: TimeGrouper def __init__(self, *, parent: Resampler, groupby: GroupBy, key=None, selection: IndexLabel | None=None, include_groups: bool=False) -> None: assert isinstance(groupby, GroupBy), type(groupby) assert isinstance(parent, Resampler), type(parent) for attr in self._attributes: setattr(self, attr, getattr(parent, attr)) self._selection = selection self.binner = parent.binner self.key = key self._groupby = groupby self._timegrouper = copy.copy(parent._timegrouper) self.ax = parent.ax self.obj = parent.obj self.include_groups = include_groups @no_type_check def _apply(self, f, *args, **kwargs): def func(x): x = self._resampler_cls(x, timegrouper=self._timegrouper, gpr_index=self.ax) if isinstance(f, str): return getattr(x, f)(**kwargs) return x.apply(f, *args, **kwargs) result = _apply(self._groupby, func, include_groups=self.include_groups) return self._wrap_result(result) _upsample = _apply _downsample = _apply _groupby_and_aggregate = _apply @final def _gotitem(self, key, ndim, subset=None): if subset is None: subset = self.obj if key is not None: subset = subset[key] else: assert subset.ndim == 1 try: if isinstance(key, list) and self.key not in key and (self.key is not None): key.append(self.key) groupby = self._groupby[key] except IndexError: groupby = self._groupby selection = self._infer_selection(key, subset) new_rs = type(self)(groupby=groupby, parent=cast(Resampler, self), selection=selection) return new_rs class DatetimeIndexResampler(Resampler): 
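# --- Illustrative sketch (not part of the pandas source above) ---
# The reduction methods (sum, mean, median, ohlc, quantile, ...) all delegate to
# _downsample, i.e. a groupby over the generated time bins followed by the named
# aggregation.
import pandas as pd

s = pd.Series([1, 2, 3, 4, 5],
              index=pd.date_range("2023-01-01", periods=5, freq="min"))

print(s.resample("2min").mean())        # per-bin mean: 1.5, 3.5, 5.0
print(s.resample("2min").ohlc())        # open/high/low/close per bin
print(s.resample("2min").quantile(0.5))
# --- end sketch ---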
ax: DatetimeIndex @property def _resampler_for_grouping(self) -> type[DatetimeIndexResamplerGroupby]: return DatetimeIndexResamplerGroupby def _get_binner_for_time(self): if isinstance(self.ax, PeriodIndex): return self._timegrouper._get_time_period_bins(self.ax) return self._timegrouper._get_time_bins(self.ax) def _downsample(self, how, **kwargs): ax = self.ax obj = self._obj_with_exclusions if not len(ax): obj = obj.copy() obj.index = obj.index._with_freq(self.freq) assert obj.index.freq == self.freq, (obj.index.freq, self.freq) return obj if (ax.freq is not None or ax.inferred_freq is not None) and len(self._grouper.binlabels) > len(ax) and (how is None): return self.asfreq() result = obj.groupby(self._grouper).aggregate(how, **kwargs) return self._wrap_result(result) def _adjust_binner_for_upsample(self, binner): if self.closed == 'right': binner = binner[1:] else: binner = binner[:-1] return binner def _upsample(self, method, limit: int | None=None, fill_value=None): if self._from_selection: raise ValueError('Upsampling from level= or on= selection is not supported, use .set_index(...) to explicitly set index to datetime-like') ax = self.ax obj = self._selected_obj binner = self.binner res_index = self._adjust_binner_for_upsample(binner) if limit is None and to_offset(ax.inferred_freq) == self.freq and (len(obj) == len(res_index)): result = obj.copy() result.index = res_index else: if method == 'asfreq': method = None result = obj.reindex(res_index, method=method, limit=limit, fill_value=fill_value) return self._wrap_result(result) def _wrap_result(self, result): result = super()._wrap_result(result) if isinstance(self.ax, PeriodIndex) and (not isinstance(result.index, PeriodIndex)): if isinstance(result.index, MultiIndex): if not isinstance(result.index.levels[-1], PeriodIndex): new_level = result.index.levels[-1].to_period(self.freq) result.index = result.index.set_levels(new_level, level=-1) else: result.index = result.index.to_period(self.freq) return result class DatetimeIndexResamplerGroupby(_GroupByMixin, DatetimeIndexResampler): @property def _resampler_cls(self): return DatetimeIndexResampler class PeriodIndexResampler(DatetimeIndexResampler): ax: PeriodIndex @property def _resampler_for_grouping(self): warnings.warn('Resampling a groupby with a PeriodIndex is deprecated. Cast to DatetimeIndex before resampling instead.', FutureWarning, stacklevel=find_stack_level()) return PeriodIndexResamplerGroupby def _get_binner_for_time(self): if isinstance(self.ax, DatetimeIndex): return super()._get_binner_for_time() return self._timegrouper._get_period_bins(self.ax) def _convert_obj(self, obj: NDFrameT) -> NDFrameT: obj = super()._convert_obj(obj) if self._from_selection: msg = 'Resampling from level= or on= selection with a PeriodIndex is not currently supported, use .set_index(...) 
to explicitly set index' raise NotImplementedError(msg) if isinstance(obj, DatetimeIndex): obj = obj.to_timestamp(how=self.convention) return obj def _downsample(self, how, **kwargs): if isinstance(self.ax, DatetimeIndex): return super()._downsample(how, **kwargs) ax = self.ax if is_subperiod(ax.freq, self.freq): return self._groupby_and_aggregate(how, **kwargs) elif is_superperiod(ax.freq, self.freq): if how == 'ohlc': return self._groupby_and_aggregate(how) return self.asfreq() elif ax.freq == self.freq: return self.asfreq() raise IncompatibleFrequency(f'Frequency {ax.freq} cannot be resampled to {self.freq}, as they are not sub or super periods') def _upsample(self, method, limit: int | None=None, fill_value=None): if isinstance(self.ax, DatetimeIndex): return super()._upsample(method, limit=limit, fill_value=fill_value) ax = self.ax obj = self.obj new_index = self.binner memb = ax.asfreq(self.freq, how=self.convention) if method == 'asfreq': method = None indexer = memb.get_indexer(new_index, method=method, limit=limit) new_obj = _take_new_index(obj, indexer, new_index) return self._wrap_result(new_obj) class PeriodIndexResamplerGroupby(_GroupByMixin, PeriodIndexResampler): @property def _resampler_cls(self): return PeriodIndexResampler class TimedeltaIndexResampler(DatetimeIndexResampler): ax: TimedeltaIndex @property def _resampler_for_grouping(self): return TimedeltaIndexResamplerGroupby def _get_binner_for_time(self): return self._timegrouper._get_time_delta_bins(self.ax) def _adjust_binner_for_upsample(self, binner): return binner class TimedeltaIndexResamplerGroupby(_GroupByMixin, TimedeltaIndexResampler): @property def _resampler_cls(self): return TimedeltaIndexResampler def get_resampler(obj: Series | DataFrame, **kwds) -> Resampler: tg = TimeGrouper(obj, **kwds) return tg._get_resampler(obj) get_resampler.__doc__ = Resampler.__doc__ def get_resampler_for_grouping(groupby: GroupBy, rule, how=None, fill_method=None, limit: int | None=None, on=None, include_groups: bool=True, **kwargs) -> Resampler: tg = TimeGrouper(freq=rule, key=on, **kwargs) resampler = tg._get_resampler(groupby.obj) return resampler._get_resampler_for_grouping(groupby=groupby, include_groups=include_groups, key=tg.key) class TimeGrouper(Grouper): _attributes = Grouper._attributes + ('closed', 'label', 'how', 'convention', 'origin', 'offset') origin: TimeGrouperOrigin def __init__(self, obj: Grouper | None=None, freq: Frequency='Min', key: str | None=None, closed: Literal['left', 'right'] | None=None, label: Literal['left', 'right'] | None=None, how: str='mean', fill_method=None, limit: int | None=None, convention: Literal['start', 'end', 'e', 's'] | None=None, origin: Literal['epoch', 'start', 'start_day', 'end', 'end_day'] | TimestampConvertibleTypes='start_day', offset: TimedeltaConvertibleTypes | None=None, group_keys: bool=False, **kwargs) -> None: if label not in {None, 'left', 'right'}: raise ValueError(f'Unsupported value {label} for `label`') if closed not in {None, 'left', 'right'}: raise ValueError(f'Unsupported value {closed} for `closed`') if convention not in {None, 'start', 'end', 'e', 's'}: raise ValueError(f'Unsupported value {convention} for `convention`') if key is None and obj is not None and isinstance(obj.index, PeriodIndex) or (key is not None and obj is not None and (getattr(obj[key], 'dtype', None) == 'period')): freq = to_offset(freq, is_period=True) else: freq = to_offset(freq) end_types = {'ME', 'YE', 'QE', 'BME', 'BYE', 'BQE', 'W'} rule = freq.rule_code if rule in end_types or ('-' 
in rule and rule[:rule.find('-')] in end_types): if closed is None: closed = 'right' if label is None: label = 'right' elif origin in ['end', 'end_day']: if closed is None: closed = 'right' if label is None: label = 'right' else: if closed is None: closed = 'left' if label is None: label = 'left' self.closed = closed self.label = label self.convention = convention if convention is not None else 'e' self.how = how self.fill_method = fill_method self.limit = limit self.group_keys = group_keys self._arrow_dtype: ArrowDtype | None = None if origin in ('epoch', 'start', 'start_day', 'end', 'end_day'): self.origin = origin else: try: self.origin = Timestamp(origin) except (ValueError, TypeError) as err: raise ValueError(f"'origin' should be equal to 'epoch', 'start', 'start_day', 'end', 'end_day' or should be a Timestamp convertible type. Got '{origin}' instead.") from err try: self.offset = Timedelta(offset) if offset is not None else None except (ValueError, TypeError) as err: raise ValueError(f"'offset' should be a Timedelta convertible type. Got '{offset}' instead.") from err kwargs['sort'] = True super().__init__(freq=freq, key=key, **kwargs) def _get_resampler(self, obj: NDFrame) -> Resampler: (_, ax, _) = self._set_grouper(obj, gpr_index=None) if isinstance(ax, DatetimeIndex): return DatetimeIndexResampler(obj, timegrouper=self, group_keys=self.group_keys, gpr_index=ax) elif isinstance(ax, PeriodIndex): if isinstance(ax, PeriodIndex): warnings.warn('Resampling with a PeriodIndex is deprecated. Cast index to DatetimeIndex before resampling instead.', FutureWarning, stacklevel=find_stack_level()) return PeriodIndexResampler(obj, timegrouper=self, group_keys=self.group_keys, gpr_index=ax) elif isinstance(ax, TimedeltaIndex): return TimedeltaIndexResampler(obj, timegrouper=self, group_keys=self.group_keys, gpr_index=ax) raise TypeError(f"Only valid with DatetimeIndex, TimedeltaIndex or PeriodIndex, but got an instance of '{type(ax).__name__}'") def _get_grouper(self, obj: NDFrameT, validate: bool=True) -> tuple[BinGrouper, NDFrameT]: r = self._get_resampler(obj) return (r._grouper, cast(NDFrameT, r.obj)) def _get_time_bins(self, ax: DatetimeIndex): if not isinstance(ax, DatetimeIndex): raise TypeError(f'axis must be a DatetimeIndex, but got an instance of {type(ax).__name__}') if len(ax) == 0: binner = labels = DatetimeIndex(data=[], freq=self.freq, name=ax.name, dtype=ax.dtype) return (binner, [], labels) (first, last) = _get_timestamp_range_edges(ax.min(), ax.max(), self.freq, unit=ax.unit, closed=self.closed, origin=self.origin, offset=self.offset) binner = labels = date_range(freq=self.freq, start=first, end=last, tz=ax.tz, name=ax.name, ambiguous=True, nonexistent='shift_forward', unit=ax.unit) ax_values = ax.asi8 (binner, bin_edges) = self._adjust_bin_edges(binner, ax_values) bins = lib.generate_bins_dt64(ax_values, bin_edges, self.closed, hasnans=ax.hasnans) if self.closed == 'right': labels = binner if self.label == 'right': labels = labels[1:] elif self.label == 'right': labels = labels[1:] if ax.hasnans: binner = binner.insert(0, NaT) labels = labels.insert(0, NaT) if len(bins) < len(labels): labels = labels[:len(bins)] return (binner, bins, labels) def _adjust_bin_edges(self, binner: DatetimeIndex, ax_values: npt.NDArray[np.int64]) -> tuple[DatetimeIndex, npt.NDArray[np.int64]]: if self.freq.name in ('BME', 'ME', 'W') or self.freq.name.split('-')[0] in ('BQE', 'BYE', 'QE', 'YE', 'W'): if self.closed == 'right': edges_dti = binner.tz_localize(None) edges_dti = edges_dti + 
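# --- Illustrative sketch (not part of the pandas source above) ---
# As the branch above shows, TimeGrouper defaults to closed='right'/label='right'
# for period-end anchored frequencies (ME, QE, YE, W, ...) and to 'left' for
# everything else; both can be overridden explicitly.
import pandas as pd

s = pd.Series(range(5), index=pd.date_range("2023-01-29", periods=5, freq="D"))

print(s.resample("ME").sum())   # month-end anchored: right-closed, right-labelled
print(s.resample("2D").sum())   # everything else: left-closed, left-labelled
print(s.resample("2D", closed="right", label="right").sum())
# --- end sketch ---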
Timedelta(days=1, unit=edges_dti.unit).as_unit(edges_dti.unit) - Timedelta(1, unit=edges_dti.unit).as_unit(edges_dti.unit) bin_edges = edges_dti.tz_localize(binner.tz).asi8 else: bin_edges = binner.asi8 if bin_edges[-2] > ax_values.max(): bin_edges = bin_edges[:-1] binner = binner[:-1] else: bin_edges = binner.asi8 return (binner, bin_edges) def _get_time_delta_bins(self, ax: TimedeltaIndex): if not isinstance(ax, TimedeltaIndex): raise TypeError(f'axis must be a TimedeltaIndex, but got an instance of {type(ax).__name__}') if not isinstance(self.freq, Tick): raise ValueError(f"Resampling on a TimedeltaIndex requires fixed-duration `freq`, e.g. '24h' or '3D', not {self.freq}") if not len(ax): binner = labels = TimedeltaIndex(data=[], freq=self.freq, name=ax.name) return (binner, [], labels) (start, end) = (ax.min(), ax.max()) if self.closed == 'right': end += self.freq labels = binner = timedelta_range(start=start, end=end, freq=self.freq, name=ax.name) end_stamps = labels if self.closed == 'left': end_stamps += self.freq bins = ax.searchsorted(end_stamps, side=self.closed) if self.offset: labels += self.offset return (binner, bins, labels) def _get_time_period_bins(self, ax: DatetimeIndex): if not isinstance(ax, DatetimeIndex): raise TypeError(f'axis must be a DatetimeIndex, but got an instance of {type(ax).__name__}') freq = self.freq if len(ax) == 0: binner = labels = PeriodIndex(data=[], freq=freq, name=ax.name, dtype=ax.dtype) return (binner, [], labels) labels = binner = period_range(start=ax[0], end=ax[-1], freq=freq, name=ax.name) end_stamps = (labels + freq).asfreq(freq, 's').to_timestamp() if ax.tz: end_stamps = end_stamps.tz_localize(ax.tz) bins = ax.searchsorted(end_stamps, side='left') return (binner, bins, labels) def _get_period_bins(self, ax: PeriodIndex): if not isinstance(ax, PeriodIndex): raise TypeError(f'axis must be a PeriodIndex, but got an instance of {type(ax).__name__}') memb = ax.asfreq(self.freq, how=self.convention) nat_count = 0 if memb.hasnans: nat_count = np.sum(memb._isnan) memb = memb[~memb._isnan] if not len(memb): bins = np.array([], dtype=np.int64) binner = labels = PeriodIndex(data=[], freq=self.freq, name=ax.name) if len(ax) > 0: (binner, bins, labels) = _insert_nat_bin(binner, bins, labels, len(ax)) return (binner, bins, labels) freq_mult = self.freq.n start = ax.min().asfreq(self.freq, how=self.convention) end = ax.max().asfreq(self.freq, how='end') bin_shift = 0 if isinstance(self.freq, Tick): (p_start, end) = _get_period_range_edges(start, end, self.freq, closed=self.closed, origin=self.origin, offset=self.offset) start_offset = Period(start, self.freq) - Period(p_start, self.freq) bin_shift = start_offset.n % freq_mult start = p_start labels = binner = period_range(start=start, end=end, freq=self.freq, name=ax.name) i8 = memb.asi8 expected_bins_count = len(binner) * freq_mult i8_extend = expected_bins_count - (i8[-1] - i8[0]) rng = np.arange(i8[0], i8[-1] + i8_extend, freq_mult) rng += freq_mult rng -= bin_shift prng = type(memb._data)(rng, dtype=memb.dtype) bins = memb.searchsorted(prng, side='left') if nat_count > 0: (binner, bins, labels) = _insert_nat_bin(binner, bins, labels, nat_count) return (binner, bins, labels) def _set_grouper(self, obj: NDFrameT, sort: bool=False, *, gpr_index: Index | None=None) -> tuple[NDFrameT, Index, npt.NDArray[np.intp] | None]: (obj, ax, indexer) = super()._set_grouper(obj, sort, gpr_index=gpr_index) if isinstance(ax.dtype, ArrowDtype) and ax.dtype.kind in 'Mm': self._arrow_dtype = ax.dtype ax = 
Index(cast(ArrowExtensionArray, ax.array)._maybe_convert_datelike_array()) return (obj, ax, indexer) @overload def _take_new_index(obj: DataFrame, indexer: npt.NDArray[np.intp], new_index: Index) -> DataFrame: ... @overload def _take_new_index(obj: Series, indexer: npt.NDArray[np.intp], new_index: Index) -> Series: ... def _take_new_index(obj: DataFrame | Series, indexer: npt.NDArray[np.intp], new_index: Index) -> DataFrame | Series: if isinstance(obj, ABCSeries): new_values = algos.take_nd(obj._values, indexer) return obj._constructor(new_values, index=new_index, name=obj.name) elif isinstance(obj, ABCDataFrame): new_mgr = obj._mgr.reindex_indexer(new_axis=new_index, indexer=indexer, axis=1) return obj._constructor_from_mgr(new_mgr, axes=new_mgr.axes) else: raise ValueError("'obj' should be either a Series or a DataFrame") def _get_timestamp_range_edges(first: Timestamp, last: Timestamp, freq: BaseOffset, unit: str, closed: Literal['right', 'left']='left', origin: TimeGrouperOrigin='start_day', offset: Timedelta | None=None) -> tuple[Timestamp, Timestamp]: if isinstance(freq, Tick): index_tz = first.tz if isinstance(origin, Timestamp) and (origin.tz is None) != (index_tz is None): raise ValueError('The origin must have the same timezone as the index.') if origin == 'epoch': origin = Timestamp('1970-01-01', tz=index_tz) if isinstance(freq, Day): first = first.tz_localize(None) last = last.tz_localize(None) if isinstance(origin, Timestamp): origin = origin.tz_localize(None) (first, last) = _adjust_dates_anchored(first, last, freq, closed=closed, origin=origin, offset=offset, unit=unit) if isinstance(freq, Day): first = first.tz_localize(index_tz) last = last.tz_localize(index_tz, nonexistent='shift_forward') else: first = first.normalize() last = last.normalize() if closed == 'left': first = Timestamp(freq.rollback(first)) else: first = Timestamp(first - freq) last = Timestamp(last + freq) return (first, last) def _get_period_range_edges(first: Period, last: Period, freq: BaseOffset, closed: Literal['right', 'left']='left', origin: TimeGrouperOrigin='start_day', offset: Timedelta | None=None) -> tuple[Period, Period]: if not all((isinstance(obj, Period) for obj in [first, last])): raise TypeError("'first' and 'last' must be instances of type Period") first_ts = first.to_timestamp() last_ts = last.to_timestamp() adjust_first = not freq.is_on_offset(first_ts) adjust_last = freq.is_on_offset(last_ts) (first_ts, last_ts) = _get_timestamp_range_edges(first_ts, last_ts, freq, unit='ns', closed=closed, origin=origin, offset=offset) first = (first_ts + int(adjust_first) * freq).to_period(freq) last = (last_ts - int(adjust_last) * freq).to_period(freq) return (first, last) def _insert_nat_bin(binner: PeriodIndex, bins: np.ndarray, labels: PeriodIndex, nat_count: int) -> tuple[PeriodIndex, np.ndarray, PeriodIndex]: assert nat_count > 0 bins += nat_count bins = np.insert(bins, 0, nat_count) binner = binner.insert(0, NaT) labels = labels.insert(0, NaT) return (binner, bins, labels) def _adjust_dates_anchored(first: Timestamp, last: Timestamp, freq: Tick, closed: Literal['right', 'left']='right', origin: TimeGrouperOrigin='start_day', offset: Timedelta | None=None, unit: str='ns') -> tuple[Timestamp, Timestamp]: first = first.as_unit(unit) last = last.as_unit(unit) if offset is not None: offset = offset.as_unit(unit) freq_value = Timedelta(freq).as_unit(unit)._value origin_timestamp = 0 if origin == 'start_day': origin_timestamp = first.normalize()._value elif origin == 'start': origin_timestamp = 
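# --- Illustrative sketch (not part of the pandas source above) ---
# _adjust_dates_anchored snaps the first/last timestamps onto a grid of multiples
# of `freq` measured from `origin` (optionally shifted by `offset`); the resample
# keywords of the same names control this anchoring from the public API.
import pandas as pd

s = pd.Series(range(6),
              index=pd.date_range("2023-01-01 00:03", periods=6, freq="7min"))

print(s.resample("17min").sum())                  # origin='start_day' (default)
print(s.resample("17min", origin="epoch").sum())  # edges anchored at 1970-01-01
print(s.resample("17min", offset="5min").sum())   # edges shifted by 5 minutes
# --- end sketch ---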
first._value elif isinstance(origin, Timestamp): origin_timestamp = origin.as_unit(unit)._value elif origin in ['end', 'end_day']: origin_last = last if origin == 'end' else last.ceil('D') sub_freq_times = (origin_last._value - first._value) // freq_value if closed == 'left': sub_freq_times += 1 first = origin_last - sub_freq_times * freq origin_timestamp = first._value origin_timestamp += offset._value if offset else 0 first_tzinfo = first.tzinfo last_tzinfo = last.tzinfo if first_tzinfo is not None: first = first.tz_convert('UTC') if last_tzinfo is not None: last = last.tz_convert('UTC') foffset = (first._value - origin_timestamp) % freq_value loffset = (last._value - origin_timestamp) % freq_value if closed == 'right': if foffset > 0: fresult_int = first._value - foffset else: fresult_int = first._value - freq_value if loffset > 0: lresult_int = last._value + (freq_value - loffset) else: lresult_int = last._value else: if foffset > 0: fresult_int = first._value - foffset else: fresult_int = first._value if loffset > 0: lresult_int = last._value + (freq_value - loffset) else: lresult_int = last._value + freq_value fresult = Timestamp(fresult_int, unit=unit) lresult = Timestamp(lresult_int, unit=unit) if first_tzinfo is not None: fresult = fresult.tz_localize('UTC').tz_convert(first_tzinfo) if last_tzinfo is not None: lresult = lresult.tz_localize('UTC').tz_convert(last_tzinfo) return (fresult, lresult) def asfreq(obj: NDFrameT, freq, method=None, how=None, normalize: bool=False, fill_value=None) -> NDFrameT: if isinstance(obj.index, PeriodIndex): if method is not None: raise NotImplementedError("'method' argument is not supported") if how is None: how = 'E' if isinstance(freq, BaseOffset): if hasattr(freq, '_period_dtype_code'): freq = PeriodDtype(freq)._freqstr new_obj = obj.copy() new_obj.index = obj.index.asfreq(freq, how=how) elif len(obj.index) == 0: new_obj = obj.copy() new_obj.index = _asfreq_compat(obj.index, freq) else: unit = None if isinstance(obj.index, DatetimeIndex): unit = obj.index.unit dti = date_range(obj.index.min(), obj.index.max(), freq=freq, unit=unit) dti.name = obj.index.name new_obj = obj.reindex(dti, method=method, fill_value=fill_value) if normalize: new_obj.index = new_obj.index.normalize() return new_obj def _asfreq_compat(index: FreqIndexT, freq) -> FreqIndexT: if len(index) != 0: raise ValueError('Can only set arbitrary freq for empty DatetimeIndex or TimedeltaIndex') if isinstance(index, PeriodIndex): new_index = index.asfreq(freq=freq) elif isinstance(index, DatetimeIndex): new_index = DatetimeIndex([], dtype=index.dtype, freq=freq, name=index.name) elif isinstance(index, TimedeltaIndex): new_index = TimedeltaIndex([], dtype=index.dtype, freq=freq, name=index.name) else: raise TypeError(type(index)) return new_index def _apply(grouped: GroupBy, how: Callable, *args, include_groups: bool, **kwargs) -> DataFrame: target_message = 'DataFrameGroupBy.apply operated on the grouping columns' new_message = _apply_groupings_depr.format('DataFrameGroupBy', 'resample') with rewrite_warning(target_message=target_message, target_category=DeprecationWarning, new_message=new_message): result = grouped.apply(how, *args, include_groups=include_groups, **kwargs) return result # File: pandas-main/pandas/core/reshape/api.py from pandas.core.reshape.concat import concat from pandas.core.reshape.encoding import from_dummies, get_dummies from pandas.core.reshape.melt import lreshape, melt, wide_to_long from pandas.core.reshape.merge import merge, merge_asof, merge_ordered from 
pandas.core.reshape.pivot import crosstab, pivot, pivot_table from pandas.core.reshape.tile import cut, qcut __all__ = ['concat', 'crosstab', 'cut', 'from_dummies', 'get_dummies', 'lreshape', 'melt', 'merge', 'merge_asof', 'merge_ordered', 'pivot', 'pivot_table', 'qcut', 'wide_to_long'] # File: pandas-main/pandas/core/reshape/concat.py """""" from __future__ import annotations from collections import abc import types from typing import TYPE_CHECKING, Literal, cast, overload import warnings import numpy as np from pandas._libs import lib from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_bool, is_scalar from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries from pandas.core.dtypes.missing import isna from pandas.core.arrays.categorical import factorize_from_iterable, factorize_from_iterables import pandas.core.common as com from pandas.core.indexes.api import Index, MultiIndex, all_indexes_same, default_index, ensure_index, get_objs_combined_axis, get_unanimous_names from pandas.core.internals import concatenate_managers if TYPE_CHECKING: from collections.abc import Callable, Hashable, Iterable, Mapping from pandas._typing import Axis, AxisInt, HashableT from pandas import DataFrame, Series @overload def concat(objs: Iterable[DataFrame] | Mapping[HashableT, DataFrame], *, axis: Literal[0, 'index']=..., join: str=..., ignore_index: bool=..., keys: Iterable[Hashable] | None=..., levels=..., names: list[HashableT] | None=..., verify_integrity: bool=..., sort: bool=..., copy: bool | lib.NoDefault=...) -> DataFrame: ... @overload def concat(objs: Iterable[Series] | Mapping[HashableT, Series], *, axis: Literal[0, 'index']=..., join: str=..., ignore_index: bool=..., keys: Iterable[Hashable] | None=..., levels=..., names: list[HashableT] | None=..., verify_integrity: bool=..., sort: bool=..., copy: bool | lib.NoDefault=...) -> Series: ... @overload def concat(objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], *, axis: Literal[0, 'index']=..., join: str=..., ignore_index: bool=..., keys: Iterable[Hashable] | None=..., levels=..., names: list[HashableT] | None=..., verify_integrity: bool=..., sort: bool=..., copy: bool | lib.NoDefault=...) -> DataFrame | Series: ... @overload def concat(objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], *, axis: Literal[1, 'columns'], join: str=..., ignore_index: bool=..., keys: Iterable[Hashable] | None=..., levels=..., names: list[HashableT] | None=..., verify_integrity: bool=..., sort: bool=..., copy: bool | lib.NoDefault=...) -> DataFrame: ... @overload def concat(objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], *, axis: Axis=..., join: str=..., ignore_index: bool=..., keys: Iterable[Hashable] | None=..., levels=..., names: list[HashableT] | None=..., verify_integrity: bool=..., sort: bool=..., copy: bool | lib.NoDefault=...) -> DataFrame | Series: ... def concat(objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], *, axis: Axis=0, join: str='outer', ignore_index: bool=False, keys: Iterable[Hashable] | None=None, levels=None, names: list[HashableT] | None=None, verify_integrity: bool=False, sort: bool=False, copy: bool | lib.NoDefault=lib.no_default) -> DataFrame | Series: if ignore_index and keys is not None: raise ValueError(f'Cannot set ignore_index={ignore_index!r} and specify keys. 
Either should be used.') if copy is not lib.no_default: warnings.warn('The copy keyword is deprecated and will be removed in a future version. Copy-on-Write is active in pandas since 3.0 which utilizes a lazy copy mechanism that defers copies until necessary. Use .copy() to make an eager copy if necessary.', DeprecationWarning, stacklevel=find_stack_level()) if join == 'outer': intersect = False elif join == 'inner': intersect = True else: raise ValueError('Only can inner (intersect) or outer (union) join the other axis') if not is_bool(sort): raise ValueError(f"The 'sort' keyword only accepts boolean values; {sort} was passed.") sort = bool(sort) (objs, keys, ndims) = _clean_keys_and_objs(objs, keys) (sample, objs) = _get_sample_object(objs, ndims, keys, names, levels, intersect) if sample.ndim == 1: from pandas import DataFrame bm_axis = DataFrame._get_axis_number(axis) is_frame = False is_series = True else: bm_axis = sample._get_axis_number(axis) is_frame = True is_series = False bm_axis = sample._get_block_manager_axis(bm_axis) if len(ndims) > 1: objs = _sanitize_mixed_ndim(objs, sample, ignore_index, bm_axis) axis = 1 - bm_axis if is_frame else 0 names = names or getattr(keys, 'names', None) return _get_result(objs, is_series, bm_axis, ignore_index, intersect, sort, keys, levels, verify_integrity, names, axis) def _sanitize_mixed_ndim(objs: list[Series | DataFrame], sample: Series | DataFrame, ignore_index: bool, axis: AxisInt) -> list[Series | DataFrame]: new_objs = [] current_column = 0 max_ndim = sample.ndim for obj in objs: ndim = obj.ndim if ndim == max_ndim: pass elif ndim != max_ndim - 1: raise ValueError('cannot concatenate unaligned mixed dimensional NDFrame objects') else: name = getattr(obj, 'name', None) if ignore_index or name is None: if axis == 1: name = 0 else: name = current_column current_column += 1 obj = sample._constructor(obj, copy=False) if isinstance(obj, ABCDataFrame): obj.columns = range(name, name + 1, 1) else: obj = sample._constructor({name: obj}, copy=False) new_objs.append(obj) return new_objs def _get_result(objs: list[Series | DataFrame], is_series: bool, bm_axis: AxisInt, ignore_index: bool, intersect: bool, sort: bool, keys: Iterable[Hashable] | None, levels, verify_integrity: bool, names: list[HashableT] | None, axis: AxisInt): cons: Callable[..., DataFrame | Series] sample: DataFrame | Series if is_series: sample = cast('Series', objs[0]) if bm_axis == 0: name = com.consensus_name_attr(objs) cons = sample._constructor arrs = [ser._values for ser in objs] res = concat_compat(arrs, axis=0) if ignore_index: new_index: Index = default_index(len(res)) else: new_index = _get_concat_axis_series(objs, ignore_index, bm_axis, keys, levels, verify_integrity, names) mgr = type(sample._mgr).from_array(res, index=new_index) result = sample._constructor_from_mgr(mgr, axes=mgr.axes) result._name = name return result.__finalize__(types.SimpleNamespace(objs=objs), method='concat') else: data = dict(enumerate(objs)) cons = sample._constructor_expanddim index = get_objs_combined_axis(objs, axis=objs[0]._get_block_manager_axis(0), intersect=intersect, sort=sort) columns = _get_concat_axis_series(objs, ignore_index, bm_axis, keys, levels, verify_integrity, names) df = cons(data, index=index, copy=False) df.columns = columns return df.__finalize__(types.SimpleNamespace(objs=objs), method='concat') else: sample = cast('DataFrame', objs[0]) mgrs_indexers = [] result_axes = new_axes(objs, bm_axis, intersect, sort, keys, names, axis, levels, verify_integrity, ignore_index) 
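# NOTE (editor's annotation, not part of the original source): in the DataFrame
# branch below, each input frame gets an indexer that realigns its
# non-concatenation axis to the combined ``result_axes``; ``concatenate_managers``
# then stitches the blocks along ``bm_axis`` and fills labels missing from an
# input with NA. A minimal sketch of the user-facing behaviour this implements:
#
#   >>> import pandas as pd
#   >>> a = pd.DataFrame({"x": [1, 2]})
#   >>> b = pd.DataFrame({"x": [3], "y": [4.0]})
#   >>> pd.concat([a, b], ignore_index=True)
#      x    y
#   0  1  NaN
#   1  2  NaN
#   2  3  4.0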
for obj in objs: indexers = {} for (ax, new_labels) in enumerate(result_axes): if ax == bm_axis: continue obj_labels = obj.axes[1 - ax] if not new_labels.equals(obj_labels): indexers[ax] = obj_labels.get_indexer(new_labels) mgrs_indexers.append((obj._mgr, indexers)) new_data = concatenate_managers(mgrs_indexers, result_axes, concat_axis=bm_axis, copy=False) out = sample._constructor_from_mgr(new_data, axes=new_data.axes) return out.__finalize__(types.SimpleNamespace(objs=objs), method='concat') def new_axes(objs: list[Series | DataFrame], bm_axis: AxisInt, intersect: bool, sort: bool, keys: Iterable[Hashable] | None, names: list[HashableT] | None, axis: AxisInt, levels, verify_integrity: bool, ignore_index: bool) -> list[Index]: return [_get_concat_axis_dataframe(objs, axis, ignore_index, keys, names, levels, verify_integrity) if i == bm_axis else get_objs_combined_axis(objs, axis=objs[0]._get_block_manager_axis(i), intersect=intersect, sort=sort) for i in range(2)] def _get_concat_axis_series(objs: list[Series | DataFrame], ignore_index: bool, bm_axis: AxisInt, keys: Iterable[Hashable] | None, levels, verify_integrity: bool, names: list[HashableT] | None) -> Index: if ignore_index: return default_index(len(objs)) elif bm_axis == 0: indexes = [x.index for x in objs] if keys is None: if levels is not None: raise ValueError('levels supported only when keys is not None') concat_axis = _concat_indexes(indexes) else: concat_axis = _make_concat_multiindex(indexes, keys, levels, names) if verify_integrity and (not concat_axis.is_unique): overlap = concat_axis[concat_axis.duplicated()].unique() raise ValueError(f'Indexes have overlapping values: {overlap}') return concat_axis elif keys is None: result_names: list[Hashable] = [None] * len(objs) num = 0 has_names = False for (i, x) in enumerate(objs): if x.ndim != 1: raise TypeError(f"Cannot concatenate type 'Series' with object of type '{type(x).__name__}'") if x.name is not None: result_names[i] = x.name has_names = True else: result_names[i] = num num += 1 if has_names: return Index(result_names) else: return default_index(len(objs)) else: return ensure_index(keys).set_names(names) def _get_concat_axis_dataframe(objs: list[Series | DataFrame], axis: AxisInt, ignore_index: bool, keys: Iterable[Hashable] | None, names: list[HashableT] | None, levels, verify_integrity: bool) -> Index: indexes_gen = (x.axes[axis] for x in objs) if ignore_index: return default_index(sum((len(i) for i in indexes_gen))) else: indexes = list(indexes_gen) if keys is None: if levels is not None: raise ValueError('levels supported only when keys is not None') concat_axis = _concat_indexes(indexes) else: concat_axis = _make_concat_multiindex(indexes, keys, levels, names) if verify_integrity and (not concat_axis.is_unique): overlap = concat_axis[concat_axis.duplicated()].unique() raise ValueError(f'Indexes have overlapping values: {overlap}') return concat_axis def _clean_keys_and_objs(objs: Iterable[Series | DataFrame] | Mapping[HashableT, Series | DataFrame], keys) -> tuple[list[Series | DataFrame], Index | None, set[int]]: if isinstance(objs, abc.Mapping): if keys is None: keys = objs.keys() objs = [objs[k] for k in keys] elif isinstance(objs, (ABCSeries, ABCDataFrame)) or is_scalar(objs): raise TypeError(f'first argument must be an iterable of pandas objects, you passed an object of type "{type(objs).__name__}"') elif not isinstance(objs, abc.Sized): objs = list(objs) if len(objs) == 0: raise ValueError('No objects to concatenate') if keys is not None: if not 
isinstance(keys, Index): keys = Index(keys) if len(keys) != len(objs): raise ValueError(f'The length of the keys ({len(keys)}) must match the length of the objects to concatenate ({len(objs)})') key_indices = [] clean_objs = [] ndims = set() for (i, obj) in enumerate(objs): if obj is None: continue elif isinstance(obj, (ABCSeries, ABCDataFrame)): key_indices.append(i) clean_objs.append(obj) ndims.add(obj.ndim) else: msg = f"cannot concatenate object of type '{type(obj)}'; only Series and DataFrame objs are valid" raise TypeError(msg) if keys is not None and len(key_indices) < len(keys): keys = keys.take(key_indices) if len(clean_objs) == 0: raise ValueError('All objects passed were None') return (clean_objs, keys, ndims) def _get_sample_object(objs: list[Series | DataFrame], ndims: set[int], keys, names, levels, intersect: bool) -> tuple[Series | DataFrame, list[Series | DataFrame]]: if len(ndims) > 1: max_ndim = max(ndims) for obj in objs: if obj.ndim == max_ndim and sum(obj.shape): return (obj, objs) elif keys is None and names is None and (levels is None) and (not intersect): if ndims.pop() == 2: non_empties = [obj for obj in objs if sum(obj.shape)] else: non_empties = objs if len(non_empties): return (non_empties[0], non_empties) return (objs[0], objs) def _concat_indexes(indexes) -> Index: return indexes[0].append(indexes[1:]) def validate_unique_levels(levels: list[Index]) -> None: for level in levels: if not level.is_unique: raise ValueError(f'Level values not unique: {level.tolist()}') def _make_concat_multiindex(indexes, keys, levels=None, names=None) -> MultiIndex: if levels is None and isinstance(keys[0], tuple) or (levels is not None and len(levels) > 1): zipped = list(zip(*keys)) if names is None: names = [None] * len(zipped) if levels is None: (_, levels) = factorize_from_iterables(zipped) else: levels = [ensure_index(x) for x in levels] validate_unique_levels(levels) else: zipped = [keys] if names is None: names = [None] if levels is None: levels = [ensure_index(keys).unique()] else: levels = [ensure_index(x) for x in levels] validate_unique_levels(levels) if not all_indexes_same(indexes): codes_list = [] for (hlevel, level) in zip(zipped, levels): to_concat = [] if isinstance(hlevel, Index) and hlevel.equals(level): lens = [len(idx) for idx in indexes] codes_list.append(np.repeat(np.arange(len(hlevel)), lens)) else: for (key, index) in zip(hlevel, indexes): mask = isna(level) & isna(key) | (level == key) if not mask.any(): raise ValueError(f'Key {key} not in level {level}') i = np.nonzero(mask)[0][0] to_concat.append(np.repeat(i, len(index))) codes_list.append(np.concatenate(to_concat)) concat_index = _concat_indexes(indexes) if isinstance(concat_index, MultiIndex): levels.extend(concat_index.levels) codes_list.extend(concat_index.codes) else: (codes, categories) = factorize_from_iterable(concat_index) levels.append(categories) codes_list.append(codes) if len(names) == len(levels): names = list(names) else: if not len({idx.nlevels for idx in indexes}) == 1: raise AssertionError('Cannot concat indices that do not have the same number of levels') names = list(names) + list(get_unanimous_names(*indexes)) return MultiIndex(levels=levels, codes=codes_list, names=names, verify_integrity=False) new_index = indexes[0] n = len(new_index) kpieces = len(indexes) new_names = list(names) new_levels = list(levels) new_codes = [] for (hlevel, level) in zip(zipped, levels): hlevel_index = ensure_index(hlevel) mapped = level.get_indexer(hlevel_index) mask = mapped == -1 if mask.any(): raise 
ValueError(f'Values not found in passed level: {hlevel_index[mask]!s}') new_codes.append(np.repeat(mapped, n)) if isinstance(new_index, MultiIndex): new_levels.extend(new_index.levels) new_codes.extend((np.tile(lab, kpieces) for lab in new_index.codes)) else: new_levels.append(new_index.unique()) single_codes = new_index.unique().get_indexer(new_index) new_codes.append(np.tile(single_codes, kpieces)) if len(new_names) < len(new_levels): new_names.extend(new_index.names) return MultiIndex(levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False) # File: pandas-main/pandas/core/reshape/encoding.py from __future__ import annotations from collections import defaultdict from collections.abc import Hashable, Iterable import itertools from typing import TYPE_CHECKING import numpy as np from pandas._libs import missing as libmissing from pandas._libs.sparse import IntIndex from pandas.core.dtypes.common import is_integer_dtype, is_list_like, is_object_dtype, pandas_dtype from pandas.core.dtypes.dtypes import ArrowDtype, CategoricalDtype from pandas.core.arrays import SparseArray from pandas.core.arrays.categorical import factorize_from_iterable from pandas.core.arrays.string_ import StringDtype from pandas.core.frame import DataFrame from pandas.core.indexes.api import Index, default_index from pandas.core.series import Series if TYPE_CHECKING: from pandas._typing import NpDtype def get_dummies(data, prefix=None, prefix_sep: str | Iterable[str] | dict[str, str]='_', dummy_na: bool=False, columns=None, sparse: bool=False, drop_first: bool=False, dtype: NpDtype | None=None) -> DataFrame: from pandas.core.reshape.concat import concat dtypes_to_encode = ['object', 'string', 'category'] if isinstance(data, DataFrame): if columns is None: data_to_encode = data.select_dtypes(include=dtypes_to_encode) elif not is_list_like(columns): raise TypeError('Input must be a list-like for parameter `columns`') else: data_to_encode = data[columns] def check_len(item, name: str) -> None: if is_list_like(item): if not len(item) == data_to_encode.shape[1]: len_msg = f"Length of '{name}' ({len(item)}) did not match the length of the columns being encoded ({data_to_encode.shape[1]})." 
raise ValueError(len_msg) check_len(prefix, 'prefix') check_len(prefix_sep, 'prefix_sep') if isinstance(prefix, str): prefix = itertools.cycle([prefix]) if isinstance(prefix, dict): prefix = [prefix[col] for col in data_to_encode.columns] if prefix is None: prefix = data_to_encode.columns if isinstance(prefix_sep, str): prefix_sep = itertools.cycle([prefix_sep]) elif isinstance(prefix_sep, dict): prefix_sep = [prefix_sep[col] for col in data_to_encode.columns] with_dummies: list[DataFrame] if data_to_encode.shape == data.shape: with_dummies = [] elif columns is not None: with_dummies = [data.drop(columns, axis=1)] else: with_dummies = [data.select_dtypes(exclude=dtypes_to_encode)] for (col, pre, sep) in zip(data_to_encode.items(), prefix, prefix_sep): dummy = _get_dummies_1d(col[1], prefix=pre, prefix_sep=sep, dummy_na=dummy_na, sparse=sparse, drop_first=drop_first, dtype=dtype) with_dummies.append(dummy) result = concat(with_dummies, axis=1) else: result = _get_dummies_1d(data, prefix, prefix_sep, dummy_na, sparse=sparse, drop_first=drop_first, dtype=dtype) return result def _get_dummies_1d(data, prefix, prefix_sep: str | Iterable[str] | dict[str, str]='_', dummy_na: bool=False, sparse: bool=False, drop_first: bool=False, dtype: NpDtype | None=None) -> DataFrame: from pandas.core.reshape.concat import concat (codes, levels) = factorize_from_iterable(Series(data, copy=False)) if dtype is None and hasattr(data, 'dtype'): input_dtype = data.dtype if isinstance(input_dtype, CategoricalDtype): input_dtype = input_dtype.categories.dtype if isinstance(input_dtype, ArrowDtype): import pyarrow as pa dtype = ArrowDtype(pa.bool_()) elif isinstance(input_dtype, StringDtype) and input_dtype.na_value is libmissing.NA: dtype = pandas_dtype('boolean') else: dtype = np.dtype(bool) elif dtype is None: dtype = np.dtype(bool) _dtype = pandas_dtype(dtype) if is_object_dtype(_dtype): raise ValueError('dtype=object is not a valid dtype for get_dummies') def get_empty_frame(data) -> DataFrame: index: Index | np.ndarray if isinstance(data, Series): index = data.index else: index = default_index(len(data)) return DataFrame(index=index) if not dummy_na and len(levels) == 0: return get_empty_frame(data) codes = codes.copy() if dummy_na: codes[codes == -1] = len(levels) levels = levels.insert(len(levels), np.nan) if drop_first and len(levels) == 1: return get_empty_frame(data) number_of_cols = len(levels) if prefix is None: dummy_cols = levels else: dummy_cols = Index([f'{prefix}{prefix_sep}{level}' for level in levels]) index: Index | None if isinstance(data, Series): index = data.index else: index = None if sparse: fill_value: bool | float if is_integer_dtype(dtype): fill_value = 0 elif dtype == np.dtype(bool): fill_value = False else: fill_value = 0.0 sparse_series = [] N = len(data) sp_indices: list[list] = [[] for _ in range(len(dummy_cols))] mask = codes != -1 codes = codes[mask] n_idx = np.arange(N)[mask] for (ndx, code) in zip(n_idx, codes): sp_indices[code].append(ndx) if drop_first: sp_indices = sp_indices[1:] dummy_cols = dummy_cols[1:] for (col, ixs) in zip(dummy_cols, sp_indices): sarr = SparseArray(np.ones(len(ixs), dtype=dtype), sparse_index=IntIndex(N, ixs), fill_value=fill_value, dtype=dtype) sparse_series.append(Series(data=sarr, index=index, name=col, copy=False)) return concat(sparse_series, axis=1) else: shape = (len(codes), number_of_cols) dummy_dtype: NpDtype if isinstance(_dtype, np.dtype): dummy_dtype = _dtype else: dummy_dtype = np.bool_ dummy_mat = np.zeros(shape=shape, dtype=dummy_dtype, 
order='F') dummy_mat[np.arange(len(codes)), codes] = 1 if not dummy_na: dummy_mat[codes == -1] = 0 if drop_first: dummy_mat = dummy_mat[:, 1:] dummy_cols = dummy_cols[1:] return DataFrame(dummy_mat, index=index, columns=dummy_cols, dtype=_dtype) def from_dummies(data: DataFrame, sep: None | str=None, default_category: None | Hashable | dict[str, Hashable]=None) -> DataFrame: from pandas.core.reshape.concat import concat if not isinstance(data, DataFrame): raise TypeError(f"Expected 'data' to be a 'DataFrame'; Received 'data' of type: {type(data).__name__}") col_isna_mask = data.isna().any() if col_isna_mask.any(): raise ValueError(f"Dummy DataFrame contains NA value in column: '{col_isna_mask.idxmax()}'") try: data_to_decode = data.astype('boolean') except TypeError as err: raise TypeError('Passed DataFrame contains non-dummy data') from err variables_slice = defaultdict(list) if sep is None: variables_slice[''] = list(data.columns) elif isinstance(sep, str): for col in data_to_decode.columns: prefix = col.split(sep)[0] if len(prefix) == len(col): raise ValueError(f'Separator not specified for column: {col}') variables_slice[prefix].append(col) else: raise TypeError(f"Expected 'sep' to be of type 'str' or 'None'; Received 'sep' of type: {type(sep).__name__}") if default_category is not None: if isinstance(default_category, dict): if not len(default_category) == len(variables_slice): len_msg = f"Length of 'default_category' ({len(default_category)}) did not match the length of the columns being encoded ({len(variables_slice)})" raise ValueError(len_msg) elif isinstance(default_category, Hashable): default_category = dict(zip(variables_slice, [default_category] * len(variables_slice))) else: raise TypeError(f"Expected 'default_category' to be of type 'None', 'Hashable', or 'dict'; Received 'default_category' of type: {type(default_category).__name__}") cat_data = {} for (prefix, prefix_slice) in variables_slice.items(): if sep is None: cats = prefix_slice.copy() else: cats = [col[len(prefix + sep):] for col in prefix_slice] assigned = data_to_decode.loc[:, prefix_slice].sum(axis=1) if any(assigned > 1): raise ValueError(f'Dummy DataFrame contains multi-assignment(s); First instance in row: {assigned.idxmax()}') if any(assigned == 0): if isinstance(default_category, dict): cats.append(default_category[prefix]) else: raise ValueError(f'Dummy DataFrame contains unassigned value(s); First instance in row: {assigned.idxmin()}') data_slice = concat((data_to_decode.loc[:, prefix_slice], assigned == 0), axis=1) else: data_slice = data_to_decode.loc[:, prefix_slice] cats_array = data._constructor_sliced(cats, dtype=data.columns.dtype) true_values = data_slice.idxmax(axis=1) indexer = data_slice.columns.get_indexer_for(true_values) cat_data[prefix] = cats_array.take(indexer).set_axis(data.index) result = DataFrame(cat_data) if sep is not None: result.columns = result.columns.astype(data.columns.dtype) return result # File: pandas-main/pandas/core/reshape/melt.py from __future__ import annotations import re from typing import TYPE_CHECKING import numpy as np from pandas.core.dtypes.common import is_iterator, is_list_like from pandas.core.dtypes.concat import concat_compat from pandas.core.dtypes.missing import notna import pandas.core.algorithms as algos from pandas.core.indexes.api import MultiIndex from pandas.core.reshape.concat import concat from pandas.core.tools.numeric import to_numeric if TYPE_CHECKING: from collections.abc import Hashable from pandas._typing import AnyArrayLike from pandas 
import DataFrame def ensure_list_vars(arg_vars, variable: str, columns) -> list: if arg_vars is not None: if not is_list_like(arg_vars): return [arg_vars] elif isinstance(columns, MultiIndex) and (not isinstance(arg_vars, list)): raise ValueError(f'{variable} must be a list of tuples when columns are a MultiIndex') else: return list(arg_vars) else: return [] def melt(frame: DataFrame, id_vars=None, value_vars=None, var_name=None, value_name: Hashable='value', col_level=None, ignore_index: bool=True) -> DataFrame: if value_name in frame.columns: raise ValueError(f'value_name ({value_name}) cannot match an element in the DataFrame columns.') id_vars = ensure_list_vars(id_vars, 'id_vars', frame.columns) value_vars_was_not_none = value_vars is not None value_vars = ensure_list_vars(value_vars, 'value_vars', frame.columns) if id_vars or value_vars: if col_level is not None: level = frame.columns.get_level_values(col_level) else: level = frame.columns labels = id_vars + value_vars idx = level.get_indexer_for(labels) missing = idx == -1 if missing.any(): missing_labels = [lab for (lab, not_found) in zip(labels, missing) if not_found] raise KeyError(f'The following id_vars or value_vars are not present in the DataFrame: {missing_labels}') if value_vars_was_not_none: frame = frame.iloc[:, algos.unique(idx)] else: frame = frame.copy(deep=False) else: frame = frame.copy(deep=False) if col_level is not None: frame.columns = frame.columns.get_level_values(col_level) if var_name is None: if isinstance(frame.columns, MultiIndex): if len(frame.columns.names) == len(set(frame.columns.names)): var_name = frame.columns.names else: var_name = [f'variable_{i}' for i in range(len(frame.columns.names))] else: var_name = [frame.columns.name if frame.columns.name is not None else 'variable'] elif is_list_like(var_name): if isinstance(frame.columns, MultiIndex): if is_iterator(var_name): var_name = list(var_name) if len(var_name) > len(frame.columns): raise ValueError(f'var_name={var_name!r} has {len(var_name)} items, but the dataframe columns only have {len(frame.columns)} levels.') else: raise ValueError(f'var_name={var_name!r} must be a scalar.') else: var_name = [var_name] (num_rows, K) = frame.shape num_cols_adjusted = K - len(id_vars) mdata: dict[Hashable, AnyArrayLike] = {} for col in id_vars: id_data = frame.pop(col) if not isinstance(id_data.dtype, np.dtype): if num_cols_adjusted > 0: mdata[col] = concat([id_data] * num_cols_adjusted, ignore_index=True) else: mdata[col] = type(id_data)([], name=id_data.name, dtype=id_data.dtype) else: mdata[col] = np.tile(id_data._values, num_cols_adjusted) mcolumns = id_vars + var_name + [value_name] if frame.shape[1] > 0 and (not any((not isinstance(dt, np.dtype) and dt._supports_2d for dt in frame.dtypes))): mdata[value_name] = concat([frame.iloc[:, i] for i in range(frame.shape[1])], ignore_index=True).values else: mdata[value_name] = frame._values.ravel('F') for (i, col) in enumerate(var_name): mdata[col] = frame.columns._get_level_values(i).repeat(num_rows) result = frame._constructor(mdata, columns=mcolumns) if not ignore_index: taker = np.tile(np.arange(len(frame)), num_cols_adjusted) result.index = frame.index.take(taker) return result def lreshape(data: DataFrame, groups: dict, dropna: bool=True) -> DataFrame: mdata = {} pivot_cols = [] all_cols: set[Hashable] = set() K = len(next(iter(groups.values()))) for (target, names) in groups.items(): if len(names) != K: raise ValueError('All column lists must be same length') to_concat = [data[col]._values for col in 
names] mdata[target] = concat_compat(to_concat) pivot_cols.append(target) all_cols = all_cols.union(names) id_cols = list(data.columns.difference(all_cols)) for col in id_cols: mdata[col] = np.tile(data[col]._values, K) if dropna: mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool) for c in pivot_cols: mask &= notna(mdata[c]) if not mask.all(): mdata = {k: v[mask] for (k, v) in mdata.items()} return data._constructor(mdata, columns=id_cols + pivot_cols) def wide_to_long(df: DataFrame, stubnames, i, j, sep: str='', suffix: str='\\d+') -> DataFrame: def get_var_names(df, stub: str, sep: str, suffix: str): regex = f'^{re.escape(stub)}{re.escape(sep)}{suffix}$' return df.columns[df.columns.str.match(regex)] def melt_stub(df, stub: str, i, j, value_vars, sep: str): newdf = melt(df, id_vars=i, value_vars=value_vars, value_name=stub.rstrip(sep), var_name=j) newdf[j] = newdf[j].str.replace(re.escape(stub + sep), '', regex=True) try: newdf[j] = to_numeric(newdf[j]) except (TypeError, ValueError, OverflowError): pass return newdf.set_index(i + [j]) if not is_list_like(stubnames): stubnames = [stubnames] else: stubnames = list(stubnames) if df.columns.isin(stubnames).any(): raise ValueError("stubname can't be identical to a column name") if not is_list_like(i): i = [i] else: i = list(i) if df[i].duplicated().any(): raise ValueError('the id variables need to uniquely identify each row') _melted = [] value_vars_flattened = [] for stub in stubnames: value_var = get_var_names(df, stub, sep, suffix) value_vars_flattened.extend(value_var) _melted.append(melt_stub(df, stub, i, j, value_var, sep)) melted = concat(_melted, axis=1) id_vars = df.columns.difference(value_vars_flattened) new = df[id_vars] if len(i) == 1: return new.set_index(i).join(melted) else: return new.merge(melted.reset_index(), on=i).set_index(i + [j]) # File: pandas-main/pandas/core/reshape/merge.py """""" from __future__ import annotations from collections.abc import Hashable, Sequence import datetime from functools import partial from typing import TYPE_CHECKING, Literal, cast, final import uuid import warnings import numpy as np from pandas._libs import Timedelta, hashtable as libhashtable, join as libjoin, lib from pandas._libs.lib import is_range_indexer from pandas._typing import AnyArrayLike, ArrayLike, IndexLabel, JoinHow, MergeHow, Shape, Suffixes, npt from pandas.errors import MergeError from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.cast import find_common_type from pandas.core.dtypes.common import ensure_int64, ensure_object, is_bool, is_bool_dtype, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_number, is_numeric_dtype, is_object_dtype, is_string_dtype, needs_i8_conversion from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries from pandas.core.dtypes.missing import isna, na_value_for_dtype from pandas import ArrowDtype, Categorical, Index, MultiIndex, Series import pandas.core.algorithms as algos from pandas.core.arrays import ArrowExtensionArray, BaseMaskedArray, ExtensionArray from pandas.core.arrays.string_ import StringDtype import pandas.core.common as com from pandas.core.construction import ensure_wrapped_if_datetimelike, extract_array from pandas.core.indexes.api import default_index from pandas.core.sorting import get_group_index, is_int64_overflow_possible if TYPE_CHECKING: from pandas 
import DataFrame from pandas.core import groupby from pandas.core.arrays import DatetimeArray from pandas.core.indexes.frozen import FrozenList _factorizers = {np.int64: libhashtable.Int64Factorizer, np.longlong: libhashtable.Int64Factorizer, np.int32: libhashtable.Int32Factorizer, np.int16: libhashtable.Int16Factorizer, np.int8: libhashtable.Int8Factorizer, np.uint64: libhashtable.UInt64Factorizer, np.uint32: libhashtable.UInt32Factorizer, np.uint16: libhashtable.UInt16Factorizer, np.uint8: libhashtable.UInt8Factorizer, np.bool_: libhashtable.UInt8Factorizer, np.float64: libhashtable.Float64Factorizer, np.float32: libhashtable.Float32Factorizer, np.complex64: libhashtable.Complex64Factorizer, np.complex128: libhashtable.Complex128Factorizer, np.object_: libhashtable.ObjectFactorizer} if np.intc is not np.int32: _factorizers[np.intc] = libhashtable.Int64Factorizer _known = (np.ndarray, ExtensionArray, Index, ABCSeries) def merge(left: DataFrame | Series, right: DataFrame | Series, how: MergeHow='inner', on: IndexLabel | AnyArrayLike | None=None, left_on: IndexLabel | AnyArrayLike | None=None, right_on: IndexLabel | AnyArrayLike | None=None, left_index: bool=False, right_index: bool=False, sort: bool=False, suffixes: Suffixes=('_x', '_y'), copy: bool | lib.NoDefault=lib.no_default, indicator: str | bool=False, validate: str | None=None) -> DataFrame: left_df = _validate_operand(left) left._check_copy_deprecation(copy) right_df = _validate_operand(right) if how == 'cross': return _cross_merge(left_df, right_df, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, indicator=indicator, validate=validate) else: op = _MergeOperation(left_df, right_df, how=how, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, indicator=indicator, validate=validate) return op.get_result() def _cross_merge(left: DataFrame, right: DataFrame, on: IndexLabel | AnyArrayLike | None=None, left_on: IndexLabel | AnyArrayLike | None=None, right_on: IndexLabel | AnyArrayLike | None=None, left_index: bool=False, right_index: bool=False, sort: bool=False, suffixes: Suffixes=('_x', '_y'), indicator: str | bool=False, validate: str | None=None) -> DataFrame: if left_index or right_index or right_on is not None or (left_on is not None) or (on is not None): raise MergeError('Can not pass on, right_on, left_on or set right_index=True or left_index=True') cross_col = f'_cross_{uuid.uuid4()}' left = left.assign(**{cross_col: 1}) right = right.assign(**{cross_col: 1}) left_on = right_on = [cross_col] res = merge(left, right, how='inner', on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, sort=sort, suffixes=suffixes, indicator=indicator, validate=validate) del res[cross_col] return res def _groupby_and_merge(by, left: DataFrame | Series, right: DataFrame | Series, merge_pieces): pieces = [] if not isinstance(by, (list, tuple)): by = [by] lby = left.groupby(by, sort=False) rby: groupby.DataFrameGroupBy | groupby.SeriesGroupBy | None = None if all((item in right.columns for item in by)): rby = right.groupby(by, sort=False) for (key, lhs) in lby._grouper.get_iterator(lby._selected_obj): if rby is None: rhs = right else: try: rhs = right.take(rby.indices[key]) except KeyError: lcols = lhs.columns.tolist() cols = lcols + [r for r in right.columns if r not in set(lcols)] merged = lhs.reindex(columns=cols) merged.index = range(len(merged)) pieces.append(merged) 
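# NOTE (editor's annotation, not part of the original source): this fallback runs
# when a left-side group key has no rows in the right frame (KeyError from
# ``rby.indices``): the left piece is kept as-is, the right-only columns are
# appended via ``reindex`` as all-NA, and the loop skips the merge for that
# group. This helper backs ``merge_ordered(..., left_by=...)`` defined below.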
continue merged = merge_pieces(lhs, rhs) merged[by] = key pieces.append(merged) from pandas.core.reshape.concat import concat result = concat(pieces, ignore_index=True) result = result.reindex(columns=pieces[0].columns) return (result, lby) def merge_ordered(left: DataFrame | Series, right: DataFrame | Series, on: IndexLabel | None=None, left_on: IndexLabel | None=None, right_on: IndexLabel | None=None, left_by=None, right_by=None, fill_method: str | None=None, suffixes: Suffixes=('_x', '_y'), how: JoinHow='outer') -> DataFrame: def _merger(x, y) -> DataFrame: op = _OrderedMerge(x, y, on=on, left_on=left_on, right_on=right_on, suffixes=suffixes, fill_method=fill_method, how=how) return op.get_result() if left_by is not None and right_by is not None: raise ValueError('Can only group either left or right frames') if left_by is not None: if isinstance(left_by, str): left_by = [left_by] check = set(left_by).difference(left.columns) if len(check) != 0: raise KeyError(f'{check} not found in left columns') (result, _) = _groupby_and_merge(left_by, left, right, lambda x, y: _merger(x, y)) elif right_by is not None: if isinstance(right_by, str): right_by = [right_by] check = set(right_by).difference(right.columns) if len(check) != 0: raise KeyError(f'{check} not found in right columns') (result, _) = _groupby_and_merge(right_by, right, left, lambda x, y: _merger(y, x)) else: result = _merger(left, right) return result def merge_asof(left: DataFrame | Series, right: DataFrame | Series, on: IndexLabel | None=None, left_on: IndexLabel | None=None, right_on: IndexLabel | None=None, left_index: bool=False, right_index: bool=False, by=None, left_by=None, right_by=None, suffixes: Suffixes=('_x', '_y'), tolerance: int | datetime.timedelta | None=None, allow_exact_matches: bool=True, direction: str='backward') -> DataFrame: op = _AsOfMerge(left, right, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, by=by, left_by=left_by, right_by=right_by, suffixes=suffixes, how='asof', tolerance=tolerance, allow_exact_matches=allow_exact_matches, direction=direction) return op.get_result() class _MergeOperation: _merge_type = 'merge' how: JoinHow | Literal['asof'] on: IndexLabel | None left_on: Sequence[Hashable | AnyArrayLike] right_on: Sequence[Hashable | AnyArrayLike] left_index: bool right_index: bool sort: bool suffixes: Suffixes indicator: str | bool validate: str | None join_names: list[Hashable] right_join_keys: list[ArrayLike] left_join_keys: list[ArrayLike] def __init__(self, left: DataFrame | Series, right: DataFrame | Series, how: JoinHow | Literal['asof']='inner', on: IndexLabel | AnyArrayLike | None=None, left_on: IndexLabel | AnyArrayLike | None=None, right_on: IndexLabel | AnyArrayLike | None=None, left_index: bool=False, right_index: bool=False, sort: bool=True, suffixes: Suffixes=('_x', '_y'), indicator: str | bool=False, validate: str | None=None) -> None: _left = _validate_operand(left) _right = _validate_operand(right) self.left = self.orig_left = _left self.right = self.orig_right = _right self.how = how self.on = com.maybe_make_list(on) self.suffixes = suffixes self.sort = sort or how == 'outer' self.left_index = left_index self.right_index = right_index self.indicator = indicator if not is_bool(left_index): raise ValueError(f'left_index parameter must be of type bool, not {type(left_index)}') if not is_bool(right_index): raise ValueError(f'right_index parameter must be of type bool, not {type(right_index)}') if _left.columns.nlevels != 
_right.columns.nlevels: msg = f'Not allowed to merge between different levels. ({_left.columns.nlevels} levels on the left, {_right.columns.nlevels} on the right)' raise MergeError(msg) merge_type = {'left', 'right', 'inner', 'outer', 'cross', 'asof'} if how not in merge_type: raise ValueError(f"'{how}' is not a valid Merge type: left, right, inner, outer, cross, asof") (self.left_on, self.right_on) = self._validate_left_right_on(left_on, right_on) (self.left_join_keys, self.right_join_keys, self.join_names, left_drop, right_drop) = self._get_merge_keys() if left_drop: self.left = self.left._drop_labels_or_levels(left_drop) if right_drop: self.right = self.right._drop_labels_or_levels(right_drop) self._maybe_require_matching_dtypes(self.left_join_keys, self.right_join_keys) self._validate_tolerance(self.left_join_keys) self._maybe_coerce_merge_keys() if validate is not None: self._validate_validate_kwd(validate) def _maybe_require_matching_dtypes(self, left_join_keys: list[ArrayLike], right_join_keys: list[ArrayLike]) -> None: pass def _validate_tolerance(self, left_join_keys: list[ArrayLike]) -> None: pass @final def _reindex_and_concat(self, join_index: Index, left_indexer: npt.NDArray[np.intp] | None, right_indexer: npt.NDArray[np.intp] | None) -> DataFrame: left = self.left[:] right = self.right[:] (llabels, rlabels) = _items_overlap_with_suffix(self.left._info_axis, self.right._info_axis, self.suffixes) if left_indexer is not None and (not is_range_indexer(left_indexer, len(left))): lmgr = left._mgr.reindex_indexer(join_index, left_indexer, axis=1, only_slice=True, allow_dups=True, use_na_proxy=True) left = left._constructor_from_mgr(lmgr, axes=lmgr.axes) left.index = join_index if right_indexer is not None and (not is_range_indexer(right_indexer, len(right))): rmgr = right._mgr.reindex_indexer(join_index, right_indexer, axis=1, only_slice=True, allow_dups=True, use_na_proxy=True) right = right._constructor_from_mgr(rmgr, axes=rmgr.axes) right.index = join_index from pandas import concat left.columns = llabels right.columns = rlabels result = concat([left, right], axis=1) return result def get_result(self) -> DataFrame: if self.indicator: (self.left, self.right) = self._indicator_pre_merge(self.left, self.right) (join_index, left_indexer, right_indexer) = self._get_join_info() result = self._reindex_and_concat(join_index, left_indexer, right_indexer) result = result.__finalize__(self, method=self._merge_type) if self.indicator: result = self._indicator_post_merge(result) self._maybe_add_join_keys(result, left_indexer, right_indexer) self._maybe_restore_index_levels(result) return result.__finalize__(self, method='merge') @final @cache_readonly def _indicator_name(self) -> str | None: if isinstance(self.indicator, str): return self.indicator elif isinstance(self.indicator, bool): return '_merge' if self.indicator else None else: raise ValueError('indicator option can only accept boolean or string arguments') @final def _indicator_pre_merge(self, left: DataFrame, right: DataFrame) -> tuple[DataFrame, DataFrame]: columns = left.columns.union(right.columns) for i in ['_left_indicator', '_right_indicator']: if i in columns: raise ValueError(f'Cannot use `indicator=True` option when data contains a column named {i}') if self._indicator_name in columns: raise ValueError('Cannot use name of an existing column for indicator column') left = left.copy() right = right.copy() left['_left_indicator'] = 1 left['_left_indicator'] = left['_left_indicator'].astype('int8') right['_right_indicator'] = 2 
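# NOTE (editor's annotation, not part of the original source): indicator
# bookkeeping: left rows carry 1, right rows carry 2, so after the merge the
# per-row sum is 1 (left_only), 2 (right_only) or 3 (both); the sums are mapped
# to those categories in ``_indicator_post_merge``. A hedged sketch of the
# resulting behaviour:
#
#   >>> import pandas as pd
#   >>> l = pd.DataFrame({"k": [1, 2]})
#   >>> r = pd.DataFrame({"k": [2, 3]})
#   >>> pd.merge(l, r, on="k", how="outer", indicator=True)["_merge"].tolist()
#   ['left_only', 'both', 'right_only']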
right['_right_indicator'] = right['_right_indicator'].astype('int8') return (left, right) @final def _indicator_post_merge(self, result: DataFrame) -> DataFrame: result['_left_indicator'] = result['_left_indicator'].fillna(0) result['_right_indicator'] = result['_right_indicator'].fillna(0) result[self._indicator_name] = Categorical(result['_left_indicator'] + result['_right_indicator'], categories=[1, 2, 3]) result[self._indicator_name] = result[self._indicator_name].cat.rename_categories(['left_only', 'right_only', 'both']) result = result.drop(labels=['_left_indicator', '_right_indicator'], axis=1) return result @final def _maybe_restore_index_levels(self, result: DataFrame) -> None: names_to_restore = [] for (name, left_key, right_key) in zip(self.join_names, self.left_on, self.right_on): if self.orig_left._is_level_reference(left_key) and self.orig_right._is_level_reference(right_key) and (left_key == right_key) and (name not in result.index.names): names_to_restore.append(name) if names_to_restore: result.set_index(names_to_restore, inplace=True) @final def _maybe_add_join_keys(self, result: DataFrame, left_indexer: npt.NDArray[np.intp] | None, right_indexer: npt.NDArray[np.intp] | None) -> None: left_has_missing = None right_has_missing = None assert all((isinstance(x, _known) for x in self.left_join_keys)) keys = zip(self.join_names, self.left_on, self.right_on) for (i, (name, lname, rname)) in enumerate(keys): if not _should_fill(lname, rname): continue (take_left, take_right) = (None, None) if name in result: if left_indexer is not None or right_indexer is not None: if name in self.left: if left_has_missing is None: left_has_missing = False if left_indexer is None else (left_indexer == -1).any() if left_has_missing: take_right = self.right_join_keys[i] if result[name].dtype != self.left[name].dtype: take_left = self.left[name]._values elif name in self.right: if right_has_missing is None: right_has_missing = False if right_indexer is None else (right_indexer == -1).any() if right_has_missing: take_left = self.left_join_keys[i] if result[name].dtype != self.right[name].dtype: take_right = self.right[name]._values else: take_left = self.left_join_keys[i] take_right = self.right_join_keys[i] if take_left is not None or take_right is not None: if take_left is None: lvals = result[name]._values elif left_indexer is None: lvals = take_left else: take_left = extract_array(take_left, extract_numpy=True) lfill = na_value_for_dtype(take_left.dtype) lvals = algos.take_nd(take_left, left_indexer, fill_value=lfill) if take_right is None: rvals = result[name]._values elif right_indexer is None: rvals = take_right else: taker = extract_array(take_right, extract_numpy=True) rfill = na_value_for_dtype(taker.dtype) rvals = algos.take_nd(taker, right_indexer, fill_value=rfill) if left_indexer is not None and (left_indexer == -1).all(): key_col = Index(rvals) result_dtype = rvals.dtype elif right_indexer is not None and (right_indexer == -1).all(): key_col = Index(lvals) result_dtype = lvals.dtype else: key_col = Index(lvals) if left_indexer is not None: mask_left = left_indexer == -1 key_col = key_col.where(~mask_left, rvals) result_dtype = find_common_type([lvals.dtype, rvals.dtype]) if lvals.dtype.kind == 'M' and rvals.dtype.kind == 'M' and (result_dtype.kind == 'O'): result_dtype = key_col.dtype if result._is_label_reference(name): result[name] = result._constructor_sliced(key_col, dtype=result_dtype, index=result.index) elif result._is_level_reference(name): if isinstance(result.index, 
MultiIndex): key_col.name = name idx_list = [result.index.get_level_values(level_name) if level_name != name else key_col for level_name in result.index.names] result.set_index(idx_list, inplace=True) else: result.index = Index(key_col, name=name) else: result.insert(i, name or f'key_{i}', key_col) def _get_join_indexers(self) -> tuple[npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: assert self.how != 'asof' return get_join_indexers(self.left_join_keys, self.right_join_keys, sort=self.sort, how=self.how) @final def _get_join_info(self) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: left_ax = self.left.index right_ax = self.right.index if self.left_index and self.right_index and (self.how != 'asof'): (join_index, left_indexer, right_indexer) = left_ax.join(right_ax, how=self.how, return_indexers=True, sort=self.sort) elif self.right_index and self.how == 'left': (join_index, left_indexer, right_indexer) = _left_join_on_index(left_ax, right_ax, self.left_join_keys, sort=self.sort) elif self.left_index and self.how == 'right': (join_index, right_indexer, left_indexer) = _left_join_on_index(right_ax, left_ax, self.right_join_keys, sort=self.sort) else: (left_indexer, right_indexer) = self._get_join_indexers() if self.right_index: if len(self.left) > 0: join_index = self._create_join_index(left_ax, right_ax, left_indexer, how='right') elif right_indexer is None: join_index = right_ax.copy() else: join_index = right_ax.take(right_indexer) elif self.left_index: if self.how == 'asof': join_index = self._create_join_index(left_ax, right_ax, left_indexer, how='left') elif len(self.right) > 0: join_index = self._create_join_index(right_ax, left_ax, right_indexer, how='left') elif left_indexer is None: join_index = left_ax.copy() else: join_index = left_ax.take(left_indexer) else: n = len(left_ax) if left_indexer is None else len(left_indexer) join_index = default_index(n) return (join_index, left_indexer, right_indexer) @final def _create_join_index(self, index: Index, other_index: Index, indexer: npt.NDArray[np.intp] | None, how: JoinHow='left') -> Index: if self.how in (how, 'outer') and (not isinstance(other_index, MultiIndex)): mask = indexer == -1 if np.any(mask): fill_value = na_value_for_dtype(index.dtype, compat=False) index = index.append(Index([fill_value])) if indexer is None: return index.copy() return index.take(indexer) @final def _get_merge_keys(self) -> tuple[list[ArrayLike], list[ArrayLike], list[Hashable], list[Hashable], list[Hashable]]: left_keys: list[ArrayLike] = [] right_keys: list[ArrayLike] = [] join_names: list[Hashable] = [] right_drop: list[Hashable] = [] left_drop: list[Hashable] = [] (left, right) = (self.left, self.right) is_lkey = lambda x: isinstance(x, _known) and len(x) == len(left) is_rkey = lambda x: isinstance(x, _known) and len(x) == len(right) if _any(self.left_on) and _any(self.right_on): for (lk, rk) in zip(self.left_on, self.right_on): lk = extract_array(lk, extract_numpy=True) rk = extract_array(rk, extract_numpy=True) if is_lkey(lk): lk = cast(ArrayLike, lk) left_keys.append(lk) if is_rkey(rk): rk = cast(ArrayLike, rk) right_keys.append(rk) join_names.append(None) else: rk = cast(Hashable, rk) if rk is not None: right_keys.append(right._get_label_or_level_values(rk)) join_names.append(rk) else: right_keys.append(right.index._values) join_names.append(right.index.name) else: if not is_rkey(rk): rk = cast(Hashable, rk) if rk is not None: right_keys.append(right._get_label_or_level_values(rk)) else: 
right_keys.append(right.index._values) if lk is not None and lk == rk: right_drop.append(rk) else: rk = cast(ArrayLike, rk) right_keys.append(rk) if lk is not None: lk = cast(Hashable, lk) left_keys.append(left._get_label_or_level_values(lk)) join_names.append(lk) else: left_keys.append(left.index._values) join_names.append(left.index.name) elif _any(self.left_on): for k in self.left_on: if is_lkey(k): k = extract_array(k, extract_numpy=True) k = cast(ArrayLike, k) left_keys.append(k) join_names.append(None) else: k = cast(Hashable, k) left_keys.append(left._get_label_or_level_values(k)) join_names.append(k) if isinstance(self.right.index, MultiIndex): right_keys = [lev._values.take(lev_codes) for (lev, lev_codes) in zip(self.right.index.levels, self.right.index.codes)] else: right_keys = [self.right.index._values] elif _any(self.right_on): for k in self.right_on: k = extract_array(k, extract_numpy=True) if is_rkey(k): k = cast(ArrayLike, k) right_keys.append(k) join_names.append(None) else: k = cast(Hashable, k) right_keys.append(right._get_label_or_level_values(k)) join_names.append(k) if isinstance(self.left.index, MultiIndex): left_keys = [lev._values.take(lev_codes) for (lev, lev_codes) in zip(self.left.index.levels, self.left.index.codes)] else: left_keys = [self.left.index._values] return (left_keys, right_keys, join_names, left_drop, right_drop) @final def _maybe_coerce_merge_keys(self) -> None: for (lk, rk, name) in zip(self.left_join_keys, self.right_join_keys, self.join_names): if len(lk) and (not len(rk)) or (not len(lk) and len(rk)): continue lk = extract_array(lk, extract_numpy=True) rk = extract_array(rk, extract_numpy=True) lk_is_cat = isinstance(lk.dtype, CategoricalDtype) rk_is_cat = isinstance(rk.dtype, CategoricalDtype) lk_is_object_or_string = is_object_dtype(lk.dtype) or is_string_dtype(lk.dtype) rk_is_object_or_string = is_object_dtype(rk.dtype) or is_string_dtype(rk.dtype) if lk_is_cat and rk_is_cat: lk = cast(Categorical, lk) rk = cast(Categorical, rk) if lk._categories_match_up_to_permutation(rk): continue elif lk_is_cat or rk_is_cat: pass elif lk.dtype == rk.dtype: continue msg = f"You are trying to merge on {lk.dtype} and {rk.dtype} columns for key '{name}'. 
If you wish to proceed you should use pd.concat" if is_numeric_dtype(lk.dtype) and is_numeric_dtype(rk.dtype): if lk.dtype.kind == rk.dtype.kind: continue if isinstance(lk.dtype, ExtensionDtype) and (not isinstance(rk.dtype, ExtensionDtype)): ct = find_common_type([lk.dtype, rk.dtype]) if isinstance(ct, ExtensionDtype): com_cls = ct.construct_array_type() rk = com_cls._from_sequence(rk, dtype=ct, copy=False) else: rk = rk.astype(ct) elif isinstance(rk.dtype, ExtensionDtype): ct = find_common_type([lk.dtype, rk.dtype]) if isinstance(ct, ExtensionDtype): com_cls = ct.construct_array_type() lk = com_cls._from_sequence(lk, dtype=ct, copy=False) else: lk = lk.astype(ct) if is_integer_dtype(rk.dtype) and is_float_dtype(lk.dtype): with np.errstate(invalid='ignore'): casted = lk.astype(rk.dtype) mask = ~np.isnan(lk) match = lk == casted if not match[mask].all(): warnings.warn('You are merging on int and float columns where the float values are not equal to their int representation.', UserWarning, stacklevel=find_stack_level()) continue if is_float_dtype(rk.dtype) and is_integer_dtype(lk.dtype): with np.errstate(invalid='ignore'): casted = rk.astype(lk.dtype) mask = ~np.isnan(rk) match = rk == casted if not match[mask].all(): warnings.warn('You are merging on int and float columns where the float values are not equal to their int representation.', UserWarning, stacklevel=find_stack_level()) continue if lib.infer_dtype(lk, skipna=False) == lib.infer_dtype(rk, skipna=False): continue elif lk_is_object_or_string and is_bool_dtype(rk.dtype) or (is_bool_dtype(lk.dtype) and rk_is_object_or_string): pass elif lk_is_object_or_string and is_numeric_dtype(rk.dtype) or (is_numeric_dtype(lk.dtype) and rk_is_object_or_string): inferred_left = lib.infer_dtype(lk, skipna=False) inferred_right = lib.infer_dtype(rk, skipna=False) bool_types = ['integer', 'mixed-integer', 'boolean', 'empty'] string_types = ['string', 'unicode', 'mixed', 'bytes', 'empty'] if inferred_left in bool_types and inferred_right in bool_types: pass elif inferred_left in string_types and inferred_right not in string_types or (inferred_right in string_types and inferred_left not in string_types): raise ValueError(msg) elif needs_i8_conversion(lk.dtype) and (not needs_i8_conversion(rk.dtype)): raise ValueError(msg) elif not needs_i8_conversion(lk.dtype) and needs_i8_conversion(rk.dtype): raise ValueError(msg) elif isinstance(lk.dtype, DatetimeTZDtype) and (not isinstance(rk.dtype, DatetimeTZDtype)): raise ValueError(msg) elif not isinstance(lk.dtype, DatetimeTZDtype) and isinstance(rk.dtype, DatetimeTZDtype): raise ValueError(msg) elif isinstance(lk.dtype, DatetimeTZDtype) and isinstance(rk.dtype, DatetimeTZDtype) or (lk.dtype.kind == 'M' and rk.dtype.kind == 'M'): continue elif lk.dtype.kind == 'M' and rk.dtype.kind == 'm': raise ValueError(msg) elif lk.dtype.kind == 'm' and rk.dtype.kind == 'M': raise ValueError(msg) elif is_object_dtype(lk.dtype) and is_object_dtype(rk.dtype): continue if name in self.left.columns: typ = cast(Categorical, lk).categories.dtype if lk_is_cat else object self.left = self.left.copy() self.left[name] = self.left[name].astype(typ) if name in self.right.columns: typ = cast(Categorical, rk).categories.dtype if rk_is_cat else object self.right = self.right.copy() self.right[name] = self.right[name].astype(typ) def _validate_left_right_on(self, left_on, right_on): left_on = com.maybe_make_list(left_on) right_on = com.maybe_make_list(right_on) if self.on is None and left_on is None and (right_on is None): if 
self.left_index and self.right_index: (left_on, right_on) = ((), ()) elif self.left_index: raise MergeError('Must pass right_on or right_index=True') elif self.right_index: raise MergeError('Must pass left_on or left_index=True') else: left_cols = self.left.columns right_cols = self.right.columns common_cols = left_cols.intersection(right_cols) if len(common_cols) == 0: raise MergeError(f'No common columns to perform merge on. Merge options: left_on={left_on}, right_on={right_on}, left_index={self.left_index}, right_index={self.right_index}') if not left_cols.join(common_cols, how='inner').is_unique or not right_cols.join(common_cols, how='inner').is_unique: raise MergeError(f'Data columns not unique: {common_cols!r}') left_on = right_on = common_cols elif self.on is not None: if left_on is not None or right_on is not None: raise MergeError('Can only pass argument "on" OR "left_on" and "right_on", not a combination of both.') if self.left_index or self.right_index: raise MergeError('Can only pass argument "on" OR "left_index" and "right_index", not a combination of both.') left_on = right_on = self.on elif left_on is not None: if self.left_index: raise MergeError('Can only pass argument "left_on" OR "left_index" not both.') if not self.right_index and right_on is None: raise MergeError('Must pass "right_on" OR "right_index".') n = len(left_on) if self.right_index: if len(left_on) != self.right.index.nlevels: raise ValueError('len(left_on) must equal the number of levels in the index of "right"') right_on = [None] * n elif right_on is not None: if self.right_index: raise MergeError('Can only pass argument "right_on" OR "right_index" not both.') if not self.left_index and left_on is None: raise MergeError('Must pass "left_on" OR "left_index".') n = len(right_on) if self.left_index: if len(right_on) != self.left.index.nlevels: raise ValueError('len(right_on) must equal the number of levels in the index of "left"') left_on = [None] * n if len(right_on) != len(left_on): raise ValueError('len(right_on) must equal len(left_on)') return (left_on, right_on) @final def _validate_validate_kwd(self, validate: str) -> None: if self.left_index: left_unique = self.orig_left.index.is_unique else: left_unique = MultiIndex.from_arrays(self.left_join_keys).is_unique if self.right_index: right_unique = self.orig_right.index.is_unique else: right_unique = MultiIndex.from_arrays(self.right_join_keys).is_unique if validate in ['one_to_one', '1:1']: if not left_unique and (not right_unique): raise MergeError('Merge keys are not unique in either left or right dataset; not a one-to-one merge') if not left_unique: raise MergeError('Merge keys are not unique in left dataset; not a one-to-one merge') if not right_unique: raise MergeError('Merge keys are not unique in right dataset; not a one-to-one merge') elif validate in ['one_to_many', '1:m']: if not left_unique: raise MergeError('Merge keys are not unique in left dataset; not a one-to-many merge') elif validate in ['many_to_one', 'm:1']: if not right_unique: raise MergeError('Merge keys are not unique in right dataset; not a many-to-one merge') elif validate in ['many_to_many', 'm:m']: pass else: raise ValueError(f'"{validate}" is not a valid argument. 
Valid arguments are:\n- "1:1"\n- "1:m"\n- "m:1"\n- "m:m"\n- "one_to_one"\n- "one_to_many"\n- "many_to_one"\n- "many_to_many"') def get_join_indexers(left_keys: list[ArrayLike], right_keys: list[ArrayLike], sort: bool=False, how: JoinHow='inner') -> tuple[npt.NDArray[np.intp] | None, npt.NDArray[np.intp] | None]: assert len(left_keys) == len(right_keys), 'left_keys and right_keys must be the same length' left_n = len(left_keys[0]) right_n = len(right_keys[0]) if left_n == 0: if how in ['left', 'inner']: return _get_empty_indexer() elif not sort and how in ['right', 'outer']: return _get_no_sort_one_missing_indexer(right_n, True) elif right_n == 0: if how in ['right', 'inner']: return _get_empty_indexer() elif not sort and how in ['left', 'outer']: return _get_no_sort_one_missing_indexer(left_n, False) lkey: ArrayLike rkey: ArrayLike if len(left_keys) > 1: mapped = (_factorize_keys(left_keys[n], right_keys[n], sort=sort) for n in range(len(left_keys))) zipped = zip(*mapped) (llab, rlab, shape) = (list(x) for x in zipped) (lkey, rkey) = _get_join_keys(llab, rlab, tuple(shape), sort) else: lkey = left_keys[0] rkey = right_keys[0] left = Index(lkey) right = Index(rkey) if left.is_monotonic_increasing and right.is_monotonic_increasing and (left.is_unique or right.is_unique): (_, lidx, ridx) = left.join(right, how=how, return_indexers=True, sort=sort) else: (lidx, ridx) = get_join_indexers_non_unique(left._values, right._values, sort, how) if lidx is not None and is_range_indexer(lidx, len(left)): lidx = None if ridx is not None and is_range_indexer(ridx, len(right)): ridx = None return (lidx, ridx) def get_join_indexers_non_unique(left: ArrayLike, right: ArrayLike, sort: bool=False, how: JoinHow='inner') -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: (lkey, rkey, count) = _factorize_keys(left, right, sort=sort, how=how) if count == -1: return (lkey, rkey) if how == 'left': (lidx, ridx) = libjoin.left_outer_join(lkey, rkey, count, sort=sort) elif how == 'right': (ridx, lidx) = libjoin.left_outer_join(rkey, lkey, count, sort=sort) elif how == 'inner': (lidx, ridx) = libjoin.inner_join(lkey, rkey, count, sort=sort) elif how == 'outer': (lidx, ridx) = libjoin.full_outer_join(lkey, rkey, count) return (lidx, ridx) def restore_dropped_levels_multijoin(left: MultiIndex, right: MultiIndex, dropped_level_names, join_index: Index, lindexer: npt.NDArray[np.intp], rindexer: npt.NDArray[np.intp]) -> tuple[FrozenList, FrozenList, FrozenList]: def _convert_to_multiindex(index: Index) -> MultiIndex: if isinstance(index, MultiIndex): return index else: return MultiIndex.from_arrays([index._values], names=[index.name]) join_index = _convert_to_multiindex(join_index) join_levels = join_index.levels join_codes = join_index.codes join_names = join_index.names for dropped_level_name in dropped_level_names: if dropped_level_name in left.names: idx = left indexer = lindexer else: idx = right indexer = rindexer name_idx = idx.names.index(dropped_level_name) restore_levels = idx.levels[name_idx] codes = idx.codes[name_idx] if indexer is None: restore_codes = codes else: restore_codes = algos.take_nd(codes, indexer, fill_value=-1) join_levels = join_levels + [restore_levels] join_codes = join_codes + [restore_codes] join_names = join_names + [dropped_level_name] return (join_levels, join_codes, join_names) class _OrderedMerge(_MergeOperation): _merge_type = 'ordered_merge' def __init__(self, left: DataFrame | Series, right: DataFrame | Series, on: IndexLabel | None=None, left_on: IndexLabel | None=None, right_on: 
IndexLabel | None=None, left_index: bool=False, right_index: bool=False, suffixes: Suffixes=('_x', '_y'), fill_method: str | None=None, how: JoinHow | Literal['asof']='outer') -> None: self.fill_method = fill_method _MergeOperation.__init__(self, left, right, on=on, left_on=left_on, left_index=left_index, right_index=right_index, right_on=right_on, how=how, suffixes=suffixes, sort=True) def get_result(self) -> DataFrame: (join_index, left_indexer, right_indexer) = self._get_join_info() left_join_indexer: npt.NDArray[np.intp] | None right_join_indexer: npt.NDArray[np.intp] | None if self.fill_method == 'ffill': if left_indexer is None: left_join_indexer = None else: left_join_indexer = libjoin.ffill_indexer(left_indexer) if right_indexer is None: right_join_indexer = None else: right_join_indexer = libjoin.ffill_indexer(right_indexer) elif self.fill_method is None: left_join_indexer = left_indexer right_join_indexer = right_indexer else: raise ValueError("fill_method must be 'ffill' or None") result = self._reindex_and_concat(join_index, left_join_indexer, right_join_indexer) self._maybe_add_join_keys(result, left_indexer, right_indexer) return result def _asof_by_function(direction: str): name = f'asof_join_{direction}_on_X_by_Y' return getattr(libjoin, name, None) class _AsOfMerge(_OrderedMerge): _merge_type = 'asof_merge' def __init__(self, left: DataFrame | Series, right: DataFrame | Series, on: IndexLabel | None=None, left_on: IndexLabel | None=None, right_on: IndexLabel | None=None, left_index: bool=False, right_index: bool=False, by=None, left_by=None, right_by=None, suffixes: Suffixes=('_x', '_y'), how: Literal['asof']='asof', tolerance=None, allow_exact_matches: bool=True, direction: str='backward') -> None: self.by = by self.left_by = left_by self.right_by = right_by self.tolerance = tolerance self.allow_exact_matches = allow_exact_matches self.direction = direction if self.direction not in ['backward', 'forward', 'nearest']: raise MergeError(f'direction invalid: {self.direction}') if not is_bool(self.allow_exact_matches): msg = f'allow_exact_matches must be boolean, passed {self.allow_exact_matches}' raise MergeError(msg) _OrderedMerge.__init__(self, left, right, on=on, left_on=left_on, right_on=right_on, left_index=left_index, right_index=right_index, how=how, suffixes=suffixes, fill_method=None) def _validate_left_right_on(self, left_on, right_on): (left_on, right_on) = super()._validate_left_right_on(left_on, right_on) if len(left_on) != 1 and (not self.left_index): raise MergeError('can only asof on a key for left') if len(right_on) != 1 and (not self.right_index): raise MergeError('can only asof on a key for right') if self.left_index and isinstance(self.left.index, MultiIndex): raise MergeError('left can only have one index') if self.right_index and isinstance(self.right.index, MultiIndex): raise MergeError('right can only have one index') if self.by is not None: if self.left_by is not None or self.right_by is not None: raise MergeError('Can only pass by OR left_by and right_by') self.left_by = self.right_by = self.by if self.left_by is None and self.right_by is not None: raise MergeError('missing left_by') if self.left_by is not None and self.right_by is None: raise MergeError('missing right_by') if not self.left_index: left_on_0 = left_on[0] if isinstance(left_on_0, _known): lo_dtype = left_on_0.dtype else: lo_dtype = self.left._get_label_or_level_values(left_on_0).dtype if left_on_0 in self.left.columns else self.left.index.get_level_values(left_on_0) else: lo_dtype = 
self.left.index.dtype if not self.right_index: right_on_0 = right_on[0] if isinstance(right_on_0, _known): ro_dtype = right_on_0.dtype else: ro_dtype = self.right._get_label_or_level_values(right_on_0).dtype if right_on_0 in self.right.columns else self.right.index.get_level_values(right_on_0) else: ro_dtype = self.right.index.dtype if is_object_dtype(lo_dtype) or is_object_dtype(ro_dtype) or is_string_dtype(lo_dtype) or is_string_dtype(ro_dtype): raise MergeError(f'Incompatible merge dtype, {lo_dtype!r} and {ro_dtype!r}, both sides must have numeric dtype') if self.left_by is not None: if not is_list_like(self.left_by): self.left_by = [self.left_by] if not is_list_like(self.right_by): self.right_by = [self.right_by] if len(self.left_by) != len(self.right_by): raise MergeError('left_by and right_by must be the same length') left_on = self.left_by + list(left_on) right_on = self.right_by + list(right_on) return (left_on, right_on) def _maybe_require_matching_dtypes(self, left_join_keys: list[ArrayLike], right_join_keys: list[ArrayLike]) -> None: def _check_dtype_match(left: ArrayLike, right: ArrayLike, i: int) -> None: if left.dtype != right.dtype: if isinstance(left.dtype, CategoricalDtype) and isinstance(right.dtype, CategoricalDtype): msg = f'incompatible merge keys [{i}] {left.dtype!r} and {right.dtype!r}, both sides category, but not equal ones' else: msg = f'incompatible merge keys [{i}] {left.dtype!r} and {right.dtype!r}, must be the same type' raise MergeError(msg) for (i, (lk, rk)) in enumerate(zip(left_join_keys, right_join_keys)): _check_dtype_match(lk, rk, i) if self.left_index: lt = self.left.index._values else: lt = left_join_keys[-1] if self.right_index: rt = self.right.index._values else: rt = right_join_keys[-1] _check_dtype_match(lt, rt, 0) def _validate_tolerance(self, left_join_keys: list[ArrayLike]) -> None: if self.tolerance is not None: if self.left_index: lt = self.left.index._values else: lt = left_join_keys[-1] msg = f'incompatible tolerance {self.tolerance}, must be compat with type {lt.dtype!r}' if needs_i8_conversion(lt.dtype) or (isinstance(lt, ArrowExtensionArray) and lt.dtype.kind in 'mM'): if not isinstance(self.tolerance, datetime.timedelta): raise MergeError(msg) if self.tolerance < Timedelta(0): raise MergeError('tolerance must be positive') elif is_integer_dtype(lt.dtype): if not is_integer(self.tolerance): raise MergeError(msg) if self.tolerance < 0: raise MergeError('tolerance must be positive') elif is_float_dtype(lt.dtype): if not is_number(self.tolerance): raise MergeError(msg) if self.tolerance < 0: raise MergeError('tolerance must be positive') else: raise MergeError('key must be integer, timestamp or float') def _convert_values_for_libjoin(self, values: AnyArrayLike, side: str) -> np.ndarray: if not Index(values).is_monotonic_increasing: if isna(values).any(): raise ValueError(f'Merge keys contain null values on {side} side') raise ValueError(f'{side} keys must be sorted') if isinstance(values, ArrowExtensionArray): values = values._maybe_convert_datelike_array() if needs_i8_conversion(values.dtype): values = values.view('i8') elif isinstance(values, BaseMaskedArray): values = values._data elif isinstance(values, ExtensionArray): values = values.to_numpy() return values def _get_join_indexers(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: left_values = self.left.index._values if self.left_index else self.left_join_keys[-1] right_values = self.right.index._values if self.right_index else self.right_join_keys[-1] assert 
left_values.dtype == right_values.dtype tolerance = self.tolerance if tolerance is not None: if needs_i8_conversion(left_values.dtype) or (isinstance(left_values, ArrowExtensionArray) and left_values.dtype.kind in 'mM'): tolerance = Timedelta(tolerance) if left_values.dtype.kind in 'mM': if isinstance(left_values, ArrowExtensionArray): unit = left_values.dtype.pyarrow_dtype.unit else: unit = ensure_wrapped_if_datetimelike(left_values).unit tolerance = tolerance.as_unit(unit) tolerance = tolerance._value left_values = self._convert_values_for_libjoin(left_values, 'left') right_values = self._convert_values_for_libjoin(right_values, 'right') if self.left_by is not None: if self.left_index and self.right_index: left_join_keys = self.left_join_keys right_join_keys = self.right_join_keys else: left_join_keys = self.left_join_keys[0:-1] right_join_keys = self.right_join_keys[0:-1] mapped = [_factorize_keys(left_join_keys[n], right_join_keys[n], sort=False) for n in range(len(left_join_keys))] if len(left_join_keys) == 1: left_by_values = mapped[0][0] right_by_values = mapped[0][1] else: arrs = [np.concatenate(m[:2]) for m in mapped] shape = tuple((m[2] for m in mapped)) group_index = get_group_index(arrs, shape=shape, sort=False, xnull=False) left_len = len(left_join_keys[0]) left_by_values = group_index[:left_len] right_by_values = group_index[left_len:] left_by_values = ensure_int64(left_by_values) right_by_values = ensure_int64(right_by_values) func = _asof_by_function(self.direction) return func(left_values, right_values, left_by_values, right_by_values, self.allow_exact_matches, tolerance) else: func = _asof_by_function(self.direction) return func(left_values, right_values, None, None, self.allow_exact_matches, tolerance, False) def _get_multiindex_indexer(join_keys: list[ArrayLike], index: MultiIndex, sort: bool) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: mapped = (_factorize_keys(index.levels[n]._values, join_keys[n], sort=sort) for n in range(index.nlevels)) zipped = zip(*mapped) (rcodes, lcodes, shape) = (list(x) for x in zipped) if sort: rcodes = list(map(np.take, rcodes, index.codes)) else: i8copy = lambda a: a.astype('i8', subok=False) rcodes = list(map(i8copy, index.codes)) for (i, join_key) in enumerate(join_keys): mask = index.codes[i] == -1 if mask.any(): a = join_key[lcodes[i] == shape[i] - 1] if a.size == 0 or not a[0] != a[0]: shape[i] += 1 rcodes[i][mask] = shape[i] - 1 (lkey, rkey) = _get_join_keys(lcodes, rcodes, tuple(shape), sort) return (lkey, rkey) def _get_empty_indexer() -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: return (np.array([], dtype=np.intp), np.array([], dtype=np.intp)) def _get_no_sort_one_missing_indexer(n: int, left_missing: bool) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: idx = np.arange(n, dtype=np.intp) idx_missing = np.full(shape=n, fill_value=-1, dtype=np.intp) if left_missing: return (idx_missing, idx) return (idx, idx_missing) def _left_join_on_index(left_ax: Index, right_ax: Index, join_keys: list[ArrayLike], sort: bool=False) -> tuple[Index, npt.NDArray[np.intp] | None, npt.NDArray[np.intp]]: if isinstance(right_ax, MultiIndex): (lkey, rkey) = _get_multiindex_indexer(join_keys, right_ax, sort=sort) else: lkey = join_keys[0] rkey = right_ax._values (left_key, right_key, count) = _factorize_keys(lkey, rkey, sort=sort) (left_indexer, right_indexer) = libjoin.left_outer_join(left_key, right_key, count, sort=sort) if sort or len(left_ax) != len(left_indexer): join_index = left_ax.take(left_indexer) return (join_index, 
left_indexer, right_indexer) return (left_ax, None, right_indexer) def _factorize_keys(lk: ArrayLike, rk: ArrayLike, sort: bool=True, how: str | None=None) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp], int]: if isinstance(lk.dtype, DatetimeTZDtype) and isinstance(rk.dtype, DatetimeTZDtype) or (lib.is_np_dtype(lk.dtype, 'M') and lib.is_np_dtype(rk.dtype, 'M')): (lk, rk) = cast('DatetimeArray', lk)._ensure_matching_resos(rk) lk = cast('DatetimeArray', lk)._ndarray rk = cast('DatetimeArray', rk)._ndarray elif isinstance(lk.dtype, CategoricalDtype) and isinstance(rk.dtype, CategoricalDtype) and (lk.dtype == rk.dtype): assert isinstance(lk, Categorical) assert isinstance(rk, Categorical) rk = lk._encode_with_my_categories(rk) lk = ensure_int64(lk.codes) rk = ensure_int64(rk.codes) elif isinstance(lk, ExtensionArray) and lk.dtype == rk.dtype: if isinstance(lk.dtype, ArrowDtype) and is_string_dtype(lk.dtype) or (isinstance(lk.dtype, StringDtype) and lk.dtype.storage == 'pyarrow'): import pyarrow as pa import pyarrow.compute as pc len_lk = len(lk) lk = lk._pa_array rk = rk._pa_array dc = pa.chunked_array(lk.chunks + rk.chunks).combine_chunks().dictionary_encode() (llab, rlab, count) = (pc.fill_null(dc.indices[slice(len_lk)], -1).to_numpy().astype(np.intp, copy=False), pc.fill_null(dc.indices[slice(len_lk, None)], -1).to_numpy().astype(np.intp, copy=False), len(dc.dictionary)) if sort: uniques = dc.dictionary.to_numpy(zero_copy_only=False) (llab, rlab) = _sort_labels(uniques, llab, rlab) if dc.null_count > 0: lmask = llab == -1 lany = lmask.any() rmask = rlab == -1 rany = rmask.any() if lany: np.putmask(llab, lmask, count) if rany: np.putmask(rlab, rmask, count) count += 1 return (llab, rlab, count) if not isinstance(lk, BaseMaskedArray) and (not (isinstance(lk.dtype, ArrowDtype) and (is_numeric_dtype(lk.dtype.numpy_dtype) or (is_string_dtype(lk.dtype) and (not sort))))): (lk, _) = lk._values_for_factorize() (rk, _) = rk._values_for_factorize() if needs_i8_conversion(lk.dtype) and lk.dtype == rk.dtype: lk = np.asarray(lk, dtype=np.int64) rk = np.asarray(rk, dtype=np.int64) (klass, lk, rk) = _convert_arrays_and_get_rizer_klass(lk, rk) rizer = klass(max(len(lk), len(rk)), uses_mask=isinstance(rk, (BaseMaskedArray, ArrowExtensionArray))) if isinstance(lk, BaseMaskedArray): assert isinstance(rk, BaseMaskedArray) (lk_data, lk_mask) = (lk._data, lk._mask) (rk_data, rk_mask) = (rk._data, rk._mask) elif isinstance(lk, ArrowExtensionArray): assert isinstance(rk, ArrowExtensionArray) lk_data = lk.to_numpy(na_value=1, dtype=lk.dtype.numpy_dtype) rk_data = rk.to_numpy(na_value=1, dtype=lk.dtype.numpy_dtype) (lk_mask, rk_mask) = (lk.isna(), rk.isna()) else: (lk_data, rk_data) = (lk, rk) (lk_mask, rk_mask) = (None, None) hash_join_available = how == 'inner' and (not sort) and (lk.dtype.kind in 'iufb') if hash_join_available: rlab = rizer.factorize(rk_data, mask=rk_mask) if rizer.get_count() == len(rlab): (ridx, lidx) = rizer.hash_inner_join(lk_data, lk_mask) return (lidx, ridx, -1) else: llab = rizer.factorize(lk_data, mask=lk_mask) else: llab = rizer.factorize(lk_data, mask=lk_mask) rlab = rizer.factorize(rk_data, mask=rk_mask) assert llab.dtype == np.dtype(np.intp), llab.dtype assert rlab.dtype == np.dtype(np.intp), rlab.dtype count = rizer.get_count() if sort: uniques = rizer.uniques.to_array() (llab, rlab) = _sort_labels(uniques, llab, rlab) lmask = llab == -1 lany = lmask.any() rmask = rlab == -1 rany = rmask.any() if lany or rany: if lany: np.putmask(llab, lmask, count) if rany: np.putmask(rlab, 
rmask, count) count += 1 return (llab, rlab, count) def _convert_arrays_and_get_rizer_klass(lk: ArrayLike, rk: ArrayLike) -> tuple[type[libhashtable.Factorizer], ArrayLike, ArrayLike]: klass: type[libhashtable.Factorizer] if is_numeric_dtype(lk.dtype): if lk.dtype != rk.dtype: dtype = find_common_type([lk.dtype, rk.dtype]) if isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() if not isinstance(lk, ExtensionArray): lk = cls._from_sequence(lk, dtype=dtype, copy=False) else: lk = lk.astype(dtype, copy=False) if not isinstance(rk, ExtensionArray): rk = cls._from_sequence(rk, dtype=dtype, copy=False) else: rk = rk.astype(dtype, copy=False) else: lk = lk.astype(dtype, copy=False) rk = rk.astype(dtype, copy=False) if isinstance(lk, BaseMaskedArray): klass = _factorizers[lk.dtype.type] elif isinstance(lk.dtype, ArrowDtype): klass = _factorizers[lk.dtype.numpy_dtype.type] else: klass = _factorizers[lk.dtype.type] else: klass = libhashtable.ObjectFactorizer lk = ensure_object(lk) rk = ensure_object(rk) return (klass, lk, rk) def _sort_labels(uniques: np.ndarray, left: npt.NDArray[np.intp], right: npt.NDArray[np.intp]) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: llength = len(left) labels = np.concatenate([left, right]) (_, new_labels) = algos.safe_sort(uniques, labels, use_na_sentinel=True) (new_left, new_right) = (new_labels[:llength], new_labels[llength:]) return (new_left, new_right) def _get_join_keys(llab: list[npt.NDArray[np.int64 | np.intp]], rlab: list[npt.NDArray[np.int64 | np.intp]], shape: Shape, sort: bool) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: nlev = next((lev for lev in range(len(shape), 0, -1) if not is_int64_overflow_possible(shape[:lev]))) stride = np.prod(shape[1:nlev], dtype='i8') lkey = stride * llab[0].astype('i8', subok=False, copy=False) rkey = stride * rlab[0].astype('i8', subok=False, copy=False) for i in range(1, nlev): with np.errstate(divide='ignore'): stride //= shape[i] lkey += llab[i] * stride rkey += rlab[i] * stride if nlev == len(shape): return (lkey, rkey) (lkey, rkey, count) = _factorize_keys(lkey, rkey, sort=sort) llab = [lkey] + llab[nlev:] rlab = [rkey] + rlab[nlev:] shape = (count,) + shape[nlev:] return _get_join_keys(llab, rlab, shape, sort) def _should_fill(lname, rname) -> bool: if not isinstance(lname, str) or not isinstance(rname, str): return True return lname == rname def _any(x) -> bool: return x is not None and com.any_not_none(*x) def _validate_operand(obj: DataFrame | Series) -> DataFrame: if isinstance(obj, ABCDataFrame): return obj elif isinstance(obj, ABCSeries): if obj.name is None: raise ValueError('Cannot merge a Series without a name') return obj.to_frame() else: raise TypeError(f'Can only merge Series or DataFrame objects, a {type(obj)} was passed') def _items_overlap_with_suffix(left: Index, right: Index, suffixes: Suffixes) -> tuple[Index, Index]: if not is_list_like(suffixes, allow_sets=False) or isinstance(suffixes, dict): raise TypeError(f"Passing 'suffixes' as a {type(suffixes)}, is not supported. 
Provide 'suffixes' as a tuple instead.") to_rename = left.intersection(right) if len(to_rename) == 0: return (left, right) (lsuffix, rsuffix) = suffixes if not lsuffix and (not rsuffix): raise ValueError(f'columns overlap but no suffix specified: {to_rename}') def renamer(x, suffix: str | None): if x in to_rename and suffix is not None: return f'{x}{suffix}' return x lrenamer = partial(renamer, suffix=lsuffix) rrenamer = partial(renamer, suffix=rsuffix) llabels = left._transform_index(lrenamer) rlabels = right._transform_index(rrenamer) dups = [] if not llabels.is_unique: dups = llabels[llabels.duplicated() & ~left.duplicated()].tolist() if not rlabels.is_unique: dups.extend(rlabels[rlabels.duplicated() & ~right.duplicated()].tolist()) if dups: raise MergeError(f"Passing 'suffixes' which cause duplicate columns {set(dups)} is not allowed.") return (llabels, rlabels) # File: pandas-main/pandas/core/reshape/pivot.py from __future__ import annotations import itertools from typing import TYPE_CHECKING, Literal, cast import numpy as np from pandas._libs import lib from pandas.core.dtypes.cast import maybe_downcast_to_dtype from pandas.core.dtypes.common import is_list_like, is_nested_list_like, is_scalar from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries import pandas.core.common as com from pandas.core.groupby import Grouper from pandas.core.indexes.api import Index, MultiIndex, get_objs_combined_axis from pandas.core.reshape.concat import concat from pandas.core.series import Series if TYPE_CHECKING: from collections.abc import Callable, Hashable from pandas._typing import AggFuncType, AggFuncTypeBase, AggFuncTypeDict, IndexLabel, SequenceNotStr from pandas import DataFrame def pivot_table(data: DataFrame, values=None, index=None, columns=None, aggfunc: AggFuncType='mean', fill_value=None, margins: bool=False, dropna: bool=True, margins_name: Hashable='All', observed: bool=True, sort: bool=True, **kwargs) -> DataFrame: index = _convert_by(index) columns = _convert_by(columns) if isinstance(aggfunc, list): pieces: list[DataFrame] = [] keys = [] for func in aggfunc: _table = __internal_pivot_table(data, values=values, index=index, columns=columns, fill_value=fill_value, aggfunc=func, margins=margins, dropna=dropna, margins_name=margins_name, observed=observed, sort=sort, kwargs=kwargs) pieces.append(_table) keys.append(getattr(func, '__name__', func)) table = concat(pieces, keys=keys, axis=1) return table.__finalize__(data, method='pivot_table') table = __internal_pivot_table(data, values, index, columns, aggfunc, fill_value, margins, dropna, margins_name, observed, sort, kwargs) return table.__finalize__(data, method='pivot_table') def __internal_pivot_table(data: DataFrame, values, index, columns, aggfunc: AggFuncTypeBase | AggFuncTypeDict, fill_value, margins: bool, dropna: bool, margins_name: Hashable, observed: bool, sort: bool, kwargs) -> DataFrame: keys = index + columns values_passed = values is not None if values_passed: if is_list_like(values): values_multi = True values = list(values) else: values_multi = False values = [values] for i in values: if i not in data: raise KeyError(i) to_filter = [] for x in keys + values: if isinstance(x, Grouper): x = x.key try: if x in data: to_filter.append(x) except TypeError: pass if len(to_filter) < len(data.columns): data = data[to_filter] else: values = data.columns for key in keys: try: values = values.drop(key) except (TypeError, ValueError, KeyError): pass values = list(values) 
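# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the pandas source): how the key checks in
# pandas/core/reshape/merge.py above surface through the public pd.merge API.
# The frames and column names are invented for the example; only documented
# pandas behavior (pd.merge, validate=, pandas.errors.MergeError) is assumed.
import pandas as pd

left = pd.DataFrame({"key": [1, 2, 2], "lval": ["a", "b", "c"]})
right = pd.DataFrame({"key": [1.0, 2.5], "rval": [10, 20]})

# Merging an int key against a float key whose values are not exact integer
# representations triggers the UserWarning emitted in _maybe_coerce_merge_keys.
merged = pd.merge(left, right, on="key", how="left")

# validate= is checked by _validate_validate_kwd; the duplicated key 2 on the
# left makes a one-to-one merge fail with MergeError.
try:
    pd.merge(left, right, on="key", validate="one_to_one")
except pd.errors.MergeError as err:
    print(err)  # Merge keys are not unique in left dataset; not a one-to-one merge
# (end of illustrative sketch; pandas source continues)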
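# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the pandas source): the groupby call that
# follows is the core of __internal_pivot_table. For a simple case with no
# margins or fill_value, pivot_table is conceptually a grouped aggregation
# over index + columns keys followed by an unstack of the column keys. The
# DataFrame below is invented for the example.
import pandas as pd

df = pd.DataFrame(
    {
        "city": ["NY", "NY", "SF", "SF"],
        "year": [2023, 2024, 2023, 2024],
        "sales": [1.0, 2.0, 3.0, 4.0],
    }
)

via_pivot = pd.pivot_table(df, values="sales", index="city", columns="year", aggfunc="mean")
via_groupby = df.groupby(["city", "year"])["sales"].mean().unstack("year")

# Both paths should produce the same 2x2 table of mean sales per (city, year).
assert via_pivot.equals(via_groupby)
# (end of illustrative sketch; pandas source continues)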
grouped = data.groupby(keys, observed=observed, sort=sort, dropna=dropna) agged = grouped.agg(aggfunc, **kwargs) if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns): agged = agged.dropna(how='all') table = agged if table.index.nlevels > 1 and index: index_names = agged.index.names[:len(index)] to_unstack = [] for i in range(len(index), len(keys)): name = agged.index.names[i] if name is None or name in index_names: to_unstack.append(i) else: to_unstack.append(name) table = agged.unstack(to_unstack, fill_value=fill_value) if not dropna: if isinstance(table.index, MultiIndex): m = MultiIndex.from_product(table.index.levels, names=table.index.names) table = table.reindex(m, axis=0, fill_value=fill_value) if isinstance(table.columns, MultiIndex): m = MultiIndex.from_product(table.columns.levels, names=table.columns.names) table = table.reindex(m, axis=1, fill_value=fill_value) if sort is True and isinstance(table, ABCDataFrame): table = table.sort_index(axis=1) if fill_value is not None: table = table.fillna(fill_value) if aggfunc is len and (not observed) and lib.is_integer(fill_value): table = table.astype(np.int64) if margins: if dropna: data = data[data.notna().all(axis=1)] table = _add_margins(table, data, values, rows=index, cols=columns, aggfunc=aggfunc, kwargs=kwargs, observed=dropna, margins_name=margins_name, fill_value=fill_value) if values_passed and (not values_multi) and (table.columns.nlevels > 1): table.columns = table.columns.droplevel(0) if len(index) == 0 and len(columns) > 0: table = table.T if isinstance(table, ABCDataFrame) and dropna: table = table.dropna(how='all', axis=1) return table def _add_margins(table: DataFrame | Series, data: DataFrame, values, rows, cols, aggfunc, kwargs, observed: bool, margins_name: Hashable='All', fill_value=None): if not isinstance(margins_name, str): raise ValueError('margins_name argument must be a string') msg = f'Conflicting name "{margins_name}" in margins' for level in table.index.names: if margins_name in table.index.get_level_values(level): raise ValueError(msg) grand_margin = _compute_grand_margin(data, values, aggfunc, kwargs, margins_name) if table.ndim == 2: for level in table.columns.names[1:]: if margins_name in table.columns.get_level_values(level): raise ValueError(msg) key: str | tuple[str, ...] 
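# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the pandas source): what margins=True adds.
# _compute_grand_margin and the marginal-results helpers below append an extra
# row and column, labelled by margins_name ("All" by default), holding the
# aggregate over each axis plus the grand total. The data is invented for the
# example; only the public pd.pivot_table API is assumed.
import pandas as pd

df = pd.DataFrame(
    {
        "city": ["NY", "NY", "SF", "SF"],
        "year": [2023, 2024, 2023, 2024],
        "sales": [1, 2, 3, 4],
    }
)

table = pd.pivot_table(
    df, values="sales", index="city", columns="year", aggfunc="sum", margins=True
)
# Expected layout (sums):
# year   2023  2024  All
# city
# NY        1     2    3
# SF        3     4    7
# All       4     6   10
# (end of illustrative sketch; pandas source continues)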
if len(rows) > 1: key = (margins_name,) + ('',) * (len(rows) - 1) else: key = margins_name if not values and isinstance(table, ABCSeries): return table._append(table._constructor({key: grand_margin[margins_name]})) elif values: marginal_result_set = _generate_marginal_results(table, data, values, rows, cols, aggfunc, kwargs, observed, margins_name) if not isinstance(marginal_result_set, tuple): return marginal_result_set (result, margin_keys, row_margin) = marginal_result_set else: assert isinstance(table, ABCDataFrame) marginal_result_set = _generate_marginal_results_without_values(table, data, rows, cols, aggfunc, kwargs, observed, margins_name) if not isinstance(marginal_result_set, tuple): return marginal_result_set (result, margin_keys, row_margin) = marginal_result_set row_margin = row_margin.reindex(result.columns, fill_value=fill_value) for k in margin_keys: if isinstance(k, str): row_margin[k] = grand_margin[k] else: row_margin[k] = grand_margin[k[0]] from pandas import DataFrame margin_dummy = DataFrame(row_margin, columns=Index([key])).T row_names = result.index.names for dtype in set(result.dtypes): if isinstance(dtype, ExtensionDtype): continue cols = result.select_dtypes([dtype]).columns margin_dummy[cols] = margin_dummy[cols].apply(maybe_downcast_to_dtype, args=(dtype,)) result = result._append(margin_dummy) result.index.names = row_names return result def _compute_grand_margin(data: DataFrame, values, aggfunc, kwargs, margins_name: Hashable='All'): if values: grand_margin = {} for (k, v) in data[values].items(): try: if isinstance(aggfunc, str): grand_margin[k] = getattr(v, aggfunc)(**kwargs) elif isinstance(aggfunc, dict): if isinstance(aggfunc[k], str): grand_margin[k] = getattr(v, aggfunc[k])(**kwargs) else: grand_margin[k] = aggfunc[k](v, **kwargs) else: grand_margin[k] = aggfunc(v, **kwargs) except TypeError: pass return grand_margin else: return {margins_name: aggfunc(data.index, **kwargs)} def _generate_marginal_results(table, data: DataFrame, values, rows, cols, aggfunc, kwargs, observed: bool, margins_name: Hashable='All'): margin_keys: list | Index if len(cols) > 0: table_pieces = [] margin_keys = [] def _all_key(key): return (key, margins_name) + ('',) * (len(cols) - 1) if len(rows) > 0: margin = data[rows + values].groupby(rows, observed=observed).agg(aggfunc, **kwargs) cat_axis = 1 for (key, piece) in table.T.groupby(level=0, observed=observed): piece = piece.T all_key = _all_key(key) piece[all_key] = margin[key] table_pieces.append(piece) margin_keys.append(all_key) else: margin = data[cols[:1] + values].groupby(cols[:1], observed=observed).agg(aggfunc, **kwargs).T cat_axis = 0 for (key, piece) in table.groupby(level=0, observed=observed): if len(cols) > 1: all_key = _all_key(key) else: all_key = margins_name table_pieces.append(piece) transformed_piece = margin[key].to_frame().T if isinstance(piece.index, MultiIndex): transformed_piece.index = MultiIndex.from_tuples([all_key], names=piece.index.names + [None]) else: transformed_piece.index = Index([all_key], name=piece.index.name) table_pieces.append(transformed_piece) margin_keys.append(all_key) if not table_pieces: return table else: result = concat(table_pieces, axis=cat_axis) if len(rows) == 0: return result else: result = table margin_keys = table.columns if len(cols) > 0: row_margin = data[cols + values].groupby(cols, observed=observed).agg(aggfunc, **kwargs) row_margin = row_margin.stack() new_order_indices = itertools.chain([len(cols)], range(len(cols))) new_order_names = [row_margin.index.names[i] 
for i in new_order_indices] row_margin.index = row_margin.index.reorder_levels(new_order_names) else: row_margin = data._constructor_sliced(np.nan, index=result.columns) return (result, margin_keys, row_margin) def _generate_marginal_results_without_values(table: DataFrame, data: DataFrame, rows, cols, aggfunc, kwargs, observed: bool, margins_name: Hashable='All'): margin_keys: list | Index if len(cols) > 0: margin_keys = [] def _all_key(): if len(cols) == 1: return margins_name return (margins_name,) + ('',) * (len(cols) - 1) if len(rows) > 0: margin = data.groupby(rows, observed=observed)[rows].apply(aggfunc, **kwargs) all_key = _all_key() table[all_key] = margin result = table margin_keys.append(all_key) else: margin = data.groupby(level=0, observed=observed).apply(aggfunc, **kwargs) all_key = _all_key() table[all_key] = margin result = table margin_keys.append(all_key) return result else: result = table margin_keys = table.columns if len(cols): row_margin = data.groupby(cols, observed=observed)[cols].apply(aggfunc, **kwargs) else: row_margin = Series(np.nan, index=result.columns) return (result, margin_keys, row_margin) def _convert_by(by): if by is None: by = [] elif is_scalar(by) or isinstance(by, (np.ndarray, Index, ABCSeries, Grouper)) or callable(by): by = [by] else: by = list(by) return by def pivot(data: DataFrame, *, columns: IndexLabel, index: IndexLabel | lib.NoDefault=lib.no_default, values: IndexLabel | lib.NoDefault=lib.no_default) -> DataFrame: columns_listlike = com.convert_to_list_like(columns) if any((name is None for name in data.index.names)): data = data.copy(deep=False) data.index.names = [name if name is not None else lib.no_default for name in data.index.names] indexed: DataFrame | Series if values is lib.no_default: if index is not lib.no_default: cols = com.convert_to_list_like(index) else: cols = [] append = index is lib.no_default indexed = data.set_index(cols + columns_listlike, append=append) else: index_list: list[Index] | list[Series] if index is lib.no_default: if isinstance(data.index, MultiIndex): index_list = [data.index.get_level_values(i) for i in range(data.index.nlevels)] else: index_list = [data._constructor_sliced(data.index, name=data.index.name)] else: index_list = [data[idx] for idx in com.convert_to_list_like(index)] data_columns = [data[col] for col in columns_listlike] index_list.extend(data_columns) multiindex = MultiIndex.from_arrays(index_list) if is_list_like(values) and (not isinstance(values, tuple)): indexed = data._constructor(data[values]._values, index=multiindex, columns=cast('SequenceNotStr', values)) else: indexed = data._constructor_sliced(data[values]._values, index=multiindex) result = cast('DataFrame', indexed.unstack(columns_listlike)) result.index.names = [name if name is not lib.no_default else None for name in result.index.names] return result def crosstab(index, columns, values=None, rownames=None, colnames=None, aggfunc=None, margins: bool=False, margins_name: Hashable='All', dropna: bool=True, normalize: bool | Literal[0, 1, 'all', 'index', 'columns']=False) -> DataFrame: if values is None and aggfunc is not None: raise ValueError('aggfunc cannot be used without values.') if values is not None and aggfunc is None: raise ValueError('values cannot be used without an aggfunc.') if not is_nested_list_like(index): index = [index] if not is_nested_list_like(columns): columns = [columns] common_idx = None pass_objs = [x for x in index + columns if isinstance(x, (ABCSeries, ABCDataFrame))] if pass_objs: common_idx = 
get_objs_combined_axis(pass_objs, intersect=True, sort=False) rownames = _get_names(index, rownames, prefix='row') colnames = _get_names(columns, colnames, prefix='col') (rownames_mapper, unique_rownames, colnames_mapper, unique_colnames) = _build_names_mapper(rownames, colnames) from pandas import DataFrame data = {**dict(zip(unique_rownames, index)), **dict(zip(unique_colnames, columns))} df = DataFrame(data, index=common_idx) if values is None: df['__dummy__'] = 0 kwargs = {'aggfunc': len, 'fill_value': 0} else: df['__dummy__'] = values kwargs = {'aggfunc': aggfunc} table = df.pivot_table('__dummy__', index=unique_rownames, columns=unique_colnames, margins=margins, margins_name=margins_name, dropna=dropna, observed=dropna, **kwargs) if normalize is not False: table = _normalize(table, normalize=normalize, margins=margins, margins_name=margins_name) table = table.rename_axis(index=rownames_mapper, axis=0) table = table.rename_axis(columns=colnames_mapper, axis=1) return table def _normalize(table: DataFrame, normalize, margins: bool, margins_name: Hashable='All') -> DataFrame: if not isinstance(normalize, (bool, str)): axis_subs = {0: 'index', 1: 'columns'} try: normalize = axis_subs[normalize] except KeyError as err: raise ValueError('Not a valid normalize argument') from err if margins is False: normalizers: dict[bool | str, Callable] = {'all': lambda x: x / x.sum(axis=1).sum(axis=0), 'columns': lambda x: x / x.sum(), 'index': lambda x: x.div(x.sum(axis=1), axis=0)} normalizers[True] = normalizers['all'] try: f = normalizers[normalize] except KeyError as err: raise ValueError('Not a valid normalize argument') from err table = f(table) table = table.fillna(0) elif margins is True: table_index = table.index table_columns = table.columns last_ind_or_col = table.iloc[-1, :].name if (margins_name not in last_ind_or_col) & (margins_name != last_ind_or_col): raise ValueError(f'{margins_name} not in pivoted DataFrame') column_margin = table.iloc[:-1, -1] index_margin = table.iloc[-1, :-1] table = table.iloc[:-1, :-1] table = _normalize(table, normalize=normalize, margins=False) if normalize == 'columns': column_margin = column_margin / column_margin.sum() table = concat([table, column_margin], axis=1) table = table.fillna(0) table.columns = table_columns elif normalize == 'index': index_margin = index_margin / index_margin.sum() table = table._append(index_margin, ignore_index=True) table = table.fillna(0) table.index = table_index elif normalize == 'all' or normalize is True: column_margin = column_margin / column_margin.sum() index_margin = index_margin / index_margin.sum() index_margin.loc[margins_name] = 1 table = concat([table, column_margin], axis=1) table = table._append(index_margin, ignore_index=True) table = table.fillna(0) table.index = table_index table.columns = table_columns else: raise ValueError('Not a valid normalize argument') else: raise ValueError('Not a valid margins argument') return table def _get_names(arrs, names, prefix: str='row') -> list: if names is None: names = [] for (i, arr) in enumerate(arrs): if isinstance(arr, ABCSeries) and arr.name is not None: names.append(arr.name) else: names.append(f'{prefix}_{i}') else: if len(names) != len(arrs): raise AssertionError('arrays and names must have the same length') if not isinstance(names, list): names = list(names) return names def _build_names_mapper(rownames: list[str], colnames: list[str]) -> tuple[dict[str, str], list[str], dict[str, str], list[str]]: dup_names = set(rownames) | set(colnames) rownames_mapper = 
{f'row_{i}': name for (i, name) in enumerate(rownames) if name in dup_names} unique_rownames = [f'row_{i}' if name in dup_names else name for (i, name) in enumerate(rownames)] colnames_mapper = {f'col_{i}': name for (i, name) in enumerate(colnames) if name in dup_names} unique_colnames = [f'col_{i}' if name in dup_names else name for (i, name) in enumerate(colnames)] return (rownames_mapper, unique_rownames, colnames_mapper, unique_colnames) # File: pandas-main/pandas/core/reshape/reshape.py from __future__ import annotations import itertools from typing import TYPE_CHECKING, cast, overload import warnings import numpy as np from pandas._config.config import get_option import pandas._libs.reshape as libreshape from pandas.errors import PerformanceWarning from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.cast import find_common_type, maybe_promote from pandas.core.dtypes.common import ensure_platform_int, is_1d_only_ea_dtype, is_integer, needs_i8_conversion from pandas.core.dtypes.dtypes import ExtensionDtype from pandas.core.dtypes.missing import notna import pandas.core.algorithms as algos from pandas.core.algorithms import factorize, unique from pandas.core.arrays._mixins import NDArrayBackedExtensionArray from pandas.core.arrays.categorical import factorize_from_iterable from pandas.core.construction import ensure_wrapped_if_datetimelike from pandas.core.frame import DataFrame from pandas.core.indexes.api import Index, MultiIndex, default_index from pandas.core.reshape.concat import concat from pandas.core.series import Series from pandas.core.sorting import compress_group_index, decons_obs_group_ids, get_compressed_ids, get_group_index, get_group_index_sorter if TYPE_CHECKING: from pandas._typing import ArrayLike, Level, npt from pandas.core.arrays import ExtensionArray from pandas.core.indexes.frozen import FrozenList class _Unstacker: def __init__(self, index: MultiIndex, level: Level, constructor, sort: bool=True) -> None: self.constructor = constructor self.sort = sort self.index = index.remove_unused_levels() self.level = self.index._get_level_number(level) self.lift = 1 if -1 in self.index.codes[self.level] else 0 self.new_index_levels = list(self.index.levels) self.new_index_names = list(self.index.names) self.removed_name = self.new_index_names.pop(self.level) self.removed_level = self.new_index_levels.pop(self.level) self.removed_level_full = index.levels[self.level] if not self.sort: unique_codes = unique(self.index.codes[self.level]) self.removed_level = self.removed_level.take(unique_codes) self.removed_level_full = self.removed_level_full.take(unique_codes) if get_option('performance_warnings'): num_rows = max((index_level.size for index_level in self.new_index_levels)) num_columns = self.removed_level.size num_cells = num_rows * num_columns if num_cells > np.iinfo(np.int32).max: warnings.warn(f'The following operation may generate {num_cells} cells in the resulting pandas object.', PerformanceWarning, stacklevel=find_stack_level()) self._make_selectors() @cache_readonly def _indexer_and_to_sort(self) -> tuple[npt.NDArray[np.intp], list[np.ndarray]]: v = self.level codes = list(self.index.codes) if not self.sort: codes = [factorize(code)[0] for code in codes] levs = list(self.index.levels) to_sort = codes[:v] + codes[v + 1:] + [codes[v]] sizes = tuple((len(x) for x in levs[:v] + levs[v + 1:] + [levs[v]])) (comp_index, obs_ids) = get_compressed_ids(to_sort, sizes) ngroups = len(obs_ids) indexer = 
get_group_index_sorter(comp_index, ngroups) return (indexer, to_sort) @cache_readonly def sorted_labels(self) -> list[np.ndarray]: (indexer, to_sort) = self._indexer_and_to_sort if self.sort: return [line.take(indexer) for line in to_sort] return to_sort def _make_sorted_values(self, values: np.ndarray) -> np.ndarray: (indexer, _) = self._indexer_and_to_sort sorted_values = algos.take_nd(values, indexer, axis=0) return sorted_values def _make_selectors(self) -> None: new_levels = self.new_index_levels remaining_labels = self.sorted_labels[:-1] level_sizes = tuple((len(x) for x in new_levels)) (comp_index, obs_ids) = get_compressed_ids(remaining_labels, level_sizes) ngroups = len(obs_ids) comp_index = ensure_platform_int(comp_index) stride = self.index.levshape[self.level] + self.lift self.full_shape = (ngroups, stride) selector = self.sorted_labels[-1] + stride * comp_index + self.lift mask = np.zeros(np.prod(self.full_shape), dtype=bool) mask.put(selector, True) if mask.sum() < len(self.index): raise ValueError('Index contains duplicate entries, cannot reshape') self.group_index = comp_index self.mask = mask if self.sort: self.compressor = comp_index.searchsorted(np.arange(ngroups)) else: self.compressor = np.sort(np.unique(comp_index, return_index=True)[1]) @cache_readonly def mask_all(self) -> bool: return bool(self.mask.all()) @cache_readonly def arange_result(self) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.bool_]]: dummy_arr = np.arange(len(self.index), dtype=np.intp) (new_values, mask) = self.get_new_values(dummy_arr, fill_value=-1) return (new_values, mask.any(0)) def get_result(self, obj, value_columns, fill_value) -> DataFrame: values = obj._values if values.ndim == 1: values = values[:, np.newaxis] if value_columns is None and values.shape[1] != 1: raise ValueError('must pass column labels for multi-column data') (new_values, _) = self.get_new_values(values, fill_value) columns = self.get_new_columns(value_columns) index = self.new_index result = self.constructor(new_values, index=index, columns=columns, dtype=new_values.dtype, copy=False) if isinstance(values, np.ndarray): (base, new_base) = (values.base, new_values.base) elif isinstance(values, NDArrayBackedExtensionArray): (base, new_base) = (values._ndarray.base, new_values._ndarray.base) else: (base, new_base) = (1, 2) if base is new_base: result._mgr.add_references(obj._mgr) return result def get_new_values(self, values, fill_value=None): if values.ndim == 1: values = values[:, np.newaxis] sorted_values = self._make_sorted_values(values) (length, width) = self.full_shape stride = values.shape[1] result_width = width * stride result_shape = (length, result_width) mask = self.mask mask_all = self.mask_all if mask_all and len(values): new_values = sorted_values.reshape(length, width, stride).swapaxes(1, 2).reshape(result_shape) new_mask = np.ones(result_shape, dtype=bool) return (new_values, new_mask) dtype = values.dtype if isinstance(dtype, ExtensionDtype): cls = dtype.construct_array_type() new_values = cls._empty(result_shape, dtype=dtype) if not mask_all: new_values[:] = fill_value else: if not mask_all: (dtype, fill_value) = maybe_promote(dtype, fill_value) new_values = np.empty(result_shape, dtype=dtype) if not mask_all: new_values.fill(fill_value) name = dtype.name new_mask = np.zeros(result_shape, dtype=bool) if needs_i8_conversion(values.dtype): sorted_values = sorted_values.view('i8') new_values = new_values.view('i8') else: sorted_values = sorted_values.astype(name, copy=False) libreshape.unstack(sorted_values, 
mask.view('u1'), stride, length, width, new_values, new_mask.view('u1')) if needs_i8_conversion(values.dtype): new_values = new_values.view('M8[ns]') new_values = ensure_wrapped_if_datetimelike(new_values) new_values = new_values.view(values.dtype) return (new_values, new_mask) def get_new_columns(self, value_columns: Index | None): if value_columns is None: if self.lift == 0: return self.removed_level._rename(name=self.removed_name) lev = self.removed_level.insert(0, item=self.removed_level._na_value) return lev.rename(self.removed_name) stride = len(self.removed_level) + self.lift width = len(value_columns) propagator = np.repeat(np.arange(width), stride) new_levels: FrozenList | list[Index] if isinstance(value_columns, MultiIndex): new_levels = value_columns.levels + (self.removed_level_full,) new_names = value_columns.names + (self.removed_name,) new_codes = [lab.take(propagator) for lab in value_columns.codes] else: new_levels = [value_columns, self.removed_level_full] new_names = [value_columns.name, self.removed_name] new_codes = [propagator] repeater = self._repeater new_codes.append(np.tile(repeater, width)) return MultiIndex(levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False) @cache_readonly def _repeater(self) -> np.ndarray: if len(self.removed_level_full) != len(self.removed_level): repeater = self.removed_level_full.get_indexer(self.removed_level) if self.lift: repeater = np.insert(repeater, 0, -1) else: stride = len(self.removed_level) + self.lift repeater = np.arange(stride) - self.lift return repeater @cache_readonly def new_index(self) -> MultiIndex | Index: if self.sort: labels = self.sorted_labels[:-1] else: v = self.level codes = list(self.index.codes) labels = codes[:v] + codes[v + 1:] result_codes = [lab.take(self.compressor) for lab in labels] if len(self.new_index_levels) == 1: (level, level_codes) = (self.new_index_levels[0], result_codes[0]) if (level_codes == -1).any(): level = level.insert(len(level), level._na_value) return level.take(level_codes).rename(self.new_index_names[0]) return MultiIndex(levels=self.new_index_levels, codes=result_codes, names=self.new_index_names, verify_integrity=False) def _unstack_multiple(data: Series | DataFrame, clocs, fill_value=None, sort: bool=True): if len(clocs) == 0: return data index = data.index index = cast(MultiIndex, index) if clocs in index.names: clocs = [clocs] clocs = [index._get_level_number(i) for i in clocs] rlocs = [i for i in range(index.nlevels) if i not in clocs] clevels = [index.levels[i] for i in clocs] ccodes = [index.codes[i] for i in clocs] cnames = [index.names[i] for i in clocs] rlevels = [index.levels[i] for i in rlocs] rcodes = [index.codes[i] for i in rlocs] rnames = [index.names[i] for i in rlocs] shape = tuple((len(x) for x in clevels)) group_index = get_group_index(ccodes, shape, sort=False, xnull=False) (comp_ids, obs_ids) = compress_group_index(group_index, sort=False) recons_codes = decons_obs_group_ids(comp_ids, obs_ids, shape, ccodes, xnull=False) if not rlocs: dummy_index = Index(obs_ids, name='__placeholder__') else: dummy_index = MultiIndex(levels=rlevels + [obs_ids], codes=rcodes + [comp_ids], names=rnames + ['__placeholder__'], verify_integrity=False) if isinstance(data, Series): dummy = data.copy(deep=False) dummy.index = dummy_index unstacked = dummy.unstack('__placeholder__', fill_value=fill_value, sort=sort) new_levels = clevels new_names = cnames new_codes = recons_codes else: if isinstance(data.columns, MultiIndex): result = data while clocs: val = 
clocs.pop(0) result = result.unstack(val, fill_value=fill_value, sort=sort) clocs = [v if v < val else v - 1 for v in clocs] return result dummy_df = data.copy(deep=False) dummy_df.index = dummy_index unstacked = dummy_df.unstack('__placeholder__', fill_value=fill_value, sort=sort) if isinstance(unstacked, Series): unstcols = unstacked.index else: unstcols = unstacked.columns assert isinstance(unstcols, MultiIndex) new_levels = [unstcols.levels[0]] + clevels new_names = [data.columns.name] + cnames new_codes = [unstcols.codes[0]] new_codes.extend((rec.take(unstcols.codes[-1]) for rec in recons_codes)) new_columns = MultiIndex(levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False) if isinstance(unstacked, Series): unstacked.index = new_columns else: unstacked.columns = new_columns return unstacked @overload def unstack(obj: Series, level, fill_value=..., sort: bool=...) -> DataFrame: ... @overload def unstack(obj: Series | DataFrame, level, fill_value=..., sort: bool=...) -> Series | DataFrame: ... def unstack(obj: Series | DataFrame, level, fill_value=None, sort: bool=True) -> Series | DataFrame: if isinstance(level, (tuple, list)): if len(level) != 1: return _unstack_multiple(obj, level, fill_value=fill_value, sort=sort) else: level = level[0] if not is_integer(level) and (not level == '__placeholder__'): obj.index._get_level_number(level) if isinstance(obj, DataFrame): if isinstance(obj.index, MultiIndex): return _unstack_frame(obj, level, fill_value=fill_value, sort=sort) else: return obj.T.stack() elif not isinstance(obj.index, MultiIndex): raise ValueError(f'index must be a MultiIndex to unstack, {type(obj.index)} was passed') else: if is_1d_only_ea_dtype(obj.dtype): return _unstack_extension_series(obj, level, fill_value, sort=sort) unstacker = _Unstacker(obj.index, level=level, constructor=obj._constructor_expanddim, sort=sort) return unstacker.get_result(obj, value_columns=None, fill_value=fill_value) def _unstack_frame(obj: DataFrame, level, fill_value=None, sort: bool=True) -> DataFrame: assert isinstance(obj.index, MultiIndex) unstacker = _Unstacker(obj.index, level=level, constructor=obj._constructor, sort=sort) if not obj._can_fast_transpose: mgr = obj._mgr.unstack(unstacker, fill_value=fill_value) return obj._constructor_from_mgr(mgr, axes=mgr.axes) else: return unstacker.get_result(obj, value_columns=obj.columns, fill_value=fill_value) def _unstack_extension_series(series: Series, level, fill_value, sort: bool) -> DataFrame: df = series.to_frame() result = df.unstack(level=level, fill_value=fill_value, sort=sort) result.columns = result.columns._drop_level_numbers([0]) return result def stack(frame: DataFrame, level=-1, dropna: bool=True, sort: bool=True) -> Series | DataFrame: def stack_factorize(index): if index.is_unique: return (index, np.arange(len(index))) (codes, categories) = factorize_from_iterable(index) return (categories, codes) (N, K) = frame.shape level_num = frame.columns._get_level_number(level) if isinstance(frame.columns, MultiIndex): return _stack_multi_columns(frame, level_num=level_num, dropna=dropna, sort=sort) elif isinstance(frame.index, MultiIndex): new_levels = list(frame.index.levels) new_codes = [lab.repeat(K) for lab in frame.index.codes] (clev, clab) = stack_factorize(frame.columns) new_levels.append(clev) new_codes.append(np.tile(clab, N).ravel()) new_names = list(frame.index.names) new_names.append(frame.columns.name) new_index = MultiIndex(levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False) else: 
(levels, (ilab, clab)) = zip(*map(stack_factorize, (frame.index, frame.columns))) codes = (ilab.repeat(K), np.tile(clab, N).ravel()) new_index = MultiIndex(levels=levels, codes=codes, names=[frame.index.name, frame.columns.name], verify_integrity=False) new_values: ArrayLike if not frame.empty and frame._is_homogeneous_type: dtypes = list(frame.dtypes._values) dtype = dtypes[0] if isinstance(dtype, ExtensionDtype): arr = dtype.construct_array_type() new_values = arr._concat_same_type([col._values for (_, col) in frame.items()]) new_values = _reorder_for_extension_array_stack(new_values, N, K) else: new_values = frame._values.ravel() else: new_values = frame._values.ravel() if dropna: mask = notna(new_values) new_values = new_values[mask] new_index = new_index[mask] return frame._constructor_sliced(new_values, index=new_index) def stack_multiple(frame: DataFrame, level, dropna: bool=True, sort: bool=True): if all((lev in frame.columns.names for lev in level)): result = frame for lev in level: result = stack(result, lev, dropna=dropna, sort=sort) elif all((isinstance(lev, int) for lev in level)): result = frame level = [frame.columns._get_level_number(lev) for lev in level] while level: lev = level.pop(0) result = stack(result, lev, dropna=dropna, sort=sort) level = [v if v <= lev else v - 1 for v in level] else: raise ValueError('level should contain all level names or all level numbers, not a mixture of the two.') return result def _stack_multi_column_index(columns: MultiIndex) -> MultiIndex | Index: if len(columns.levels) <= 2: return columns.levels[0]._rename(name=columns.names[0]) levs = ([lev[c] if c >= 0 else None for c in codes] for (lev, codes) in zip(columns.levels[:-1], columns.codes[:-1])) tuples = zip(*levs) unique_tuples = (key for (key, _) in itertools.groupby(tuples)) new_levs = zip(*unique_tuples) return MultiIndex.from_arrays([Index(new_lev, dtype=lev.dtype) if None not in new_lev else new_lev for (new_lev, lev) in zip(new_levs, columns.levels)], names=columns.names[:-1]) def _stack_multi_columns(frame: DataFrame, level_num: int=-1, dropna: bool=True, sort: bool=True) -> DataFrame: def _convert_level_number(level_num: int, columns: Index): if level_num in columns.names: return columns.names[level_num] return level_num this = frame.copy(deep=False) mi_cols = this.columns assert isinstance(mi_cols, MultiIndex) if level_num != mi_cols.nlevels - 1: roll_columns = mi_cols for i in range(level_num, mi_cols.nlevels - 1): lev1 = _convert_level_number(i, roll_columns) lev2 = _convert_level_number(i + 1, roll_columns) roll_columns = roll_columns.swaplevel(lev1, lev2) this.columns = mi_cols = roll_columns if not mi_cols._is_lexsorted() and sort: level_to_sort = _convert_level_number(0, mi_cols) this = this.sort_index(level=level_to_sort, axis=1) mi_cols = this.columns mi_cols = cast(MultiIndex, mi_cols) new_columns = _stack_multi_column_index(mi_cols) new_data = {} level_vals = mi_cols.levels[-1] level_codes = unique(mi_cols.codes[-1]) if sort: level_codes = np.sort(level_codes) level_vals_nan = level_vals.insert(len(level_vals), None) level_vals_used = np.take(level_vals_nan, level_codes) levsize = len(level_codes) drop_cols = [] for key in new_columns: try: loc = this.columns.get_loc(key) except KeyError: drop_cols.append(key) continue if not isinstance(loc, slice): slice_len = len(loc) else: slice_len = loc.stop - loc.start if slice_len != levsize: chunk = this.loc[:, this.columns[loc]] chunk.columns = level_vals_nan.take(chunk.columns.codes[-1]) value_slice = 
chunk.reindex(columns=level_vals_used).values else: subset = this.iloc[:, loc] dtype = find_common_type(subset.dtypes.tolist()) if isinstance(dtype, ExtensionDtype): value_slice = dtype.construct_array_type()._concat_same_type([x._values.astype(dtype, copy=False) for (_, x) in subset.items()]) (N, K) = subset.shape idx = np.arange(N * K).reshape(K, N).T.reshape(-1) value_slice = value_slice.take(idx) else: value_slice = subset.values if value_slice.ndim > 1: value_slice = value_slice.ravel() new_data[key] = value_slice if len(drop_cols) > 0: new_columns = new_columns.difference(drop_cols) N = len(this) if isinstance(this.index, MultiIndex): new_levels = list(this.index.levels) new_names = list(this.index.names) new_codes = [lab.repeat(levsize) for lab in this.index.codes] else: (old_codes, old_levels) = factorize_from_iterable(this.index) new_levels = [old_levels] new_codes = [old_codes.repeat(levsize)] new_names = [this.index.name] new_levels.append(level_vals) new_codes.append(np.tile(level_codes, N)) new_names.append(frame.columns.names[level_num]) new_index = MultiIndex(levels=new_levels, codes=new_codes, names=new_names, verify_integrity=False) result = frame._constructor(new_data, index=new_index, columns=new_columns) if frame.columns.nlevels > 1: desired_columns = frame.columns._drop_level_numbers([level_num]).unique() if not result.columns.equals(desired_columns): result = result[desired_columns] if dropna: result = result.dropna(axis=0, how='all') return result def _reorder_for_extension_array_stack(arr: ExtensionArray, n_rows: int, n_columns: int) -> ExtensionArray: idx = np.arange(n_rows * n_columns).reshape(n_columns, n_rows).T.reshape(-1) return arr.take(idx) def stack_v3(frame: DataFrame, level: list[int]) -> Series | DataFrame: if frame.columns.nunique() != len(frame.columns): raise ValueError('Columns with duplicate values are not supported in stack') set_levels = set(level) stack_cols = frame.columns._drop_level_numbers([k for k in range(frame.columns.nlevels - 1, -1, -1) if k not in set_levels]) result = stack_reshape(frame, level, set_levels, stack_cols) ratio = 0 if frame.empty else len(result) // len(frame) index_levels: list | FrozenList if isinstance(frame.index, MultiIndex): index_levels = frame.index.levels index_codes = list(np.tile(frame.index.codes, (1, ratio))) else: (codes, uniques) = factorize(frame.index, use_na_sentinel=False) index_levels = [uniques] index_codes = list(np.tile(codes, (1, ratio))) if len(level) > 1: sorter = np.argsort(level) assert isinstance(stack_cols, MultiIndex) ordered_stack_cols = stack_cols._reorder_ilevels(sorter) else: ordered_stack_cols = stack_cols ordered_stack_cols_unique = ordered_stack_cols.unique() if isinstance(ordered_stack_cols, MultiIndex): column_levels = ordered_stack_cols.levels column_codes = ordered_stack_cols.drop_duplicates().codes else: column_levels = [ordered_stack_cols_unique] column_codes = [factorize(ordered_stack_cols_unique, use_na_sentinel=False)[0]] column_codes = [np.repeat(codes, len(frame)) for codes in column_codes] result.index = MultiIndex(levels=index_levels + column_levels, codes=index_codes + column_codes, names=frame.index.names + list(ordered_stack_cols.names), verify_integrity=False) len_df = len(frame) n_uniques = len(ordered_stack_cols_unique) indexer = np.arange(n_uniques) idxs = np.tile(len_df * indexer, len_df) + np.repeat(np.arange(len_df), n_uniques) result = result.take(idxs) if result.ndim == 2 and frame.columns.nlevels == len(level): if len(result.columns) == 0: result = 
Series(index=result.index) else: result = result.iloc[:, 0] if result.ndim == 1: result.name = None return result def stack_reshape(frame: DataFrame, level: list[int], set_levels: set[int], stack_cols: Index) -> Series | DataFrame: drop_levnums = sorted(level, reverse=True) buf = [] for idx in stack_cols.unique(): if len(frame.columns) == 1: data = frame.copy(deep=False) else: if not isinstance(frame.columns, MultiIndex) and (not isinstance(idx, tuple)): column_indexer = idx else: if len(level) == 1: idx = (idx,) gen = iter(idx) column_indexer = tuple((next(gen) if k in set_levels else slice(None) for k in range(frame.columns.nlevels))) data = frame.loc[:, column_indexer] if len(level) < frame.columns.nlevels: data.columns = data.columns._drop_level_numbers(drop_levnums) elif stack_cols.nlevels == 1: if data.ndim == 1: data.name = 0 else: data.columns = default_index(len(data.columns)) buf.append(data) if len(buf) > 0 and (not frame.empty): result = concat(buf, ignore_index=True) else: if len(level) < frame.columns.nlevels: new_columns = frame.columns._drop_level_numbers(drop_levnums).unique() else: new_columns = [0] result = DataFrame(columns=new_columns, dtype=frame._values.dtype) if len(level) < frame.columns.nlevels: desired_columns = frame.columns._drop_level_numbers(drop_levnums).unique() if not result.columns.equals(desired_columns): result = result[desired_columns] return result # File: pandas-main/pandas/core/reshape/tile.py """""" from __future__ import annotations from typing import TYPE_CHECKING, Any, Literal import numpy as np from pandas._libs import Timedelta, Timestamp, lib from pandas.core.dtypes.common import ensure_platform_int, is_bool_dtype, is_integer, is_list_like, is_numeric_dtype, is_scalar from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype, ExtensionDtype from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import isna from pandas import Categorical, Index, IntervalIndex import pandas.core.algorithms as algos from pandas.core.arrays.datetimelike import dtype_to_unit if TYPE_CHECKING: from collections.abc import Callable from pandas._typing import DtypeObj, IntervalLeftRight def cut(x, bins, right: bool=True, labels=None, retbins: bool=False, precision: int=3, include_lowest: bool=False, duplicates: str='raise', ordered: bool=True): original = x x_idx = _preprocess_for_cut(x) (x_idx, _) = _coerce_to_type(x_idx) if not np.iterable(bins): bins = _nbins_to_bins(x_idx, bins, right) elif isinstance(bins, IntervalIndex): if bins.is_overlapping: raise ValueError('Overlapping IntervalIndex is not accepted.') else: bins = Index(bins) if not bins.is_monotonic_increasing: raise ValueError('bins must increase monotonically.') (fac, bins) = _bins_to_cuts(x_idx, bins, right=right, labels=labels, precision=precision, include_lowest=include_lowest, duplicates=duplicates, ordered=ordered) return _postprocess_for_cut(fac, bins, retbins, original) def qcut(x, q, labels=None, retbins: bool=False, precision: int=3, duplicates: str='raise'): original = x x_idx = _preprocess_for_cut(x) (x_idx, _) = _coerce_to_type(x_idx) if is_integer(q): quantiles = np.linspace(0, 1, q + 1) np.putmask(quantiles, q * quantiles != np.arange(q + 1), np.nextafter(quantiles, 1)) else: quantiles = q bins = x_idx.to_series().dropna().quantile(quantiles) (fac, bins) = _bins_to_cuts(x_idx, Index(bins), labels=labels, precision=precision, include_lowest=True, duplicates=duplicates) return _postprocess_for_cut(fac, bins, retbins, original) def _nbins_to_bins(x_idx: 
Index, nbins: int, right: bool) -> Index: if is_scalar(nbins) and nbins < 1: raise ValueError('`bins` should be a positive integer.') if x_idx.size == 0: raise ValueError('Cannot cut empty array') rng = (x_idx.min(), x_idx.max()) (mn, mx) = rng if is_numeric_dtype(x_idx.dtype) and (np.isinf(mn) or np.isinf(mx)): raise ValueError('cannot specify integer `bins` when input data contains infinity') if mn == mx: if _is_dt_or_td(x_idx.dtype): unit = dtype_to_unit(x_idx.dtype) td = Timedelta(seconds=1).as_unit(unit) bins = x_idx._values._generate_range(start=mn - td, end=mx + td, periods=nbins + 1, freq=None, unit=unit) else: mn -= 0.001 * abs(mn) if mn != 0 else 0.001 mx += 0.001 * abs(mx) if mx != 0 else 0.001 bins = np.linspace(mn, mx, nbins + 1, endpoint=True) else: if _is_dt_or_td(x_idx.dtype): unit = dtype_to_unit(x_idx.dtype) bins = x_idx._values._generate_range(start=mn, end=mx, periods=nbins + 1, freq=None, unit=unit) else: bins = np.linspace(mn, mx, nbins + 1, endpoint=True) adj = (mx - mn) * 0.001 if right: bins[0] -= adj else: bins[-1] += adj return Index(bins) def _bins_to_cuts(x_idx: Index, bins: Index, right: bool=True, labels=None, precision: int=3, include_lowest: bool=False, duplicates: str='raise', ordered: bool=True): if not ordered and labels is None: raise ValueError("'labels' must be provided if 'ordered = False'") if duplicates not in ['raise', 'drop']: raise ValueError("invalid value for 'duplicates' parameter, valid options are: raise, drop") result: Categorical | np.ndarray if isinstance(bins, IntervalIndex): ids = bins.get_indexer(x_idx) cat_dtype = CategoricalDtype(bins, ordered=True) result = Categorical.from_codes(ids, dtype=cat_dtype, validate=False) return (result, bins) unique_bins = algos.unique(bins) if len(unique_bins) < len(bins) and len(bins) != 2: if duplicates == 'raise': raise ValueError(f"Bin edges must be unique: {bins!r}.\nYou can drop duplicate edges by setting the 'duplicates' kwarg") bins = unique_bins side: Literal['left', 'right'] = 'left' if right else 'right' try: ids = bins.searchsorted(x_idx, side=side) except TypeError as err: if x_idx.dtype.kind == 'm': raise ValueError('bins must be of timedelta64 dtype') from err elif x_idx.dtype.kind == bins.dtype.kind == 'M': raise ValueError('Cannot use timezone-naive bins with timezone-aware values, or vice-versa') from err elif x_idx.dtype.kind == 'M': raise ValueError('bins must be of datetime64 dtype') from err else: raise ids = ensure_platform_int(ids) if include_lowest: ids[x_idx == bins[0]] = 1 na_mask = isna(x_idx) | (ids == len(bins)) | (ids == 0) has_nas = na_mask.any() if labels is not False: if not (labels is None or is_list_like(labels)): raise ValueError('Bin labels must either be False, None or passed in as a list-like argument') if labels is None: labels = _format_labels(bins, precision, right=right, include_lowest=include_lowest) elif ordered and len(set(labels)) != len(labels): raise ValueError('labels must be unique if ordered=True; pass ordered=False for duplicate labels') elif len(labels) != len(bins) - 1: raise ValueError('Bin labels must be one fewer than the number of bin edges') if not isinstance(getattr(labels, 'dtype', None), CategoricalDtype): labels = Categorical(labels, categories=labels if len(set(labels)) == len(labels) else None, ordered=ordered) np.putmask(ids, na_mask, 0) result = algos.take_nd(labels, ids - 1) else: result = ids - 1 if has_nas: result = result.astype(np.float64) np.putmask(result, na_mask, np.nan) return (result, bins) def _coerce_to_type(x: Index) -> 
tuple[Index, DtypeObj | None]: dtype: DtypeObj | None = None if _is_dt_or_td(x.dtype): dtype = x.dtype elif is_bool_dtype(x.dtype): x = x.astype(np.int64) elif isinstance(x.dtype, ExtensionDtype) and is_numeric_dtype(x.dtype): x_arr = x.to_numpy(dtype=np.float64, na_value=np.nan) x = Index(x_arr) return (Index(x), dtype) def _is_dt_or_td(dtype: DtypeObj) -> bool: return isinstance(dtype, DatetimeTZDtype) or lib.is_np_dtype(dtype, 'mM') def _format_labels(bins: Index, precision: int, right: bool=True, include_lowest: bool=False) -> IntervalIndex: closed: IntervalLeftRight = 'right' if right else 'left' formatter: Callable[[Any], Timestamp] | Callable[[Any], Timedelta] if _is_dt_or_td(bins.dtype): unit = dtype_to_unit(bins.dtype) formatter = lambda x: x adjust = lambda x: x - Timedelta(1, unit=unit).as_unit(unit) else: precision = _infer_precision(precision, bins) formatter = lambda x: _round_frac(x, precision) adjust = lambda x: x - 10 ** (-precision) breaks = [formatter(b) for b in bins] if right and include_lowest: breaks[0] = adjust(breaks[0]) if _is_dt_or_td(bins.dtype): breaks = type(bins)(breaks).as_unit(unit) return IntervalIndex.from_breaks(breaks, closed=closed) def _preprocess_for_cut(x) -> Index: ndim = getattr(x, 'ndim', None) if ndim is None: x = np.asarray(x) if x.ndim != 1: raise ValueError('Input array must be 1 dimensional') return Index(x) def _postprocess_for_cut(fac, bins, retbins: bool, original): if isinstance(original, ABCSeries): fac = original._constructor(fac, index=original.index, name=original.name) if not retbins: return fac if isinstance(bins, Index) and is_numeric_dtype(bins.dtype): bins = bins._values return (fac, bins) def _round_frac(x, precision: int): if not np.isfinite(x) or x == 0: return x else: (frac, whole) = np.modf(x) if whole == 0: digits = -int(np.floor(np.log10(abs(frac)))) - 1 + precision else: digits = precision return np.around(x, digits) def _infer_precision(base_precision: int, bins: Index) -> int: for precision in range(base_precision, 20): levels = np.asarray([_round_frac(b, precision) for b in bins]) if algos.unique(levels).size == bins.size: return precision return base_precision # File: pandas-main/pandas/core/roperator.py """""" from __future__ import annotations import operator def radd(left, right): return right + left def rsub(left, right): return right - left def rmul(left, right): return right * left def rdiv(left, right): return right / left def rtruediv(left, right): return right / left def rfloordiv(left, right): return right // left def rmod(left, right): if isinstance(right, str): typ = type(left).__name__ raise TypeError(f'{typ} cannot perform the operation mod') return right % left def rdivmod(left, right): return divmod(right, left) def rpow(left, right): return right ** left def rand_(left, right): return operator.and_(right, left) def ror_(left, right): return operator.or_(right, left) def rxor(left, right): return operator.xor(right, left) # File: pandas-main/pandas/core/sample.py """""" from __future__ import annotations from typing import TYPE_CHECKING import numpy as np from pandas._libs import lib from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries if TYPE_CHECKING: from pandas._typing import AxisInt from pandas.core.generic import NDFrame def preprocess_weights(obj: NDFrame, weights, axis: AxisInt) -> np.ndarray: if isinstance(weights, ABCSeries): weights = weights.reindex(obj.axes[axis]) if isinstance(weights, str): if isinstance(obj, ABCDataFrame): if axis == 0: try: weights = obj[weights] except 
KeyError as err: raise KeyError('String passed to weights not a valid column') from err else: raise ValueError('Strings can only be passed to weights when sampling from rows on a DataFrame') else: raise ValueError('Strings cannot be passed as weights when sampling from a Series.') if isinstance(obj, ABCSeries): func = obj._constructor else: func = obj._constructor_sliced weights = func(weights, dtype='float64')._values if len(weights) != obj.shape[axis]: raise ValueError('Weights and axis to be sampled must be of same length') if lib.has_infs(weights): raise ValueError('weight vector may not include `inf` values') if (weights < 0).any(): raise ValueError('weight vector many not include negative values') missing = np.isnan(weights) if missing.any(): weights = weights.copy() weights[missing] = 0 return weights def process_sampling_size(n: int | None, frac: float | None, replace: bool) -> int | None: if n is None and frac is None: n = 1 elif n is not None and frac is not None: raise ValueError('Please enter a value for `frac` OR `n`, not both') elif n is not None: if n < 0: raise ValueError('A negative number of rows requested. Please provide `n` >= 0.') if n % 1 != 0: raise ValueError('Only integers accepted as `n` values') else: assert frac is not None if frac > 1 and (not replace): raise ValueError('Replace has to be set to `True` when upsampling the population `frac` > 1.') if frac < 0: raise ValueError('A negative number of rows requested. Please provide `frac` >= 0.') return n def sample(obj_len: int, size: int, replace: bool, weights: np.ndarray | None, random_state: np.random.RandomState | np.random.Generator) -> np.ndarray: if weights is not None: weight_sum = weights.sum() if weight_sum != 0: weights = weights / weight_sum else: raise ValueError('Invalid weights: weights sum to zero') return random_state.choice(obj_len, size=size, replace=replace, p=weights).astype(np.intp, copy=False) # File: pandas-main/pandas/core/series.py """""" from __future__ import annotations from collections.abc import Callable, Hashable, Iterable, Mapping, Sequence import operator import sys from textwrap import dedent from typing import IO, TYPE_CHECKING, Any, Literal, cast, overload import warnings import numpy as np from pandas._libs import lib, properties, reshape from pandas._libs.lib import is_range_indexer from pandas.compat import PYPY from pandas.compat._constants import REF_COUNT from pandas.compat._optional import import_optional_dependency from pandas.compat.numpy import function as nv from pandas.errors import ChainedAssignmentError, InvalidIndexError from pandas.errors.cow import _chained_assignment_method_msg, _chained_assignment_msg from pandas.util._decorators import Appender, Substitution, deprecate_nonkeyword_arguments, doc from pandas.util._validators import validate_ascending, validate_bool_kwarg, validate_percentile from pandas.core.dtypes.astype import astype_is_view from pandas.core.dtypes.cast import LossySetitemError, construct_1d_arraylike_from_scalar, find_common_type, infer_dtype_from, maybe_box_native, maybe_cast_pointwise_result from pandas.core.dtypes.common import is_dict_like, is_float, is_integer, is_iterator, is_list_like, is_object_dtype, is_scalar, pandas_dtype, validate_all_hashable from pandas.core.dtypes.dtypes import CategoricalDtype, ExtensionDtype, SparseDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries from pandas.core.dtypes.inference import is_hashable from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna, 
remove_na_arraylike from pandas.core import algorithms, base, common as com, nanops, ops, roperator from pandas.core.accessor import Accessor from pandas.core.apply import SeriesApply from pandas.core.arrays import ExtensionArray from pandas.core.arrays.arrow import ListAccessor, StructAccessor from pandas.core.arrays.categorical import CategoricalAccessor from pandas.core.arrays.sparse import SparseAccessor from pandas.core.arrays.string_ import StringDtype from pandas.core.construction import array as pd_array, extract_array, sanitize_array from pandas.core.generic import NDFrame, make_doc from pandas.core.indexers import disallow_ndim_indexing, unpack_1tuple from pandas.core.indexes.accessors import CombinedDatetimelikeProperties from pandas.core.indexes.api import DatetimeIndex, Index, MultiIndex, PeriodIndex, default_index, ensure_index, maybe_sequence_to_range import pandas.core.indexes.base as ibase from pandas.core.indexes.multi import maybe_droplevels from pandas.core.indexing import check_bool_indexer, check_dict_or_set_indexers from pandas.core.internals import SingleBlockManager from pandas.core.methods import selectn from pandas.core.shared_docs import _shared_docs from pandas.core.sorting import ensure_key_mapped, nargsort from pandas.core.strings.accessor import StringMethods from pandas.core.tools.datetimes import to_datetime import pandas.io.formats.format as fmt from pandas.io.formats.info import INFO_DOCSTRING, SeriesInfo, series_sub_kwargs import pandas.plotting if TYPE_CHECKING: from pandas._libs.internals import BlockValuesRefs from pandas._typing import AggFuncType, AnyAll, AnyArrayLike, ArrayLike, Axis, AxisInt, CorrelationMethod, DropKeep, Dtype, DtypeObj, FilePath, Frequency, IgnoreRaise, IndexKeyFunc, IndexLabel, Level, ListLike, MutableMappingT, NaPosition, NumpySorter, NumpyValueArrayLike, QuantileInterpolation, ReindexMethod, Renamer, Scalar, Self, SortKind, StorageOptions, Suffixes, ValueKeyFunc, WriteBuffer, npt from pandas.core.frame import DataFrame from pandas.core.groupby.generic import SeriesGroupBy __all__ = ['Series'] _shared_doc_kwargs = {'axes': 'index', 'klass': 'Series', 'axes_single_arg': "{0 or 'index'}", 'axis': "axis : {0 or 'index'}\n Unused. Parameter needed for compatibility with DataFrame.", 'inplace': 'inplace : bool, default False\n If True, performs operation inplace and returns None.', 'unique': 'np.ndarray', 'duplicated': 'Series', 'optional_by': '', 'optional_reindex': '\nindex : array-like, optional\n New labels for the index. Preferably an Index object to avoid\n duplicating data.\naxis : int or str, optional\n Unused.'} class Series(base.IndexOpsMixin, NDFrame): _typ = 'series' _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray) _name: Hashable _metadata: list[str] = ['_name'] _internal_names_set = {'index', 'name'} | NDFrame._internal_names_set _accessors = {'dt', 'cat', 'str', 'sparse'} _hidden_attrs = base.IndexOpsMixin._hidden_attrs | NDFrame._hidden_attrs | frozenset([]) __pandas_priority__ = 3000 hasnans = property(base.IndexOpsMixin.hasnans.fget, doc=base.IndexOpsMixin.hasnans.__doc__) _mgr: SingleBlockManager def __init__(self, data=None, index=None, dtype: Dtype | None=None, name=None, copy: bool | None=None) -> None: allow_mgr = False if isinstance(data, SingleBlockManager) and index is None and (dtype is None) and (copy is False or copy is None): if not allow_mgr: warnings.warn(f'Passing a {type(data).__name__} to {type(self).__name__} is deprecated and will raise in a future version. 
Use public APIs instead.', DeprecationWarning, stacklevel=2) data = data.copy(deep=False) NDFrame.__init__(self, data) self.name = name return if isinstance(data, (ExtensionArray, np.ndarray)): if copy is not False: if dtype is None or astype_is_view(data.dtype, pandas_dtype(dtype)): data = data.copy() if copy is None: copy = False if isinstance(data, SingleBlockManager) and (not copy): data = data.copy(deep=False) if not allow_mgr: warnings.warn(f'Passing a {type(data).__name__} to {type(self).__name__} is deprecated and will raise in a future version. Use public APIs instead.', DeprecationWarning, stacklevel=2) name = ibase.maybe_extract_name(name, data, type(self)) if index is not None: index = ensure_index(index) if dtype is not None: dtype = self._validate_dtype(dtype) if data is None: index = index if index is not None else default_index(0) if len(index) or dtype is not None: data = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: data = [] if isinstance(data, MultiIndex): raise NotImplementedError('initializing a Series from a MultiIndex is not supported') refs = None if isinstance(data, Index): if dtype is not None: data = data.astype(dtype) refs = data._references copy = False elif isinstance(data, np.ndarray): if len(data.dtype): raise ValueError('Cannot construct a Series from an ndarray with compound dtype. Use DataFrame instead.') elif isinstance(data, Series): if index is None: index = data.index data = data._mgr.copy(deep=False) else: data = data.reindex(index) copy = False data = data._mgr elif isinstance(data, Mapping): (data, index) = self._init_dict(data, index, dtype) dtype = None copy = False elif isinstance(data, SingleBlockManager): if index is None: index = data.index elif not data.index.equals(index) or copy: raise AssertionError('Cannot pass both SingleBlockManager `data` argument and a different `index` argument. `copy` must be False.') if not allow_mgr: warnings.warn(f'Passing a {type(data).__name__} to {type(self).__name__} is deprecated and will raise in a future version. 
Use public APIs instead.', DeprecationWarning, stacklevel=2) allow_mgr = True elif isinstance(data, ExtensionArray): pass else: data = com.maybe_iterable_to_list(data) if is_list_like(data) and (not len(data)) and (dtype is None): dtype = np.dtype(object) if index is None: if not is_list_like(data): data = [data] index = default_index(len(data)) elif is_list_like(data): com.require_length_match(data, index) if isinstance(data, SingleBlockManager): if dtype is not None: data = data.astype(dtype=dtype, errors='ignore') elif copy: data = data.copy() else: data = sanitize_array(data, index, dtype, copy) data = SingleBlockManager.from_array(data, index, refs=refs) NDFrame.__init__(self, data) self.name = name self._set_axis(0, index) def _init_dict(self, data: Mapping, index: Index | None=None, dtype: DtypeObj | None=None): if data: keys = maybe_sequence_to_range(tuple(data.keys())) values = list(data.values()) elif index is not None: if len(index) or dtype is not None: values = na_value_for_dtype(pandas_dtype(dtype), compat=False) else: values = [] keys = index else: (keys, values) = (default_index(0), []) s = Series(values, index=keys, dtype=dtype) if data and index is not None: s = s.reindex(index) return (s._mgr, s.index) def __arrow_c_stream__(self, requested_schema=None): pa = import_optional_dependency('pyarrow', min_version='16.0.0') type = pa.DataType._import_from_c_capsule(requested_schema) if requested_schema is not None else None ca = pa.array(self, type=type) if not isinstance(ca, pa.ChunkedArray): ca = pa.chunked_array([ca]) return ca.__arrow_c_stream__() @property def _constructor(self) -> type[Series]: return Series def _constructor_from_mgr(self, mgr, axes): ser = Series._from_mgr(mgr, axes=axes) ser._name = None if type(self) is Series: return ser return self._constructor(ser) @property def _constructor_expanddim(self) -> Callable[..., DataFrame]: from pandas.core.frame import DataFrame return DataFrame def _constructor_expanddim_from_mgr(self, mgr, axes): from pandas.core.frame import DataFrame df = DataFrame._from_mgr(mgr, axes=mgr.axes) if type(self) is Series: return df return self._constructor_expanddim(df) @property def _can_hold_na(self) -> bool: return self._mgr._can_hold_na @property def dtype(self) -> DtypeObj: return self._mgr.dtype @property def dtypes(self) -> DtypeObj: return self.dtype @property def name(self) -> Hashable: return self._name @name.setter def name(self, value: Hashable) -> None: validate_all_hashable(value, error_name=f'{type(self).__name__}.name') object.__setattr__(self, '_name', value) @property def values(self): return self._mgr.external_values() @property def _values(self): return self._mgr.internal_values() @property def _references(self) -> BlockValuesRefs: return self._mgr._block.refs @Appender(base.IndexOpsMixin.array.__doc__) @property def array(self) -> ExtensionArray: return self._mgr.array_values() def __len__(self) -> int: return len(self._mgr) def __array__(self, dtype: npt.DTypeLike | None=None, copy: bool | None=None) -> np.ndarray: values = self._values arr = np.asarray(values, dtype=dtype) if astype_is_view(values.dtype, arr.dtype): arr = arr.view() arr.flags.writeable = False return arr @property def axes(self) -> list[Index]: return [self.index] def _ixs(self, i: int, axis: AxisInt=0) -> Any: return self._values[i] def _slice(self, slobj: slice, axis: AxisInt=0) -> Series: mgr = self._mgr.get_slice(slobj, axis=axis) out = self._constructor_from_mgr(mgr, axes=mgr.axes) out._name = self._name return out.__finalize__(self) def 
__getitem__(self, key): check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) if key is Ellipsis: return self.copy(deep=False) key_is_scalar = is_scalar(key) if isinstance(key, (list, tuple)): key = unpack_1tuple(key) elif key_is_scalar: return self._get_value(key) if is_iterator(key): key = list(key) if is_hashable(key) and (not isinstance(key, slice)): try: result = self._get_value(key) return result except (KeyError, TypeError, InvalidIndexError): if isinstance(key, tuple) and isinstance(self.index, MultiIndex): return self._get_values_tuple(key) if isinstance(key, slice): return self._getitem_slice(key) if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) return self._get_rows_with_mask(key) return self._get_with(key) def _get_with(self, key): if isinstance(key, ABCDataFrame): raise TypeError('Indexing a Series with DataFrame is not supported, use the appropriate DataFrame column') elif isinstance(key, tuple): return self._get_values_tuple(key) return self.loc[key] def _get_values_tuple(self, key: tuple): if com.any_none(*key): result = np.asarray(self._values[key]) disallow_ndim_indexing(result) return result if not isinstance(self.index, MultiIndex): raise KeyError('key of type tuple not found and not a MultiIndex') (indexer, new_index) = self.index.get_loc_level(key) new_ser = self._constructor(self._values[indexer], index=new_index, copy=False) if isinstance(indexer, slice): new_ser._mgr.add_references(self._mgr) return new_ser.__finalize__(self) def _get_rows_with_mask(self, indexer: npt.NDArray[np.bool_]) -> Series: new_mgr = self._mgr.get_rows_with_mask(indexer) return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__(self) def _get_value(self, label, takeable: bool=False): if takeable: return self._values[label] loc = self.index.get_loc(label) if is_integer(loc): return self._values[loc] if isinstance(self.index, MultiIndex): mi = self.index new_values = self._values[loc] if len(new_values) == 1 and mi.nlevels == 1: return new_values[0] new_index = mi[loc] new_index = maybe_droplevels(new_index, label) new_ser = self._constructor(new_values, index=new_index, name=self.name, copy=False) if isinstance(loc, slice): new_ser._mgr.add_references(self._mgr) return new_ser.__finalize__(self) else: return self.iloc[loc] def __setitem__(self, key, value) -> None: if not PYPY: if sys.getrefcount(self) <= 3: warnings.warn(_chained_assignment_msg, ChainedAssignmentError, stacklevel=2) check_dict_or_set_indexers(key) key = com.apply_if_callable(key, self) if key is Ellipsis: key = slice(None) if isinstance(key, slice): indexer = self.index._convert_slice_indexer(key, kind='getitem') return self._set_values(indexer, value) try: self._set_with_engine(key, value) except KeyError: self.loc[key] = value except (TypeError, ValueError, LossySetitemError): indexer = self.index.get_loc(key) self._set_values(indexer, value) except InvalidIndexError as err: if isinstance(key, tuple) and (not isinstance(self.index, MultiIndex)): raise KeyError('key of type tuple not found and not a MultiIndex') from err if com.is_bool_indexer(key): key = check_bool_indexer(self.index, key) key = np.asarray(key, dtype=bool) if is_list_like(value) and len(value) != len(self) and (not isinstance(value, Series)) and (not is_object_dtype(self.dtype)): indexer = key.nonzero()[0] self._set_values(indexer, value) return try: self._where(~key, value, inplace=True) except InvalidIndexError: self.iloc[key] = value return else: 
self._set_with(key, value) def _set_with_engine(self, key, value) -> None: loc = self.index.get_loc(key) self._mgr.setitem_inplace(loc, value) def _set_with(self, key, value) -> None: assert not isinstance(key, tuple) if is_iterator(key): key = list(key) self._set_labels(key, value) def _set_labels(self, key, value) -> None: key = com.asarray_tuplesafe(key) indexer: np.ndarray = self.index.get_indexer(key) mask = indexer == -1 if mask.any(): raise KeyError(f'{key[mask]} not in index') self._set_values(indexer, value) def _set_values(self, key, value) -> None: if isinstance(key, (Index, Series)): key = key._values self._mgr = self._mgr.setitem(indexer=key, value=value) def _set_value(self, label, value, takeable: bool=False) -> None: if not takeable: try: loc = self.index.get_loc(label) except KeyError: self.loc[label] = value return else: loc = label self._set_values(loc, value) def repeat(self, repeats: int | Sequence[int], axis: None=None) -> Series: nv.validate_repeat((), {'axis': axis}) new_index = self.index.repeat(repeats) new_values = self._values.repeat(repeats) return self._constructor(new_values, index=new_index, copy=False).__finalize__(self, method='repeat') @overload def reset_index(self, level: IndexLabel=..., *, drop: Literal[False]=..., name: Level=..., inplace: Literal[False]=..., allow_duplicates: bool=...) -> DataFrame: ... @overload def reset_index(self, level: IndexLabel=..., *, drop: Literal[True], name: Level=..., inplace: Literal[False]=..., allow_duplicates: bool=...) -> Series: ... @overload def reset_index(self, level: IndexLabel=..., *, drop: bool=..., name: Level=..., inplace: Literal[True], allow_duplicates: bool=...) -> None: ... def reset_index(self, level: IndexLabel | None=None, *, drop: bool=False, name: Level=lib.no_default, inplace: bool=False, allow_duplicates: bool=False) -> DataFrame | Series | None: inplace = validate_bool_kwarg(inplace, 'inplace') if drop: new_index = default_index(len(self)) if level is not None: level_list: Sequence[Hashable] if not isinstance(level, (tuple, list)): level_list = [level] else: level_list = level level_list = [self.index._get_level_number(lev) for lev in level_list] if len(level_list) < self.index.nlevels: new_index = self.index.droplevel(level_list) if inplace: self.index = new_index else: new_ser = self.copy(deep=False) new_ser.index = new_index return new_ser.__finalize__(self, method='reset_index') elif inplace: raise TypeError('Cannot reset_index inplace on a Series to create a DataFrame') else: if name is lib.no_default: if self.name is None: name = 0 else: name = self.name df = self.to_frame(name) return df.reset_index(level=level, drop=drop, allow_duplicates=allow_duplicates) return None def __repr__(self) -> str: repr_params = fmt.get_series_repr_params() return self.to_string(**repr_params) @overload def to_string(self, buf: None=..., *, na_rep: str=..., float_format: str | None=..., header: bool=..., index: bool=..., length: bool=..., dtype=..., name=..., max_rows: int | None=..., min_rows: int | None=...) -> str: ... @overload def to_string(self, buf: FilePath | WriteBuffer[str], *, na_rep: str=..., float_format: str | None=..., header: bool=..., index: bool=..., length: bool=..., dtype=..., name=..., max_rows: int | None=..., min_rows: int | None=...) -> None: ... 
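# --- Editorial note: illustrative usage sketch, not part of the pandas source above. ---
# A minimal example of the Series.reset_index() behaviour implemented above: with
# drop=True the values keep their order under a fresh RangeIndex, while the default
# drop=False promotes the old index to a column of a new DataFrame. The variable
# names and printed shapes here are assumptions; exact reprs may vary by version.
import pandas as pd

s = pd.Series([10, 20, 30], index=["a", "b", "c"], name="val")
print(s.reset_index(drop=True))   # Series with index 0, 1, 2
print(s.reset_index())            # DataFrame with columns ['index', 'val']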
@deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self', 'buf'], name='to_string') def to_string(self, buf: FilePath | WriteBuffer[str] | None=None, na_rep: str='NaN', float_format: str | None=None, header: bool=True, index: bool=True, length: bool=False, dtype: bool=False, name: bool=False, max_rows: int | None=None, min_rows: int | None=None) -> str | None: formatter = fmt.SeriesFormatter(self, name=name, length=length, header=header, index=index, dtype=dtype, na_rep=na_rep, float_format=float_format, min_rows=min_rows, max_rows=max_rows) result = formatter.to_string() if not isinstance(result, str): raise AssertionError(f'result must be of type str, type of result is {type(result).__name__!r}') if buf is None: return result elif hasattr(buf, 'write'): buf.write(result) else: with open(buf, 'w', encoding='utf-8') as f: f.write(result) return None @overload def to_markdown(self, buf: None=..., *, mode: str=..., index: bool=..., storage_options: StorageOptions | None=..., **kwargs) -> str: ... @overload def to_markdown(self, buf: IO[str], *, mode: str=..., index: bool=..., storage_options: StorageOptions | None=..., **kwargs) -> None: ... @overload def to_markdown(self, buf: IO[str] | None, *, mode: str=..., index: bool=..., storage_options: StorageOptions | None=..., **kwargs) -> str | None: ... @doc(klass=_shared_doc_kwargs['klass'], storage_options=_shared_docs['storage_options'], examples=dedent('Examples\n --------\n >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal")\n >>> print(s.to_markdown())\n | | animal |\n |---:|:---------|\n | 0 | elk |\n | 1 | pig |\n | 2 | dog |\n | 3 | quetzal |\n\n Output markdown with a tabulate option.\n\n >>> print(s.to_markdown(tablefmt="grid"))\n +----+----------+\n | | animal |\n +====+==========+\n | 0 | elk |\n +----+----------+\n | 1 | pig |\n +----+----------+\n | 2 | dog |\n +----+----------+\n | 3 | quetzal |\n +----+----------+')) @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self', 'buf'], name='to_markdown') def to_markdown(self, buf: IO[str] | None=None, mode: str='wt', index: bool=True, storage_options: StorageOptions | None=None, **kwargs) -> str | None: return self.to_frame().to_markdown(buf, mode=mode, index=index, storage_options=storage_options, **kwargs) def items(self) -> Iterable[tuple[Hashable, Any]]: return zip(iter(self.index), iter(self)) def keys(self) -> Index: return self.index @overload def to_dict(self, *, into: type[MutableMappingT] | MutableMappingT) -> MutableMappingT: ... @overload def to_dict(self, *, into: type[dict]=...) -> dict: ... 
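# --- Editorial note: illustrative usage sketch, not part of the pandas source above. ---
# Series.to_dict() (overloads above, implementation just below) accepts an `into`
# mapping class or instance via com.standardize_mapping. A hedged sketch: the
# returned scalar types may be NumPy scalars for non-object dtypes.
from collections import OrderedDict
import pandas as pd

s = pd.Series([1, 2], index=["x", "y"])
print(s.to_dict())                  # mapping of index labels to values, e.g. {'x': 1, 'y': 2}
print(s.to_dict(into=OrderedDict))  # same pairs, but returned as an OrderedDict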
def to_dict(self, *, into: type[MutableMappingT] | MutableMappingT=dict) -> MutableMappingT: into_c = com.standardize_mapping(into) if is_object_dtype(self.dtype) or isinstance(self.dtype, ExtensionDtype): return into_c(((k, maybe_box_native(v)) for (k, v) in self.items())) else: return into_c(self.items()) def to_frame(self, name: Hashable=lib.no_default) -> DataFrame: columns: Index if name is lib.no_default: name = self.name if name is None: columns = default_index(1) else: columns = Index([name]) else: columns = Index([name]) mgr = self._mgr.to_2d_mgr(columns) df = self._constructor_expanddim_from_mgr(mgr, axes=mgr.axes) return df.__finalize__(self, method='to_frame') def _set_name(self, name, inplace: bool=False, deep: bool | None=None) -> Series: inplace = validate_bool_kwarg(inplace, 'inplace') ser = self if inplace else self.copy(deep=False) ser.name = name return ser @Appender(dedent('\n Examples\n --------\n >>> ser = pd.Series([390., 350., 30., 20.],\n ... index=[\'Falcon\', \'Falcon\', \'Parrot\', \'Parrot\'],\n ... name="Max Speed")\n >>> ser\n Falcon 390.0\n Falcon 350.0\n Parrot 30.0\n Parrot 20.0\n Name: Max Speed, dtype: float64\n\n We can pass a list of values to group the Series data by custom labels:\n\n >>> ser.groupby(["a", "b", "a", "b"]).mean()\n a 210.0\n b 185.0\n Name: Max Speed, dtype: float64\n\n Grouping by numeric labels yields similar results:\n\n >>> ser.groupby([0, 1, 0, 1]).mean()\n 0 210.0\n 1 185.0\n Name: Max Speed, dtype: float64\n\n We can group by a level of the index:\n\n >>> ser.groupby(level=0).mean()\n Falcon 370.0\n Parrot 25.0\n Name: Max Speed, dtype: float64\n\n We can group by a condition applied to the Series values:\n\n >>> ser.groupby(ser > 100).mean()\n Max Speed\n False 25.0\n True 370.0\n Name: Max Speed, dtype: float64\n\n **Grouping by Indexes**\n\n We can groupby different levels of a hierarchical index\n using the `level` parameter:\n\n >>> arrays = [[\'Falcon\', \'Falcon\', \'Parrot\', \'Parrot\'],\n ... 
[\'Captive\', \'Wild\', \'Captive\', \'Wild\']]\n >>> index = pd.MultiIndex.from_arrays(arrays, names=(\'Animal\', \'Type\'))\n >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed")\n >>> ser\n Animal Type\n Falcon Captive 390.0\n Wild 350.0\n Parrot Captive 30.0\n Wild 20.0\n Name: Max Speed, dtype: float64\n\n >>> ser.groupby(level=0).mean()\n Animal\n Falcon 370.0\n Parrot 25.0\n Name: Max Speed, dtype: float64\n\n We can also group by the \'Type\' level of the hierarchical index\n to get the mean speed for each type:\n\n >>> ser.groupby(level="Type").mean()\n Type\n Captive 210.0\n Wild 185.0\n Name: Max Speed, dtype: float64\n\n We can also choose to include `NA` in group keys or not by defining\n `dropna` parameter, the default setting is `True`.\n\n >>> ser = pd.Series([1, 2, 3, 3], index=["a", \'a\', \'b\', np.nan])\n >>> ser.groupby(level=0).sum()\n a 3\n b 3\n dtype: int64\n\n To include `NA` values in the group keys, set `dropna=False`:\n\n >>> ser.groupby(level=0, dropna=False).sum()\n a 3\n b 3\n NaN 3\n dtype: int64\n\n We can also group by a custom list with NaN values to handle\n missing group labels:\n\n >>> arrays = [\'Falcon\', \'Falcon\', \'Parrot\', \'Parrot\']\n >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed")\n >>> ser.groupby(["a", "b", "a", np.nan]).mean()\n a 210.0\n b 350.0\n Name: Max Speed, dtype: float64\n\n >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean()\n a 210.0\n b 350.0\n NaN 20.0\n Name: Max Speed, dtype: float64\n ')) @Appender(_shared_docs['groupby'] % _shared_doc_kwargs) def groupby(self, by=None, level: IndexLabel | None=None, as_index: bool=True, sort: bool=True, group_keys: bool=True, observed: bool=False, dropna: bool=True) -> SeriesGroupBy: from pandas.core.groupby.generic import SeriesGroupBy if level is None and by is None: raise TypeError("You have to supply one of 'by' and 'level'") if not as_index: raise TypeError('as_index=False only valid with DataFrame') return SeriesGroupBy(obj=self, keys=by, level=level, as_index=as_index, sort=sort, group_keys=group_keys, observed=observed, dropna=dropna) def count(self) -> int: return notna(self._values).sum().astype('int64') def mode(self, dropna: bool=True) -> Series: values = self._values if isinstance(values, np.ndarray): res_values = algorithms.mode(values, dropna=dropna) else: res_values = values._mode(dropna=dropna) return self._constructor(res_values, index=range(len(res_values)), name=self.name, copy=False, dtype=self.dtype).__finalize__(self, method='mode') def unique(self) -> ArrayLike: return super().unique() @overload def drop_duplicates(self, *, keep: DropKeep=..., inplace: Literal[False]=..., ignore_index: bool=...) -> Series: ... @overload def drop_duplicates(self, *, keep: DropKeep=..., inplace: Literal[True], ignore_index: bool=...) -> None: ... @overload def drop_duplicates(self, *, keep: DropKeep=..., inplace: bool=..., ignore_index: bool=...) -> Series | None: ... 
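# --- Editorial note: illustrative usage sketch, not part of the pandas source above. ---
# Series.drop_duplicates() (overloads above, implementation below): keep='first'
# retains the first occurrence of each repeated value, keep='last' the last, and
# ignore_index=True relabels the result with a default RangeIndex. Sketch only.
import pandas as pd

s = pd.Series(["a", "b", "a", "c", "b"])
print(s.drop_duplicates())                   # keeps positions 0, 1, 3
print(s.drop_duplicates(keep="last"))        # keeps positions 2, 3, 4
print(s.drop_duplicates(ignore_index=True))  # same values, relabelled 0..2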
def drop_duplicates(self, *, keep: DropKeep='first', inplace: bool=False, ignore_index: bool=False) -> Series | None: inplace = validate_bool_kwarg(inplace, 'inplace') result = super().drop_duplicates(keep=keep) if ignore_index: result.index = default_index(len(result)) if inplace: self._update_inplace(result) return None else: return result def duplicated(self, keep: DropKeep='first') -> Series: res = self._duplicated(keep=keep) result = self._constructor(res, index=self.index, copy=False) return result.__finalize__(self, method='duplicated') def idxmin(self, axis: Axis=0, skipna: bool=True, *args, **kwargs) -> Hashable: axis = self._get_axis_number(axis) iloc = self.argmin(axis, skipna, *args, **kwargs) return self.index[iloc] def idxmax(self, axis: Axis=0, skipna: bool=True, *args, **kwargs) -> Hashable: axis = self._get_axis_number(axis) iloc = self.argmax(axis, skipna, *args, **kwargs) return self.index[iloc] def round(self, decimals: int=0, *args, **kwargs) -> Series: nv.validate_round(args, kwargs) new_mgr = self._mgr.round(decimals=decimals) return self._constructor_from_mgr(new_mgr, axes=new_mgr.axes).__finalize__(self, method='round') @overload def quantile(self, q: float=..., interpolation: QuantileInterpolation=...) -> float: ... @overload def quantile(self, q: Sequence[float] | AnyArrayLike, interpolation: QuantileInterpolation=...) -> Series: ... @overload def quantile(self, q: float | Sequence[float] | AnyArrayLike=..., interpolation: QuantileInterpolation=...) -> float | Series: ... def quantile(self, q: float | Sequence[float] | AnyArrayLike=0.5, interpolation: QuantileInterpolation='linear') -> float | Series: validate_percentile(q) df = self.to_frame() result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) if result.ndim == 2: result = result.iloc[:, 0] if is_list_like(q): result.name = self.name idx = Index(q, dtype=np.float64) return self._constructor(result, index=idx, name=self.name) else: return result.iloc[0] def corr(self, other: Series, method: CorrelationMethod='pearson', min_periods: int | None=None) -> float: (this, other) = self.align(other, join='inner') if len(this) == 0: return np.nan this_values = this.to_numpy(dtype=float, na_value=np.nan, copy=False) other_values = other.to_numpy(dtype=float, na_value=np.nan, copy=False) if method in ['pearson', 'spearman', 'kendall'] or callable(method): return nanops.nancorr(this_values, other_values, method=method, min_periods=min_periods) raise ValueError(f"method must be either 'pearson', 'spearman', 'kendall', or a callable, '{method}' was supplied") def cov(self, other: Series, min_periods: int | None=None, ddof: int | None=1) -> float: (this, other) = self.align(other, join='inner') if len(this) == 0: return np.nan this_values = this.to_numpy(dtype=float, na_value=np.nan, copy=False) other_values = other.to_numpy(dtype=float, na_value=np.nan, copy=False) return nanops.nancov(this_values, other_values, min_periods=min_periods, ddof=ddof) @doc(klass='Series', extra_params='', other_klass='DataFrame', examples=dedent('\n Difference with previous row\n\n >>> s = pd.Series([1, 1, 2, 3, 5, 8])\n >>> s.diff()\n 0 NaN\n 1 0.0\n 2 1.0\n 3 1.0\n 4 2.0\n 5 3.0\n dtype: float64\n\n Difference with 3rd previous row\n\n >>> s.diff(periods=3)\n 0 NaN\n 1 NaN\n 2 NaN\n 3 2.0\n 4 4.0\n 5 6.0\n dtype: float64\n\n Difference with following row\n\n >>> s.diff(periods=-1)\n 0 0.0\n 1 -1.0\n 2 -1.0\n 3 -2.0\n 4 -3.0\n 5 NaN\n dtype: float64\n\n Overflow in input dtype\n\n >>> s = pd.Series([1, 0], 
dtype=np.uint8)\n >>> s.diff()\n 0 NaN\n 1 255.0\n dtype: float64')) def diff(self, periods: int=1) -> Series: if not lib.is_integer(periods): if not (is_float(periods) and periods.is_integer()): raise ValueError('periods must be an integer') result = algorithms.diff(self._values, periods) return self._constructor(result, index=self.index, copy=False).__finalize__(self, method='diff') def autocorr(self, lag: int=1) -> float: return self.corr(cast(Series, self.shift(lag))) def dot(self, other: AnyArrayLike | DataFrame) -> Series | np.ndarray: if isinstance(other, (Series, ABCDataFrame)): common = self.index.union(other.index) if len(common) > len(self.index) or len(common) > len(other.index): raise ValueError('matrices are not aligned') left = self.reindex(index=common) right = other.reindex(index=common) lvals = left.values rvals = right.values else: lvals = self.values rvals = np.asarray(other) if lvals.shape[0] != rvals.shape[0]: raise Exception(f'Dot product shape mismatch, {lvals.shape} vs {rvals.shape}') if isinstance(other, ABCDataFrame): return self._constructor(np.dot(lvals, rvals), index=other.columns, copy=False).__finalize__(self, method='dot') elif isinstance(other, Series): return np.dot(lvals, rvals) elif isinstance(rvals, np.ndarray): return np.dot(lvals, rvals) else: raise TypeError(f'unsupported type: {type(other)}') def __matmul__(self, other): return self.dot(other) def __rmatmul__(self, other): return self.dot(np.transpose(other)) @doc(base.IndexOpsMixin.searchsorted, klass='Series') def searchsorted(self, value: NumpyValueArrayLike | ExtensionArray, side: Literal['left', 'right']='left', sorter: NumpySorter | None=None) -> npt.NDArray[np.intp] | np.intp: return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter) def _append(self, to_append, ignore_index: bool=False, verify_integrity: bool=False): from pandas.core.reshape.concat import concat if isinstance(to_append, (list, tuple)): to_concat = [self] to_concat.extend(to_append) else: to_concat = [self, to_append] if any((isinstance(x, (ABCDataFrame,)) for x in to_concat[1:])): msg = 'to_append should be a Series or list/tuple of Series, got DataFrame' raise TypeError(msg) return concat(to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity) @doc(_shared_docs['compare'], dedent('\n Returns\n -------\n Series or DataFrame\n If axis is 0 or \'index\' the result will be a Series.\n The resulting index will be a MultiIndex with \'self\' and \'other\'\n stacked alternately at the inner level.\n\n If axis is 1 or \'columns\' the result will be a DataFrame.\n It will have two columns namely \'self\' and \'other\'.\n\n See Also\n --------\n DataFrame.compare : Compare with another DataFrame and show differences.\n\n Notes\n -----\n Matching NaNs will not appear as a difference.\n\n Examples\n --------\n >>> s1 = pd.Series(["a", "b", "c", "d", "e"])\n >>> s2 = pd.Series(["a", "a", "c", "b", "e"])\n\n Align the differences on columns\n\n >>> s1.compare(s2)\n self other\n 1 b a\n 3 d b\n\n Stack the differences on indices\n\n >>> s1.compare(s2, align_axis=0)\n 1 self b\n other a\n 3 self d\n other b\n dtype: object\n\n Keep all original rows\n\n >>> s1.compare(s2, keep_shape=True)\n self other\n 0 NaN NaN\n 1 b a\n 2 NaN NaN\n 3 d b\n 4 NaN NaN\n\n Keep all original rows and also all original values\n\n >>> s1.compare(s2, keep_shape=True, keep_equal=True)\n self other\n 0 a a\n 1 b a\n 2 c c\n 3 d b\n 4 e e\n '), klass=_shared_doc_kwargs['klass']) def compare(self, other: Series, align_axis: 
Axis=1, keep_shape: bool=False, keep_equal: bool=False, result_names: Suffixes=('self', 'other')) -> DataFrame | Series: return super().compare(other=other, align_axis=align_axis, keep_shape=keep_shape, keep_equal=keep_equal, result_names=result_names) def combine(self, other: Series | Hashable, func: Callable[[Hashable, Hashable], Hashable], fill_value: Hashable | None=None) -> Series: if fill_value is None: fill_value = na_value_for_dtype(self.dtype, compat=False) if isinstance(other, Series): new_index = self.index.union(other.index) new_name = ops.get_op_result_name(self, other) new_values = np.empty(len(new_index), dtype=object) with np.errstate(all='ignore'): for (i, idx) in enumerate(new_index): lv = self.get(idx, fill_value) rv = other.get(idx, fill_value) new_values[i] = func(lv, rv) else: new_index = self.index new_values = np.empty(len(new_index), dtype=object) with np.errstate(all='ignore'): new_values[:] = [func(lv, other) for lv in self._values] new_name = self.name npvalues = lib.maybe_convert_objects(new_values, try_float=False) same_dtype = isinstance(self.dtype, (StringDtype, CategoricalDtype)) res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=same_dtype) return self._constructor(res_values, index=new_index, name=new_name, copy=False) def combine_first(self, other) -> Series: from pandas.core.reshape.concat import concat if self.dtype == other.dtype: if self.index.equals(other.index): return self.mask(self.isna(), other) elif self._can_hold_na and (not isinstance(self.dtype, SparseDtype)): (this, other) = self.align(other, join='outer') return this.mask(this.isna(), other) new_index = self.index.union(other.index) this = self keep_other = other.index.difference(this.index[notna(this)]) keep_this = this.index.difference(keep_other) this = this.reindex(keep_this) other = other.reindex(keep_other) if this.dtype.kind == 'M' and other.dtype.kind != 'M': other = to_datetime(other) combined = concat([this, other]) combined = combined.reindex(new_index) return combined.__finalize__(self, method='combine_first') def update(self, other: Series | Sequence | Mapping) -> None: if not PYPY: if sys.getrefcount(self) <= REF_COUNT: warnings.warn(_chained_assignment_method_msg, ChainedAssignmentError, stacklevel=2) if not isinstance(other, Series): other = Series(other) other = other.reindex_like(self) mask = notna(other) self._mgr = self._mgr.putmask(mask=mask, new=other) @overload def sort_values(self, *, axis: Axis=..., ascending: bool | Sequence[bool]=..., inplace: Literal[False]=..., kind: SortKind=..., na_position: NaPosition=..., ignore_index: bool=..., key: ValueKeyFunc=...) -> Series: ... @overload def sort_values(self, *, axis: Axis=..., ascending: bool | Sequence[bool]=..., inplace: Literal[True], kind: SortKind=..., na_position: NaPosition=..., ignore_index: bool=..., key: ValueKeyFunc=...) -> None: ... @overload def sort_values(self, *, axis: Axis=..., ascending: bool | Sequence[bool]=..., inplace: bool=..., kind: SortKind=..., na_position: NaPosition=..., ignore_index: bool=..., key: ValueKeyFunc=...) -> Series | None: ... 
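# --- Editorial note: illustrative usage sketch, not part of the pandas source above. ---
# Series.combine_first() (defined above) fills null entries of the calling Series
# with values from `other`, aligning on the union of both indexes. The data below
# is an assumption for illustration; dtypes may be upcast (e.g. to float) when
# NaN is present.
import numpy as np
import pandas as pd

s1 = pd.Series([1.0, np.nan], index=["a", "b"])
s2 = pd.Series([3.0, 4.0], index=["b", "c"])
print(s1.combine_first(s2))  # a -> 1.0 (kept), b -> 3.0 (filled from s2), c -> 4.0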
def sort_values(self, *, axis: Axis=0, ascending: bool | Sequence[bool]=True, inplace: bool=False, kind: SortKind='quicksort', na_position: NaPosition='last', ignore_index: bool=False, key: ValueKeyFunc | None=None) -> Series | None: inplace = validate_bool_kwarg(inplace, 'inplace') self._get_axis_number(axis) if is_list_like(ascending): ascending = cast(Sequence[bool], ascending) if len(ascending) != 1: raise ValueError(f'Length of ascending ({len(ascending)}) must be 1 for Series') ascending = ascending[0] ascending = validate_ascending(ascending) if na_position not in ['first', 'last']: raise ValueError(f'invalid na_position: {na_position}') if key: values_to_sort = cast(Series, ensure_key_mapped(self, key))._values else: values_to_sort = self._values sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) if is_range_indexer(sorted_index, len(sorted_index)): if inplace: return self._update_inplace(self) return self.copy(deep=False) result = self._constructor(self._values[sorted_index], index=self.index[sorted_index], copy=False) if ignore_index: result.index = default_index(len(sorted_index)) if not inplace: return result.__finalize__(self, method='sort_values') self._update_inplace(result) return None @overload def sort_index(self, *, axis: Axis=..., level: IndexLabel=..., ascending: bool | Sequence[bool]=..., inplace: Literal[True], kind: SortKind=..., na_position: NaPosition=..., sort_remaining: bool=..., ignore_index: bool=..., key: IndexKeyFunc=...) -> None: ... @overload def sort_index(self, *, axis: Axis=..., level: IndexLabel=..., ascending: bool | Sequence[bool]=..., inplace: Literal[False]=..., kind: SortKind=..., na_position: NaPosition=..., sort_remaining: bool=..., ignore_index: bool=..., key: IndexKeyFunc=...) -> Series: ... @overload def sort_index(self, *, axis: Axis=..., level: IndexLabel=..., ascending: bool | Sequence[bool]=..., inplace: bool=..., kind: SortKind=..., na_position: NaPosition=..., sort_remaining: bool=..., ignore_index: bool=..., key: IndexKeyFunc=...) -> Series | None: ... 
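# --- Editorial note: illustrative usage sketch, not part of the pandas source above. ---
# Series.sort_values() (implemented above) supports a `key` callable applied to the
# values before sorting and na_position of 'first' or 'last'. A hedged sketch with
# made-up data; exact repr formatting may differ between versions.
import numpy as np
import pandas as pd

s = pd.Series(["banana", "Apple", np.nan, "cherry"])
print(s.sort_values(key=lambda x: x.str.lower(), na_position="first"))
# NaN first, then 'Apple', 'banana', 'cherry' (case-insensitive ordering)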
def sort_index(self, *, axis: Axis=0, level: IndexLabel | None=None, ascending: bool | Sequence[bool]=True, inplace: bool=False, kind: SortKind='quicksort', na_position: NaPosition='last', sort_remaining: bool=True, ignore_index: bool=False, key: IndexKeyFunc | None=None) -> Series | None: return super().sort_index(axis=axis, level=level, ascending=ascending, inplace=inplace, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ignore_index=ignore_index, key=key) def argsort(self, axis: Axis=0, kind: SortKind='quicksort', order: None=None, stable: None=None) -> Series: if axis != -1: self._get_axis_number(axis) result = self.array.argsort(kind=kind) res = self._constructor(result, index=self.index, name=self.name, dtype=np.intp, copy=False) return res.__finalize__(self, method='argsort') def nlargest(self, n: int=5, keep: Literal['first', 'last', 'all']='first') -> Series: return selectn.SelectNSeries(self, n=n, keep=keep).nlargest() def nsmallest(self, n: int=5, keep: Literal['first', 'last', 'all']='first') -> Series: return selectn.SelectNSeries(self, n=n, keep=keep).nsmallest() def swaplevel(self, i: Level=-2, j: Level=-1, copy: bool | lib.NoDefault=lib.no_default) -> Series: self._check_copy_deprecation(copy) assert isinstance(self.index, MultiIndex) result = self.copy(deep=False) result.index = self.index.swaplevel(i, j) return result def reorder_levels(self, order: Sequence[Level]) -> Series: if not isinstance(self.index, MultiIndex): raise Exception('Can only reorder levels on a hierarchical axis.') result = self.copy(deep=False) assert isinstance(result.index, MultiIndex) result.index = result.index.reorder_levels(order) return result def explode(self, ignore_index: bool=False) -> Series: if isinstance(self.dtype, ExtensionDtype): (values, counts) = self._values._explode() elif len(self) and is_object_dtype(self.dtype): (values, counts) = reshape.explode(np.asarray(self._values)) else: result = self.copy() return result.reset_index(drop=True) if ignore_index else result if ignore_index: index: Index = default_index(len(values)) else: index = self.index.repeat(counts) return self._constructor(values, index=index, name=self.name, copy=False) def unstack(self, level: IndexLabel=-1, fill_value: Hashable | None=None, sort: bool=True) -> DataFrame: from pandas.core.reshape.reshape import unstack return unstack(self, level, fill_value, sort) def map(self, arg: Callable | Mapping | Series, na_action: Literal['ignore'] | None=None) -> Series: new_values = self._map_values(arg, na_action=na_action) return self._constructor(new_values, index=self.index, copy=False).__finalize__(self, method='map') def _gotitem(self, key, ndim, subset=None) -> Self: return self _agg_see_also_doc = dedent('\n See Also\n --------\n Series.apply : Invoke function on a Series.\n Series.transform : Transform function producing a Series with like indexes.\n ') _agg_examples_doc = dedent("\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n dtype: int64\n\n >>> s.agg('min')\n 1\n\n >>> s.agg(['min', 'max'])\n min 1\n max 4\n dtype: int64\n ") @doc(_shared_docs['aggregate'], klass=_shared_doc_kwargs['klass'], axis=_shared_doc_kwargs['axis'], see_also=_agg_see_also_doc, examples=_agg_examples_doc) def aggregate(self, func=None, axis: Axis=0, *args, **kwargs): self._get_axis_number(axis) if func is None: func = dict(kwargs.items()) op = SeriesApply(self, func, args=args, kwargs=kwargs) result = op.agg() return result agg = aggregate @doc(_shared_docs['transform'], 
klass=_shared_doc_kwargs['klass'], axis=_shared_doc_kwargs['axis']) def transform(self, func: AggFuncType, axis: Axis=0, *args, **kwargs) -> DataFrame | Series: self._get_axis_number(axis) ser = self.copy(deep=False) result = SeriesApply(ser, func=func, args=args, kwargs=kwargs).transform() return result def apply(self, func: AggFuncType, args: tuple[Any, ...]=(), *, by_row: Literal[False, 'compat']='compat', **kwargs) -> DataFrame | Series: return SeriesApply(self, func, by_row=by_row, args=args, kwargs=kwargs).apply() def _reindex_indexer(self, new_index: Index | None, indexer: npt.NDArray[np.intp] | None) -> Series: if indexer is None and (new_index is None or new_index.names == self.index.names): return self.copy(deep=False) new_values = algorithms.take_nd(self._values, indexer, allow_fill=True, fill_value=None) return self._constructor(new_values, index=new_index, copy=False) def _needs_reindex_multi(self, axes, method, level) -> bool: return False @overload def rename(self, index: Renamer | Hashable | None=..., *, axis: Axis | None=..., copy: bool | lib.NoDefault=..., inplace: Literal[True], level: Level | None=..., errors: IgnoreRaise=...) -> None: ... @overload def rename(self, index: Renamer | Hashable | None=..., *, axis: Axis | None=..., copy: bool | lib.NoDefault=..., inplace: Literal[False]=..., level: Level | None=..., errors: IgnoreRaise=...) -> Series: ... @overload def rename(self, index: Renamer | Hashable | None=..., *, axis: Axis | None=..., copy: bool | lib.NoDefault=..., inplace: bool=..., level: Level | None=..., errors: IgnoreRaise=...) -> Series | None: ... def rename(self, index: Renamer | Hashable | None=None, *, axis: Axis | None=None, copy: bool | lib.NoDefault=lib.no_default, inplace: bool=False, level: Level | None=None, errors: IgnoreRaise='ignore') -> Series | None: self._check_copy_deprecation(copy) if axis is not None: axis = self._get_axis_number(axis) if callable(index) or is_dict_like(index): return super()._rename(index, inplace=inplace, level=level, errors=errors) else: return self._set_name(index, inplace=inplace) @Appender("\n Examples\n --------\n >>> s = pd.Series([1, 2, 3])\n >>> s\n 0 1\n 1 2\n 2 3\n dtype: int64\n\n >>> s.set_axis(['a', 'b', 'c'], axis=0)\n a 1\n b 2\n c 3\n dtype: int64\n ") @Substitution(klass=_shared_doc_kwargs['klass'], axes_single_arg=_shared_doc_kwargs['axes_single_arg'], extended_summary_sub='', axis_description_sub='', see_also_sub='') @Appender(NDFrame.set_axis.__doc__) def set_axis(self, labels, *, axis: Axis=0, copy: bool | lib.NoDefault=lib.no_default) -> Series: return super().set_axis(labels, axis=axis, copy=copy) @doc(NDFrame.reindex, klass=_shared_doc_kwargs['klass'], optional_reindex=_shared_doc_kwargs['optional_reindex']) def reindex(self, index=None, *, axis: Axis | None=None, method: ReindexMethod | None=None, copy: bool | lib.NoDefault=lib.no_default, level: Level | None=None, fill_value: Scalar | None=None, limit: int | None=None, tolerance=None) -> Series: return super().reindex(index=index, method=method, level=level, fill_value=fill_value, limit=limit, tolerance=tolerance, copy=copy) @overload def rename_axis(self, mapper: IndexLabel | lib.NoDefault=..., *, index=..., axis: Axis=..., copy: bool | lib.NoDefault=..., inplace: Literal[True]) -> None: ... @overload def rename_axis(self, mapper: IndexLabel | lib.NoDefault=..., *, index=..., axis: Axis=..., copy: bool | lib.NoDefault=..., inplace: Literal[False]=...) -> Self: ... 
@overload def rename_axis(self, mapper: IndexLabel | lib.NoDefault=..., *, index=..., axis: Axis=..., copy: bool | lib.NoDefault=..., inplace: bool=...) -> Self | None: ... def rename_axis(self, mapper: IndexLabel | lib.NoDefault=lib.no_default, *, index=lib.no_default, axis: Axis=0, copy: bool | lib.NoDefault=lib.no_default, inplace: bool=False) -> Self | None: return super().rename_axis(mapper=mapper, index=index, axis=axis, inplace=inplace, copy=copy) @overload def drop(self, labels: IndexLabel | ListLike=..., *, axis: Axis=..., index: IndexLabel | ListLike=..., columns: IndexLabel | ListLike=..., level: Level | None=..., inplace: Literal[True], errors: IgnoreRaise=...) -> None: ... @overload def drop(self, labels: IndexLabel | ListLike=..., *, axis: Axis=..., index: IndexLabel | ListLike=..., columns: IndexLabel | ListLike=..., level: Level | None=..., inplace: Literal[False]=..., errors: IgnoreRaise=...) -> Series: ... @overload def drop(self, labels: IndexLabel | ListLike=..., *, axis: Axis=..., index: IndexLabel | ListLike=..., columns: IndexLabel | ListLike=..., level: Level | None=..., inplace: bool=..., errors: IgnoreRaise=...) -> Series | None: ... def drop(self, labels: IndexLabel | ListLike=None, *, axis: Axis=0, index: IndexLabel | ListLike=None, columns: IndexLabel | ListLike=None, level: Level | None=None, inplace: bool=False, errors: IgnoreRaise='raise') -> Series | None: return super().drop(labels=labels, axis=axis, index=index, columns=columns, level=level, inplace=inplace, errors=errors) def pop(self, item: Hashable) -> Any: return super().pop(item=item) @doc(INFO_DOCSTRING, **series_sub_kwargs) def info(self, verbose: bool | None=None, buf: IO[str] | None=None, max_cols: int | None=None, memory_usage: bool | str | None=None, show_counts: bool=True) -> None: return SeriesInfo(self, memory_usage).render(buf=buf, max_cols=max_cols, verbose=verbose, show_counts=show_counts) def memory_usage(self, index: bool=True, deep: bool=False) -> int: v = self._memory_usage(deep=deep) if index: v += self.index.memory_usage(deep=deep) return v def isin(self, values) -> Series: result = algorithms.isin(self._values, values) return self._constructor(result, index=self.index, copy=False).__finalize__(self, method='isin') def between(self, left, right, inclusive: Literal['both', 'neither', 'left', 'right']='both') -> Series: if inclusive == 'both': lmask = self >= left rmask = self <= right elif inclusive == 'left': lmask = self >= left rmask = self < right elif inclusive == 'right': lmask = self > left rmask = self <= right elif inclusive == 'neither': lmask = self > left rmask = self < right else: raise ValueError("Inclusive has to be either string of 'both','left', 'right', or 'neither'.") return lmask & rmask def case_when(self, caselist: list[tuple[ArrayLike | Callable[[Series], Series | np.ndarray | Sequence[bool]], ArrayLike | Scalar | Callable[[Series], Series | np.ndarray]],]) -> Series: if not isinstance(caselist, list): raise TypeError(f'The caselist argument should be a list; instead got {type(caselist)}') if not caselist: raise ValueError('provide at least one boolean condition, with a corresponding replacement.') for (num, entry) in enumerate(caselist): if not isinstance(entry, tuple): raise TypeError(f'Argument {num} must be a tuple; instead got {type(entry)}.') if len(entry) != 2: raise ValueError(f'Argument {num} must have length 2; a condition and replacement; instead got length {len(entry)}.') caselist = [(com.apply_if_callable(condition, self), 
com.apply_if_callable(replacement, self)) for (condition, replacement) in caselist] default = self.copy(deep=False) (conditions, replacements) = zip(*caselist) common_dtypes = [infer_dtype_from(arg)[0] for arg in [*replacements, default]] if len(set(common_dtypes)) > 1: common_dtype = find_common_type(common_dtypes) updated_replacements = [] for (condition, replacement) in zip(conditions, replacements): if is_scalar(replacement): replacement = construct_1d_arraylike_from_scalar(value=replacement, length=len(condition), dtype=common_dtype) elif isinstance(replacement, ABCSeries): replacement = replacement.astype(common_dtype) else: replacement = pd_array(replacement, dtype=common_dtype) updated_replacements.append(replacement) replacements = updated_replacements default = default.astype(common_dtype) counter = range(len(conditions) - 1, -1, -1) for (position, condition, replacement) in zip(counter, reversed(conditions), reversed(replacements)): try: default = default.mask(condition, other=replacement, axis=0, inplace=False, level=None) except Exception as error: raise ValueError(f'Failed to apply condition{position} and replacement{position}.') from error return default @doc(NDFrame.isna, klass=_shared_doc_kwargs['klass']) def isna(self) -> Series: return NDFrame.isna(self) @doc(NDFrame.isna, klass=_shared_doc_kwargs['klass']) def isnull(self) -> Series: return super().isnull() @doc(NDFrame.notna, klass=_shared_doc_kwargs['klass']) def notna(self) -> Series: return super().notna() @doc(NDFrame.notna, klass=_shared_doc_kwargs['klass']) def notnull(self) -> Series: return super().notnull() @overload def dropna(self, *, axis: Axis=..., inplace: Literal[False]=..., how: AnyAll | None=..., ignore_index: bool=...) -> Series: ... @overload def dropna(self, *, axis: Axis=..., inplace: Literal[True], how: AnyAll | None=..., ignore_index: bool=...) -> None: ... 
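# Usage sketch (illustrative comment, not part of the pandas source): dropna, defined just
# below, returns a Series with missing entries removed, or updates the object in place when
# inplace=True. Assuming the usual `import pandas as pd`:
#   >>> pd.Series([1.0, None, 3.0]).dropna()
#   0    1.0
#   2    3.0
#   dtype: float64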
def dropna(self, *, axis: Axis=0, inplace: bool=False, how: AnyAll | None=None, ignore_index: bool=False) -> Series | None: inplace = validate_bool_kwarg(inplace, 'inplace') ignore_index = validate_bool_kwarg(ignore_index, 'ignore_index') self._get_axis_number(axis or 0) if self._can_hold_na: result = remove_na_arraylike(self) elif not inplace: result = self.copy(deep=False) else: result = self if ignore_index: result.index = default_index(len(result)) if inplace: return self._update_inplace(result) else: return result def to_timestamp(self, freq: Frequency | None=None, how: Literal['s', 'e', 'start', 'end']='start', copy: bool | lib.NoDefault=lib.no_default) -> Series: self._check_copy_deprecation(copy) if not isinstance(self.index, PeriodIndex): raise TypeError(f'unsupported Type {type(self.index).__name__}') new_obj = self.copy(deep=False) new_index = self.index.to_timestamp(freq=freq, how=how) setattr(new_obj, 'index', new_index) return new_obj def to_period(self, freq: str | None=None, copy: bool | lib.NoDefault=lib.no_default) -> Series: self._check_copy_deprecation(copy) if not isinstance(self.index, DatetimeIndex): raise TypeError(f'unsupported Type {type(self.index).__name__}') new_obj = self.copy(deep=False) new_index = self.index.to_period(freq=freq) setattr(new_obj, 'index', new_index) return new_obj _AXIS_ORDERS: list[Literal['index', 'columns']] = ['index'] _AXIS_LEN = len(_AXIS_ORDERS) _info_axis_number: Literal[0] = 0 _info_axis_name: Literal['index'] = 'index' index = properties.AxisProperty(axis=0, doc="\n The index (axis labels) of the Series.\n\n The index of a Series is used to label and identify each element of the\n underlying data. The index can be thought of as an immutable ordered set\n (technically a multi-set, as it may contain duplicate labels), and is\n used to index and align data in pandas.\n\n Returns\n -------\n Index\n The index labels of the Series.\n\n See Also\n --------\n Series.reindex : Conform Series to new index.\n Index : The base pandas index type.\n\n Notes\n -----\n For more information on pandas indexing, see the `indexing user guide\n `__.\n\n Examples\n --------\n To create a Series with a custom index and view the index labels:\n\n >>> cities = ['Kolkata', 'Chicago', 'Toronto', 'Lisbon']\n >>> populations = [14.85, 2.71, 2.93, 0.51]\n >>> city_series = pd.Series(populations, index=cities)\n >>> city_series.index\n Index(['Kolkata', 'Chicago', 'Toronto', 'Lisbon'], dtype='object')\n\n To change the index labels of an existing Series:\n\n >>> city_series.index = ['KOL', 'CHI', 'TOR', 'LIS']\n >>> city_series.index\n Index(['KOL', 'CHI', 'TOR', 'LIS'], dtype='object')\n ") str = Accessor('str', StringMethods) dt = Accessor('dt', CombinedDatetimelikeProperties) cat = Accessor('cat', CategoricalAccessor) plot = Accessor('plot', pandas.plotting.PlotAccessor) sparse = Accessor('sparse', SparseAccessor) struct = Accessor('struct', StructAccessor) list = Accessor('list', ListAccessor) hist = pandas.plotting.hist_series def _cmp_method(self, other, op): res_name = ops.get_op_result_name(self, other) if isinstance(other, Series) and (not self._indexed_same(other)): raise ValueError('Can only compare identically-labeled Series objects') lvalues = self._values rvalues = extract_array(other, extract_numpy=True, extract_range=True) res_values = ops.comparison_op(lvalues, rvalues, op) return self._construct_result(res_values, name=res_name) def _logical_method(self, other, op): res_name = ops.get_op_result_name(self, other) (self, other) = 
self._align_for_op(other, align_asobject=True) lvalues = self._values rvalues = extract_array(other, extract_numpy=True, extract_range=True) res_values = ops.logical_op(lvalues, rvalues, op) return self._construct_result(res_values, name=res_name) def _arith_method(self, other, op): (self, other) = self._align_for_op(other) return base.IndexOpsMixin._arith_method(self, other, op) def _align_for_op(self, right, align_asobject: bool=False): left = self if isinstance(right, Series): if not left.index.equals(right.index): if align_asobject: if left.dtype not in (object, np.bool_) or right.dtype not in (object, np.bool_): pass else: left = left.astype(object) right = right.astype(object) (left, right) = left.align(right) return (left, right) def _binop(self, other: Series, func, level=None, fill_value=None) -> Series: this = self if not self.index.equals(other.index): (this, other) = self.align(other, level=level, join='outer') (this_vals, other_vals) = ops.fill_binop(this._values, other._values, fill_value) with np.errstate(all='ignore'): result = func(this_vals, other_vals) name = ops.get_op_result_name(self, other) out = this._construct_result(result, name) return cast(Series, out) def _construct_result(self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable) -> Series | tuple[Series, Series]: if isinstance(result, tuple): res1 = self._construct_result(result[0], name=name) res2 = self._construct_result(result[1], name=name) assert isinstance(res1, Series) assert isinstance(res2, Series) return (res1, res2) dtype = getattr(result, 'dtype', None) out = self._constructor(result, index=self.index, dtype=dtype, copy=False) out = out.__finalize__(self) out.name = name return out def _flex_method(self, other, op, *, level=None, fill_value=None, axis: Axis=0): if axis is not None: self._get_axis_number(axis) res_name = ops.get_op_result_name(self, other) if isinstance(other, Series): return self._binop(other, op, level=level, fill_value=fill_value) elif isinstance(other, (np.ndarray, list, tuple)): if len(other) != len(self): raise ValueError('Lengths must be equal') other = self._constructor(other, self.index, copy=False) result = self._binop(other, op, level=level, fill_value=fill_value) result._name = res_name return result else: if fill_value is not None: if isna(other): return op(self, fill_value) self = self.fillna(fill_value) return op(self, other) def eq(self, other, level: Level | None=None, fill_value: float | None=None, axis: Axis=0) -> Series: return self._flex_method(other, operator.eq, level=level, fill_value=fill_value, axis=axis) @Appender(ops.make_flex_doc('ne', 'series')) def ne(self, other, level=None, fill_value=None, axis: Axis=0) -> Series: return self._flex_method(other, operator.ne, level=level, fill_value=fill_value, axis=axis) def le(self, other, level=None, fill_value=None, axis: Axis=0) -> Series: return self._flex_method(other, operator.le, level=level, fill_value=fill_value, axis=axis) @Appender(ops.make_flex_doc('lt', 'series')) def lt(self, other, level=None, fill_value=None, axis: Axis=0) -> Series: return self._flex_method(other, operator.lt, level=level, fill_value=fill_value, axis=axis) def ge(self, other, level=None, fill_value=None, axis: Axis=0) -> Series: return self._flex_method(other, operator.ge, level=level, fill_value=fill_value, axis=axis) @Appender(ops.make_flex_doc('gt', 'series')) def gt(self, other, level=None, fill_value=None, axis: Axis=0) -> Series: return self._flex_method(other, operator.gt, level=level, fill_value=fill_value, 
axis=axis) def add(self, other, level=None, fill_value=None, axis: Axis=0) -> Series: return self._flex_method(other, operator.add, level=level, fill_value=fill_value, axis=axis) @Appender(ops.make_flex_doc('radd', 'series')) def radd(self, other, level=None, fill_value=None, axis: Axis=0) -> Series: return self._flex_method(other, roperator.radd, level=level, fill_value=fill_value, axis=axis) @Appender(ops.make_flex_doc('sub', 'series')) def sub(self, other, level=None, fill_value=None, axis: Axis=0) -> Series: return self._flex_method(other, operator.sub, level=level, fill_value=fill_value, axis=axis) subtract = sub @Appender(ops.make_flex_doc('rsub', 'series')) def rsub(self, other, level=None, fill_value=None, axis: Axis=0) -> Series: return self._flex_method(other, roperator.rsub, level=level, fill_value=fill_value, axis=axis) def mul(self, other, level: Level | None=None, fill_value: float | None=None, axis: Axis=0) -> Series: return self._flex_method(other, operator.mul, level=level, fill_value=fill_value, axis=axis) multiply = mul @Appender(ops.make_flex_doc('rmul', 'series')) def rmul(self, other, level=None, fill_value=None, axis: Axis=0) -> Series: return self._flex_method(other, roperator.rmul, level=level, fill_value=fill_value, axis=axis) def truediv(self, other, level=None, fill_value=None, axis: Axis=0) -> Series: return self._flex_method(other, operator.truediv, level=level, fill_value=fill_value, axis=axis) div = truediv divide = truediv @Appender(ops.make_flex_doc('rtruediv', 'series')) def rtruediv(self, other, level=None, fill_value=None, axis: Axis=0) -> Series: return self._flex_method(other, roperator.rtruediv, level=level, fill_value=fill_value, axis=axis) rdiv = rtruediv @Appender(ops.make_flex_doc('floordiv', 'series')) def floordiv(self, other, level=None, fill_value=None, axis: Axis=0) -> Series: return self._flex_method(other, operator.floordiv, level=level, fill_value=fill_value, axis=axis) @Appender(ops.make_flex_doc('rfloordiv', 'series')) def rfloordiv(self, other, level=None, fill_value=None, axis: Axis=0) -> Series: return self._flex_method(other, roperator.rfloordiv, level=level, fill_value=fill_value, axis=axis) def mod(self, other, level=None, fill_value=None, axis: Axis=0) -> Series: return self._flex_method(other, operator.mod, level=level, fill_value=fill_value, axis=axis) @Appender(ops.make_flex_doc('rmod', 'series')) def rmod(self, other, level=None, fill_value=None, axis: Axis=0) -> Series: return self._flex_method(other, roperator.rmod, level=level, fill_value=fill_value, axis=axis) @Appender(ops.make_flex_doc('pow', 'series')) def pow(self, other, level=None, fill_value=None, axis: Axis=0) -> Series: return self._flex_method(other, operator.pow, level=level, fill_value=fill_value, axis=axis) @Appender(ops.make_flex_doc('rpow', 'series')) def rpow(self, other, level=None, fill_value=None, axis: Axis=0) -> Series: return self._flex_method(other, roperator.rpow, level=level, fill_value=fill_value, axis=axis) @Appender(ops.make_flex_doc('divmod', 'series')) def divmod(self, other, level=None, fill_value=None, axis: Axis=0) -> Series: return self._flex_method(other, divmod, level=level, fill_value=fill_value, axis=axis) @Appender(ops.make_flex_doc('rdivmod', 'series')) def rdivmod(self, other, level=None, fill_value=None, axis: Axis=0) -> Series: return self._flex_method(other, roperator.rdivmod, level=level, fill_value=fill_value, axis=axis) def _reduce(self, op, name: str, *, axis: Axis=0, skipna: bool=True, numeric_only: bool=False, 
filter_type=None, **kwds): delegate = self._values if axis is not None: self._get_axis_number(axis) if isinstance(delegate, ExtensionArray): return delegate._reduce(name, skipna=skipna, **kwds) else: if numeric_only and self.dtype.kind not in 'iufcb': kwd_name = 'numeric_only' if name in ['any', 'all']: kwd_name = 'bool_only' raise TypeError(f'Series.{name} does not allow {kwd_name}={numeric_only} with non-numeric dtypes.') return op(delegate, skipna=skipna, **kwds) @Appender(make_doc('any', ndim=1)) def any(self, *, axis: Axis=0, bool_only: bool=False, skipna: bool=True, **kwargs) -> bool: nv.validate_logical_func((), kwargs, fname='any') validate_bool_kwarg(skipna, 'skipna', none_allowed=False) return self._reduce(nanops.nanany, name='any', axis=axis, numeric_only=bool_only, skipna=skipna, filter_type='bool') @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='all') @Appender(make_doc('all', ndim=1)) def all(self, axis: Axis=0, bool_only: bool=False, skipna: bool=True, **kwargs) -> bool: nv.validate_logical_func((), kwargs, fname='all') validate_bool_kwarg(skipna, 'skipna', none_allowed=False) return self._reduce(nanops.nanall, name='all', axis=axis, numeric_only=bool_only, skipna=skipna, filter_type='bool') @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='min') def min(self, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs): return NDFrame.min(self, axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs) @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='max') def max(self, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs): return NDFrame.max(self, axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs) @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='sum') def sum(self, axis: Axis | None=None, skipna: bool=True, numeric_only: bool=False, min_count: int=0, **kwargs): return NDFrame.sum(self, axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count, **kwargs) @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='prod') @doc(make_doc('prod', ndim=1)) def prod(self, axis: Axis | None=None, skipna: bool=True, numeric_only: bool=False, min_count: int=0, **kwargs): return NDFrame.prod(self, axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count, **kwargs) @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='mean') def mean(self, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs) -> Any: return NDFrame.mean(self, axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs) @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='median') def median(self, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs) -> Any: return NDFrame.median(self, axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs) @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='sem') @doc(make_doc('sem', ndim=1)) def sem(self, axis: Axis | None=None, skipna: bool=True, ddof: int=1, numeric_only: bool=False, **kwargs): return NDFrame.sem(self, axis=axis, skipna=skipna, ddof=ddof, numeric_only=numeric_only, **kwargs) @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='var') def var(self, axis: Axis | None=None, skipna: bool=True, ddof: int=1, numeric_only: bool=False, **kwargs): return NDFrame.var(self, axis=axis, skipna=skipna, ddof=ddof, 
numeric_only=numeric_only, **kwargs) @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='std') @doc(make_doc('std', ndim=1)) def std(self, axis: Axis | None=None, skipna: bool=True, ddof: int=1, numeric_only: bool=False, **kwargs): return NDFrame.std(self, axis=axis, skipna=skipna, ddof=ddof, numeric_only=numeric_only, **kwargs) @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='skew') @doc(make_doc('skew', ndim=1)) def skew(self, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs): return NDFrame.skew(self, axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs) @deprecate_nonkeyword_arguments(version='4.0', allowed_args=['self'], name='kurt') def kurt(self, axis: Axis | None=0, skipna: bool=True, numeric_only: bool=False, **kwargs): return NDFrame.kurt(self, axis=axis, skipna=skipna, numeric_only=numeric_only, **kwargs) kurtosis = kurt product = prod @doc(make_doc('cummin', ndim=1)) def cummin(self, axis: Axis=0, skipna: bool=True, *args, **kwargs) -> Self: return NDFrame.cummin(self, axis, skipna, *args, **kwargs) @doc(make_doc('cummax', ndim=1)) def cummax(self, axis: Axis=0, skipna: bool=True, *args, **kwargs) -> Self: return NDFrame.cummax(self, axis, skipna, *args, **kwargs) @doc(make_doc('cumsum', ndim=1)) def cumsum(self, axis: Axis=0, skipna: bool=True, *args, **kwargs) -> Self: return NDFrame.cumsum(self, axis, skipna, *args, **kwargs) @doc(make_doc('cumprod', 1)) def cumprod(self, axis: Axis=0, skipna: bool=True, *args, **kwargs) -> Self: return NDFrame.cumprod(self, axis, skipna, *args, **kwargs) # File: pandas-main/pandas/core/shared_docs.py from __future__ import annotations _shared_docs: dict[str, str] = {} _shared_docs['aggregate'] = "\nAggregate using one or more operations over the specified axis.\n\nParameters\n----------\nfunc : function, str, list or dict\n Function to use for aggregating the data. If a function, must either\n work when passed a {klass} or when passed to {klass}.apply.\n\n Accepted combinations are:\n\n - function\n - string function name\n - list of functions and/or function names, e.g. ``[np.sum, 'mean']``\n - dict of axis labels -> functions, function names or list of such.\n{axis}\n*args\n Positional arguments to pass to `func`.\n**kwargs\n Keyword arguments to pass to `func`.\n\nReturns\n-------\nscalar, Series or DataFrame\n\n The return can be:\n\n * scalar : when Series.agg is called with single function\n * Series : when DataFrame.agg is called with a single function\n * DataFrame : when DataFrame.agg is called with several functions\n{see_also}\nNotes\n-----\nThe aggregation operations are always performed over an axis, either the\nindex (default) or the column axis. This behavior is different from\n`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,\n`var`), where the default is to compute the aggregation of the flattened\narray, e.g., ``numpy.mean(arr_2d)`` as opposed to\n``numpy.mean(arr_2d, axis=0)``.\n\n`agg` is an alias for `aggregate`. Use the alias.\n\nFunctions that mutate the passed object can produce unexpected\nbehavior or errors and are not supported. 
See :ref:`gotchas.udf-mutation`\nfor more details.\n\nA passed user-defined-function will be passed a Series for evaluation.\n\nIf ``func`` defines an index relabeling, ``axis`` must be ``0`` or ``index``.\n{examples}" _shared_docs['compare'] = "\nCompare to another {klass} and show the differences.\n\nParameters\n----------\nother : {klass}\n Object to compare with.\n\nalign_axis : {{0 or 'index', 1 or 'columns'}}, default 1\n Determine which axis to align the comparison on.\n\n * 0, or 'index' : Resulting differences are stacked vertically\n with rows drawn alternately from self and other.\n * 1, or 'columns' : Resulting differences are aligned horizontally\n with columns drawn alternately from self and other.\n\nkeep_shape : bool, default False\n If true, all rows and columns are kept.\n Otherwise, only the ones with different values are kept.\n\nkeep_equal : bool, default False\n If true, the result keeps values that are equal.\n Otherwise, equal values are shown as NaNs.\n\nresult_names : tuple, default ('self', 'other')\n Set the dataframes names in the comparison.\n\n .. versionadded:: 1.5.0\n" _shared_docs['groupby'] = '\nGroup %(klass)s using a mapper or by a Series of columns.\n\nA groupby operation involves some combination of splitting the\nobject, applying a function, and combining the results. This can be\nused to group large amounts of data and compute operations on these\ngroups.\n\nParameters\n----------\nby : mapping, function, label, pd.Grouper or list of such\n Used to determine the groups for the groupby.\n If ``by`` is a function, it\'s called on each value of the object\'s\n index. If a dict or Series is passed, the Series or dict VALUES\n will be used to determine the groups (the Series\' values are first\n aligned; see ``.align()`` method). If a list or ndarray of length\n equal to the selected axis is passed (see the `groupby user guide\n `_),\n the values are used as-is to determine the groups. A label or list\n of labels may be passed to group by the columns in ``self``.\n Notice that a tuple is interpreted as a (single) key.\nlevel : int, level name, or sequence of such, default None\n If the axis is a MultiIndex (hierarchical), group by a particular\n level or levels. Do not specify both ``by`` and ``level``.\nas_index : bool, default True\n Return object with group labels as the\n index. Only relevant for DataFrame input. as_index=False is\n effectively "SQL-style" grouped output. This argument has no effect\n on filtrations (see the `filtrations in the user guide\n `_),\n such as ``head()``, ``tail()``, ``nth()`` and in transformations\n (see the `transformations in the user guide\n `_).\nsort : bool, default True\n Sort group keys. Get better performance by turning this off.\n Note this does not influence the order of observations within each\n group. Groupby preserves the order of rows within each group. If False,\n the groups will appear in the same order as they did in the original DataFrame.\n This argument has no effect on filtrations (see the `filtrations in the user guide\n `_),\n such as ``head()``, ``tail()``, ``nth()`` and in transformations\n (see the `transformations in the user guide\n `_).\n\n .. versionchanged:: 2.0.0\n\n Specifying ``sort=False`` with an ordered categorical grouper will no\n longer sort the values.\n\ngroup_keys : bool, default True\n When calling apply and the ``by`` argument produces a like-indexed\n (i.e. :ref:`a transform `) result, add group keys to\n index to identify pieces. 
By default group keys are not included\n when the result\'s index (and column) labels match the inputs, and\n are included otherwise.\n\n .. versionchanged:: 1.5.0\n\n Warns that ``group_keys`` will no longer be ignored when the\n result from ``apply`` is a like-indexed Series or DataFrame.\n Specify ``group_keys`` explicitly to include the group keys or\n not.\n\n .. versionchanged:: 2.0.0\n\n ``group_keys`` now defaults to ``True``.\n\nobserved : bool, default True\n This only applies if any of the groupers are Categoricals.\n If True: only show observed values for categorical groupers.\n If False: show all values for categorical groupers.\n\n .. versionchanged:: 3.0.0\n\n The default value is now ``True``.\n\ndropna : bool, default True\n If True, and if group keys contain NA values, NA values together\n with row/column will be dropped.\n If False, NA values will also be treated as the key in groups.\n\nReturns\n-------\npandas.api.typing.%(klass)sGroupBy\n Returns a groupby object that contains information about the groups.\n\nSee Also\n--------\nresample : Convenience method for frequency conversion and resampling\n of time series.\n\nNotes\n-----\nSee the `user guide\n`__ for more\ndetailed usage and examples, including splitting an object into groups,\niterating through groups, selecting a group, aggregation, and more.\n\nThe implementation of groupby is hash-based, meaning in particular that\nobjects that compare as equal will be considered to be in the same group.\nAn exception to this is that pandas has special handling of NA values:\nany NA values will be collapsed to a single group, regardless of how\nthey compare. See the user guide linked above for more details.\n' _shared_docs['transform'] = '\nCall ``func`` on self producing a {klass} with the same axis shape as self.\n\nParameters\n----------\nfunc : function, str, list-like or dict-like\n Function to use for transforming the data. If a function, must either\n work when passed a {klass} or when passed to {klass}.apply. If func\n is both list-like and dict-like, dict-like behavior takes precedence.\n\n Accepted combinations are:\n\n - function\n - string function name\n - list-like of functions and/or function names, e.g. ``[np.exp, \'sqrt\']``\n - dict-like of axis labels -> functions, function names or list-like of such.\n{axis}\n*args\n Positional arguments to pass to `func`.\n**kwargs\n Keyword arguments to pass to `func`.\n\nReturns\n-------\n{klass}\n A {klass} that must have the same length as self.\n\nRaises\n------\nValueError : If the returned {klass} has a different length than self.\n\nSee Also\n--------\n{klass}.agg : Only perform aggregating type operations.\n{klass}.apply : Invoke function on a {klass}.\n\nNotes\n-----\nFunctions that mutate the passed object can produce unexpected\nbehavior or errors and are not supported. See :ref:`gotchas.udf-mutation`\nfor more details.\n\nExamples\n--------\n>>> df = pd.DataFrame({{\'A\': range(3), \'B\': range(1, 4)}})\n>>> df\n A B\n0 0 1\n1 1 2\n2 2 3\n>>> df.transform(lambda x: x + 1)\n A B\n0 1 2\n1 2 3\n2 3 4\n\nEven though the resulting {klass} must have the same length as the\ninput {klass}, it is possible to provide several input functions:\n\n>>> s = pd.Series(range(3))\n>>> s\n0 0\n1 1\n2 2\ndtype: int64\n>>> s.transform([np.sqrt, np.exp])\n sqrt exp\n0 0.000000 1.000000\n1 1.000000 2.718282\n2 1.414214 7.389056\n\nYou can call transform on a GroupBy object:\n\n>>> df = pd.DataFrame({{\n... "Date": [\n... 
"2015-05-08", "2015-05-07", "2015-05-06", "2015-05-05",\n... "2015-05-08", "2015-05-07", "2015-05-06", "2015-05-05"],\n... "Data": [5, 8, 6, 1, 50, 100, 60, 120],\n... }})\n>>> df\n Date Data\n0 2015-05-08 5\n1 2015-05-07 8\n2 2015-05-06 6\n3 2015-05-05 1\n4 2015-05-08 50\n5 2015-05-07 100\n6 2015-05-06 60\n7 2015-05-05 120\n>>> df.groupby(\'Date\')[\'Data\'].transform(\'sum\')\n0 55\n1 108\n2 66\n3 121\n4 55\n5 108\n6 66\n7 121\nName: Data, dtype: int64\n\n>>> df = pd.DataFrame({{\n... "c": [1, 1, 1, 2, 2, 2, 2],\n... "type": ["m", "n", "o", "m", "m", "n", "n"]\n... }})\n>>> df\n c type\n0 1 m\n1 1 n\n2 1 o\n3 2 m\n4 2 m\n5 2 n\n6 2 n\n>>> df[\'size\'] = df.groupby(\'c\')[\'type\'].transform(len)\n>>> df\n c type size\n0 1 m 3\n1 1 n 3\n2 1 o 3\n3 2 m 4\n4 2 m 4\n5 2 n 4\n6 2 n 4\n' _shared_docs['storage_options'] = 'storage_options : dict, optional\n Extra options that make sense for a particular storage connection, e.g.\n host, port, username, password, etc. For HTTP(S) URLs the key-value pairs\n are forwarded to ``urllib.request.Request`` as header options. For other\n URLs (e.g. starting with "s3://", and "gcs://") the key-value pairs are\n forwarded to ``fsspec.open``. Please see ``fsspec`` and ``urllib`` for more\n details, and for more examples on storage options refer `here\n `_.' _shared_docs['compression_options'] = "compression : str or dict, default 'infer'\n For on-the-fly compression of the output data. If 'infer' and '%s' is\n path-like, then detect compression from the following extensions: '.gz',\n '.bz2', '.zip', '.xz', '.zst', '.tar', '.tar.gz', '.tar.xz' or '.tar.bz2'\n (otherwise no compression).\n Set to ``None`` for no compression.\n Can also be a dict with key ``'method'`` set\n to one of {``'zip'``, ``'gzip'``, ``'bz2'``, ``'zstd'``, ``'xz'``, ``'tar'``} and\n other key-value pairs are forwarded to\n ``zipfile.ZipFile``, ``gzip.GzipFile``,\n ``bz2.BZ2File``, ``zstandard.ZstdCompressor``, ``lzma.LZMAFile`` or\n ``tarfile.TarFile``, respectively.\n As an example, the following could be passed for faster compression and to create\n a reproducible gzip archive:\n ``compression={'method': 'gzip', 'compresslevel': 1, 'mtime': 1}``.\n\n .. versionadded:: 1.5.0\n Added support for `.tar` files." _shared_docs['decompression_options'] = "compression : str or dict, default 'infer'\n For on-the-fly decompression of on-disk data. If 'infer' and '%s' is\n path-like, then detect compression from the following extensions: '.gz',\n '.bz2', '.zip', '.xz', '.zst', '.tar', '.tar.gz', '.tar.xz' or '.tar.bz2'\n (otherwise no compression).\n If using 'zip' or 'tar', the ZIP file must contain only one data file to be read in.\n Set to ``None`` for no decompression.\n Can also be a dict with key ``'method'`` set\n to one of {``'zip'``, ``'gzip'``, ``'bz2'``, ``'zstd'``, ``'xz'``, ``'tar'``} and\n other key-value pairs are forwarded to\n ``zipfile.ZipFile``, ``gzip.GzipFile``,\n ``bz2.BZ2File``, ``zstandard.ZstdDecompressor``, ``lzma.LZMAFile`` or\n ``tarfile.TarFile``, respectively.\n As an example, the following could be passed for Zstandard decompression using a\n custom compression dictionary:\n ``compression={'method': 'zstd', 'dict_data': my_compression_dict}``.\n\n .. versionadded:: 1.5.0\n Added support for `.tar` files." 
_shared_docs['replace'] = "\n Replace values given in `to_replace` with `value`.\n\n Values of the {klass} are replaced with other values dynamically.\n This differs from updating with ``.loc`` or ``.iloc``, which require\n you to specify a location to update with some value.\n\n Parameters\n ----------\n to_replace : str, regex, list, dict, Series, int, float, or None\n How to find the values that will be replaced.\n\n * numeric, str or regex:\n\n - numeric: numeric values equal to `to_replace` will be\n replaced with `value`\n - str: string exactly matching `to_replace` will be replaced\n with `value`\n - regex: regexs matching `to_replace` will be replaced with\n `value`\n\n * list of str, regex, or numeric:\n\n - First, if `to_replace` and `value` are both lists, they\n **must** be the same length.\n - Second, if ``regex=True`` then all of the strings in **both**\n lists will be interpreted as regexs otherwise they will match\n directly. This doesn't matter much for `value` since there\n are only a few possible substitution regexes you can use.\n - str, regex and numeric rules apply as above.\n\n * dict:\n\n - Dicts can be used to specify different replacement values\n for different existing values. For example,\n ``{{'a': 'b', 'y': 'z'}}`` replaces the value 'a' with 'b' and\n 'y' with 'z'. To use a dict in this way, the optional `value`\n parameter should not be given.\n - For a DataFrame a dict can specify that different values\n should be replaced in different columns. For example,\n ``{{'a': 1, 'b': 'z'}}`` looks for the value 1 in column 'a'\n and the value 'z' in column 'b' and replaces these values\n with whatever is specified in `value`. The `value` parameter\n should not be ``None`` in this case. You can treat this as a\n special case of passing two lists except that you are\n specifying the column to search in.\n - For a DataFrame nested dictionaries, e.g.,\n ``{{'a': {{'b': np.nan}}}}``, are read as follows: look in column\n 'a' for the value 'b' and replace it with NaN. The optional `value`\n parameter should not be specified to use a nested dict in this\n way. You can nest regular expressions as well. Note that\n column names (the top-level dictionary keys in a nested\n dictionary) **cannot** be regular expressions.\n\n * None:\n\n - This means that the `regex` argument must be a string,\n compiled regular expression, or list, dict, ndarray or\n Series of such elements. If `value` is also ``None`` then\n this **must** be a nested dictionary or Series.\n\n See the examples section for examples of each of these.\n value : scalar, dict, list, str, regex, default None\n Value to replace any values matching `to_replace` with.\n For a DataFrame a dict of values can be used to specify which\n value to use for each column (columns not in the dict will not be\n filled). Regular expressions, strings and lists or dicts of such\n objects are also allowed.\n {inplace}\n regex : bool or same types as `to_replace`, default False\n Whether to interpret `to_replace` and/or `value` as regular\n expressions. 
Alternatively, this could be a regular expression or a\n list, dict, or array of regular expressions in which case\n `to_replace` must be ``None``.\n\n Returns\n -------\n {klass}\n Object after replacement.\n\n Raises\n ------\n AssertionError\n * If `regex` is not a ``bool`` and `to_replace` is not\n ``None``.\n\n TypeError\n * If `to_replace` is not a scalar, array-like, ``dict``, or ``None``\n * If `to_replace` is a ``dict`` and `value` is not a ``list``,\n ``dict``, ``ndarray``, or ``Series``\n * If `to_replace` is ``None`` and `regex` is not compilable\n into a regular expression or is a list, dict, ndarray, or\n Series.\n * When replacing multiple ``bool`` or ``datetime64`` objects and\n the arguments to `to_replace` does not match the type of the\n value being replaced\n\n ValueError\n * If a ``list`` or an ``ndarray`` is passed to `to_replace` and\n `value` but they are not the same length.\n\n See Also\n --------\n Series.fillna : Fill NA values.\n DataFrame.fillna : Fill NA values.\n Series.where : Replace values based on boolean condition.\n DataFrame.where : Replace values based on boolean condition.\n DataFrame.map: Apply a function to a Dataframe elementwise.\n Series.map: Map values of Series according to an input mapping or function.\n Series.str.replace : Simple string replacement.\n\n Notes\n -----\n * Regex substitution is performed under the hood with ``re.sub``. The\n rules for substitution for ``re.sub`` are the same.\n * Regular expressions will only substitute on strings, meaning you\n cannot provide, for example, a regular expression matching floating\n point numbers and expect the columns in your frame that have a\n numeric dtype to be matched. However, if those floating point\n numbers *are* strings, then you can do this.\n * This method has *a lot* of options. You are encouraged to experiment\n and play with this method to gain intuition about how it works.\n * When dict is used as the `to_replace` value, it is like\n key(s) in the dict are the to_replace part and\n value(s) in the dict are the value parameter.\n\n Examples\n --------\n\n **Scalar `to_replace` and `value`**\n\n >>> s = pd.Series([1, 2, 3, 4, 5])\n >>> s.replace(1, 5)\n 0 5\n 1 2\n 2 3\n 3 4\n 4 5\n dtype: int64\n\n >>> df = pd.DataFrame({{'A': [0, 1, 2, 3, 4],\n ... 'B': [5, 6, 7, 8, 9],\n ... 'C': ['a', 'b', 'c', 'd', 'e']}})\n >>> df.replace(0, 5)\n A B C\n 0 5 5 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n **List-like `to_replace`**\n\n >>> df.replace([0, 1, 2, 3], 4)\n A B C\n 0 4 5 a\n 1 4 6 b\n 2 4 7 c\n 3 4 8 d\n 4 4 9 e\n\n >>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])\n A B C\n 0 4 5 a\n 1 3 6 b\n 2 2 7 c\n 3 1 8 d\n 4 4 9 e\n\n **dict-like `to_replace`**\n\n >>> df.replace({{0: 10, 1: 100}})\n A B C\n 0 10 5 a\n 1 100 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n >>> df.replace({{'A': 0, 'B': 5}}, 100)\n A B C\n 0 100 100 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 4 9 e\n\n >>> df.replace({{'A': {{0: 100, 4: 400}}}})\n A B C\n 0 100 5 a\n 1 1 6 b\n 2 2 7 c\n 3 3 8 d\n 4 400 9 e\n\n **Regular expression `to_replace`**\n\n >>> df = pd.DataFrame({{'A': ['bat', 'foo', 'bait'],\n ... 
'B': ['abc', 'bar', 'xyz']}})\n >>> df.replace(to_replace=r'^ba.$', value='new', regex=True)\n A B\n 0 new abc\n 1 foo new\n 2 bait xyz\n\n >>> df.replace({{'A': r'^ba.$'}}, {{'A': 'new'}}, regex=True)\n A B\n 0 new abc\n 1 foo bar\n 2 bait xyz\n\n >>> df.replace(regex=r'^ba.$', value='new')\n A B\n 0 new abc\n 1 foo new\n 2 bait xyz\n\n >>> df.replace(regex={{r'^ba.$': 'new', 'foo': 'xyz'}})\n A B\n 0 new abc\n 1 xyz new\n 2 bait xyz\n\n >>> df.replace(regex=[r'^ba.$', 'foo'], value='new')\n A B\n 0 new abc\n 1 new new\n 2 bait xyz\n\n Compare the behavior of ``s.replace({{'a': None}})`` and\n ``s.replace('a', None)`` to understand the peculiarities\n of the `to_replace` parameter:\n\n >>> s = pd.Series([10, 'a', 'a', 'b', 'a'])\n\n When one uses a dict as the `to_replace` value, it is like the\n value(s) in the dict are equal to the `value` parameter.\n ``s.replace({{'a': None}})`` is equivalent to\n ``s.replace(to_replace={{'a': None}}, value=None)``:\n\n >>> s.replace({{'a': None}})\n 0 10\n 1 None\n 2 None\n 3 b\n 4 None\n dtype: object\n\n If ``None`` is explicitly passed for ``value``, it will be respected:\n\n >>> s.replace('a', None)\n 0 10\n 1 None\n 2 None\n 3 b\n 4 None\n dtype: object\n\n .. versionchanged:: 1.4.0\n Previously the explicit ``None`` was silently ignored.\n\n When ``regex=True``, ``value`` is not ``None`` and `to_replace` is a string,\n the replacement will be applied in all columns of the DataFrame.\n\n >>> df = pd.DataFrame({{'A': [0, 1, 2, 3, 4],\n ... 'B': ['a', 'b', 'c', 'd', 'e'],\n ... 'C': ['f', 'g', 'h', 'i', 'j']}})\n\n >>> df.replace(to_replace='^[a-g]', value='e', regex=True)\n A B C\n 0 0 e e\n 1 1 e e\n 2 2 e h\n 3 3 e i\n 4 4 e j\n\n If ``value`` is not ``None`` and `to_replace` is a dictionary, the dictionary\n keys will be the DataFrame columns that the replacement will be applied.\n\n >>> df.replace(to_replace={{'B': '^[a-c]', 'C': '^[h-j]'}}, value='e', regex=True)\n A B C\n 0 0 e f\n 1 1 e g\n 2 2 e e\n 3 3 d e\n 4 4 e e\n" # File: pandas-main/pandas/core/sorting.py """""" from __future__ import annotations import itertools from typing import TYPE_CHECKING, cast import numpy as np from pandas._libs import algos, hashtable, lib from pandas._libs.hashtable import unique_label_indices from pandas.core.dtypes.common import ensure_int64, ensure_platform_int from pandas.core.dtypes.generic import ABCMultiIndex, ABCRangeIndex from pandas.core.dtypes.missing import isna from pandas.core.construction import extract_array if TYPE_CHECKING: from collections.abc import Callable, Hashable, Sequence from pandas._typing import ArrayLike, AxisInt, IndexKeyFunc, Level, NaPosition, Shape, SortKind, npt from pandas import MultiIndex, Series from pandas.core.arrays import ExtensionArray from pandas.core.indexes.base import Index def get_indexer_indexer(target: Index, level: Level | list[Level] | None, ascending: list[bool] | bool, kind: SortKind, na_position: NaPosition, sort_remaining: bool, key: IndexKeyFunc) -> npt.NDArray[np.intp] | None: target = ensure_key_mapped(target, key, levels=level) target = target._sort_levels_monotonic() if level is not None: (_, indexer) = target.sortlevel(level, ascending=ascending, sort_remaining=sort_remaining, na_position=na_position) elif np.all(ascending) and target.is_monotonic_increasing or (not np.any(ascending) and target.is_monotonic_decreasing): return None elif isinstance(target, ABCMultiIndex): codes = [lev.codes for lev in target._get_codes_for_sorting()] indexer = lexsort_indexer(codes, orders=ascending, 
na_position=na_position, codes_given=True) else: indexer = nargsort(target, kind=kind, ascending=cast(bool, ascending), na_position=na_position) return indexer def get_group_index(labels, shape: Shape, sort: bool, xnull: bool) -> npt.NDArray[np.int64]: def _int64_cut_off(shape) -> int: acc = 1 for (i, mul) in enumerate(shape): acc *= int(mul) if not acc < lib.i8max: return i return len(shape) def maybe_lift(lab, size: int) -> tuple[np.ndarray, int]: return (lab + 1, size + 1) if (lab == -1).any() else (lab, size) labels = [ensure_int64(x) for x in labels] lshape = list(shape) if not xnull: for (i, (lab, size)) in enumerate(zip(labels, shape)): (labels[i], lshape[i]) = maybe_lift(lab, size) while True: nlev = _int64_cut_off(lshape) stride = np.prod(lshape[1:nlev], dtype='i8') out = stride * labels[0].astype('i8', subok=False, copy=False) for i in range(1, nlev): if lshape[i] == 0: stride = np.int64(0) else: stride //= lshape[i] out += labels[i] * stride if xnull: mask = labels[0] == -1 for lab in labels[1:nlev]: mask |= lab == -1 out[mask] = -1 if nlev == len(lshape): break (comp_ids, obs_ids) = compress_group_index(out, sort=sort) labels = [comp_ids] + labels[nlev:] lshape = [len(obs_ids)] + lshape[nlev:] return out def get_compressed_ids(labels, sizes: Shape) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.int64]]: ids = get_group_index(labels, sizes, sort=True, xnull=False) return compress_group_index(ids, sort=True) def is_int64_overflow_possible(shape: Shape) -> bool: the_prod = 1 for x in shape: the_prod *= int(x) return the_prod >= lib.i8max def _decons_group_index(comp_labels: npt.NDArray[np.intp], shape: Shape) -> list[npt.NDArray[np.intp]]: if is_int64_overflow_possible(shape): raise ValueError('cannot deconstruct factorized group indices!') label_list = [] factor = 1 y = np.array(0) x = comp_labels for i in reversed(range(len(shape))): labels = (x - y) % (factor * shape[i]) // factor np.putmask(labels, comp_labels < 0, -1) label_list.append(labels) y = labels * factor factor *= shape[i] return label_list[::-1] def decons_obs_group_ids(comp_ids: npt.NDArray[np.intp], obs_ids: npt.NDArray[np.intp], shape: Shape, labels: Sequence[npt.NDArray[np.signedinteger]], xnull: bool) -> list[npt.NDArray[np.intp]]: if not xnull: lift = np.fromiter(((a == -1).any() for a in labels), dtype=np.intp) arr_shape = np.asarray(shape, dtype=np.intp) + lift shape = tuple(arr_shape) if not is_int64_overflow_possible(shape): out = _decons_group_index(obs_ids, shape) return out if xnull or not lift.any() else [x - y for (x, y) in zip(out, lift)] indexer = unique_label_indices(comp_ids) return [lab[indexer].astype(np.intp, subok=False, copy=True) for lab in labels] def lexsort_indexer(keys: Sequence[ArrayLike | Index | Series], orders=None, na_position: str='last', key: Callable | None=None, codes_given: bool=False) -> npt.NDArray[np.intp]: from pandas.core.arrays import Categorical if na_position not in ['last', 'first']: raise ValueError(f'invalid na_position: {na_position}') if isinstance(orders, bool): orders = itertools.repeat(orders, len(keys)) elif orders is None: orders = itertools.repeat(True, len(keys)) else: orders = reversed(orders) labels = [] for (k, order) in zip(reversed(keys), orders): k = ensure_key_mapped(k, key) if codes_given: codes = cast(np.ndarray, k) n = codes.max() + 1 if len(codes) else 0 else: cat = Categorical(k, ordered=True) codes = cat.codes n = len(cat.categories) mask = codes == -1 if na_position == 'last' and mask.any(): codes = np.where(mask, n, codes) if not order: codes = 
np.where(mask, codes, n - codes - 1) labels.append(codes) return np.lexsort(labels) def nargsort(items: ArrayLike | Index | Series, kind: SortKind='quicksort', ascending: bool=True, na_position: str='last', key: Callable | None=None, mask: npt.NDArray[np.bool_] | None=None) -> npt.NDArray[np.intp]: if key is not None: items = ensure_key_mapped(items, key) return nargsort(items, kind=kind, ascending=ascending, na_position=na_position, key=None, mask=mask) if isinstance(items, ABCRangeIndex): return items.argsort(ascending=ascending) elif not isinstance(items, ABCMultiIndex): items = extract_array(items) else: raise TypeError('nargsort does not support MultiIndex. Use index.sort_values instead.') if mask is None: mask = np.asarray(isna(items)) if not isinstance(items, np.ndarray): return items.argsort(ascending=ascending, kind=kind, na_position=na_position) idx = np.arange(len(items)) non_nans = items[~mask] non_nan_idx = idx[~mask] nan_idx = np.nonzero(mask)[0] if not ascending: non_nans = non_nans[::-1] non_nan_idx = non_nan_idx[::-1] indexer = non_nan_idx[non_nans.argsort(kind=kind)] if not ascending: indexer = indexer[::-1] if na_position == 'last': indexer = np.concatenate([indexer, nan_idx]) elif na_position == 'first': indexer = np.concatenate([nan_idx, indexer]) else: raise ValueError(f'invalid na_position: {na_position}') return ensure_platform_int(indexer) def nargminmax(values: ExtensionArray, method: str, axis: AxisInt=0): assert method in {'argmax', 'argmin'} func = np.argmax if method == 'argmax' else np.argmin mask = np.asarray(isna(values)) arr_values = values._values_for_argsort() if arr_values.ndim > 1: if mask.any(): if axis == 1: zipped = zip(arr_values, mask) else: zipped = zip(arr_values.T, mask.T) return np.array([_nanargminmax(v, m, func) for (v, m) in zipped]) return func(arr_values, axis=axis) return _nanargminmax(arr_values, mask, func) def _nanargminmax(values: np.ndarray, mask: npt.NDArray[np.bool_], func) -> int: idx = np.arange(values.shape[0]) non_nans = values[~mask] non_nan_idx = idx[~mask] return non_nan_idx[func(non_nans)] def _ensure_key_mapped_multiindex(index: MultiIndex, key: Callable, level=None) -> MultiIndex: if level is not None: if isinstance(level, (str, int)): level_iter = [level] else: level_iter = level sort_levels: range | set = {index._get_level_number(lev) for lev in level_iter} else: sort_levels = range(index.nlevels) mapped = [ensure_key_mapped(index._get_level_values(level), key) if level in sort_levels else index._get_level_values(level) for level in range(index.nlevels)] return type(index).from_arrays(mapped) def ensure_key_mapped(values: ArrayLike | Index | Series, key: Callable | None, levels=None) -> ArrayLike | Index | Series: from pandas.core.indexes.api import Index if not key: return values if isinstance(values, ABCMultiIndex): return _ensure_key_mapped_multiindex(values, key, level=levels) result = key(values.copy()) if len(result) != len(values): raise ValueError('User-provided `key` function must not change the shape of the array.') try: if isinstance(values, Index): result = Index(result, tupleize_cols=False) else: type_of_values = type(values) result = type_of_values(result) except TypeError as err: raise TypeError(f'User-provided `key` function returned an invalid type {type(result)} which could not be converted to {type(values)}.') from err return result def get_indexer_dict(label_list: list[np.ndarray], keys: list[Index]) -> dict[Hashable, npt.NDArray[np.intp]]: shape = tuple((len(x) for x in keys)) group_index = 
get_group_index(label_list, shape, sort=True, xnull=True) if np.all(group_index == -1): return {} ngroups = (group_index.size and group_index.max()) + 1 if is_int64_overflow_possible(shape) else np.prod(shape, dtype='i8') sorter = get_group_index_sorter(group_index, ngroups) sorted_labels = [lab.take(sorter) for lab in label_list] group_index = group_index.take(sorter) return lib.indices_fast(sorter, group_index, keys, sorted_labels) def get_group_index_sorter(group_index: npt.NDArray[np.intp], ngroups: int | None=None) -> npt.NDArray[np.intp]: if ngroups is None: ngroups = 1 + group_index.max() count = len(group_index) alpha = 0.0 beta = 1.0 do_groupsort = count > 0 and alpha + beta * ngroups < count * np.log(count) if do_groupsort: (sorter, _) = algos.groupsort_indexer(ensure_platform_int(group_index), ngroups) else: sorter = group_index.argsort(kind='mergesort') return ensure_platform_int(sorter) def compress_group_index(group_index: npt.NDArray[np.int64], sort: bool=True) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: if len(group_index) and np.all(group_index[1:] >= group_index[:-1]): unique_mask = np.concatenate([group_index[:1] > -1, group_index[1:] != group_index[:-1]]) comp_ids = unique_mask.cumsum() comp_ids -= 1 obs_group_ids = group_index[unique_mask] else: size_hint = len(group_index) table = hashtable.Int64HashTable(size_hint) group_index = ensure_int64(group_index) (comp_ids, obs_group_ids) = table.get_labels_groupby(group_index) if sort and len(obs_group_ids) > 0: (obs_group_ids, comp_ids) = _reorder_by_uniques(obs_group_ids, comp_ids) return (ensure_int64(comp_ids), ensure_int64(obs_group_ids)) def _reorder_by_uniques(uniques: npt.NDArray[np.int64], labels: npt.NDArray[np.intp]) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.intp]]: sorter = uniques.argsort() reverse_indexer = np.empty(len(sorter), dtype=np.intp) reverse_indexer.put(sorter, np.arange(len(sorter))) mask = labels < 0 labels = reverse_indexer.take(labels) np.putmask(labels, mask, -1) uniques = uniques.take(sorter) return (uniques, labels) # File: pandas-main/pandas/core/strings/__init__.py """""" # File: pandas-main/pandas/core/strings/accessor.py from __future__ import annotations import codecs from functools import wraps import re from typing import TYPE_CHECKING, Literal, cast import warnings import numpy as np from pandas._libs import lib from pandas._typing import AlignJoin, DtypeObj, F, Scalar, npt from pandas.util._decorators import Appender from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ensure_object, is_bool_dtype, is_extension_array_dtype, is_integer, is_list_like, is_object_dtype, is_re from pandas.core.dtypes.dtypes import ArrowDtype, CategoricalDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCIndex, ABCMultiIndex, ABCSeries from pandas.core.dtypes.missing import isna from pandas.core.arrays import ExtensionArray from pandas.core.base import NoNewAttributesMixin from pandas.core.construction import extract_array if TYPE_CHECKING: from collections.abc import Callable, Hashable, Iterator from pandas._typing import NpDtype from pandas import DataFrame, Index, Series _shared_docs: dict[str, str] = {} _cpython_optimized_encoders = ('utf-8', 'utf8', 'latin-1', 'latin1', 'iso-8859-1', 'mbcs', 'ascii') _cpython_optimized_decoders = _cpython_optimized_encoders + ('utf-16', 'utf-32') def forbid_nonstring_types(forbidden: list[str] | None, name: str | None=None) -> Callable[[F], F]: forbidden = [] if forbidden is None else forbidden 
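# Descriptive note (added comment): `forbidden` lists the inferred dtypes that the decorated
# string method must reject; the next statement derives `allowed_types` by removing them from
# the full set, and the wrapper below raises TypeError when self._inferred_dtype falls outside
# that set.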
allowed_types = {'string', 'empty', 'bytes', 'mixed', 'mixed-integer'} - set(forbidden) def _forbid_nonstring_types(func: F) -> F: func_name = func.__name__ if name is None else name @wraps(func) def wrapper(self, *args, **kwargs): if self._inferred_dtype not in allowed_types: msg = f"Cannot use .str.{func_name} with values of inferred dtype '{self._inferred_dtype}'." raise TypeError(msg) return func(self, *args, **kwargs) wrapper.__name__ = func_name return cast(F, wrapper) return _forbid_nonstring_types def _map_and_wrap(name: str | None, docstring: str | None): @forbid_nonstring_types(['bytes'], name=name) def wrapper(self): result = getattr(self._data.array, f'_str_{name}')() return self._wrap_result(result, returns_string=name not in ('isnumeric', 'isdecimal')) wrapper.__doc__ = docstring return wrapper class StringMethods(NoNewAttributesMixin): def __init__(self, data) -> None: from pandas.core.arrays.string_ import StringDtype self._inferred_dtype = self._validate(data) self._is_categorical = isinstance(data.dtype, CategoricalDtype) self._is_string = isinstance(data.dtype, StringDtype) self._data = data self._index = self._name = None if isinstance(data, ABCSeries): self._index = data.index self._name = data.name self._parent = data._values.categories if self._is_categorical else data self._orig = data self._freeze() @staticmethod def _validate(data): if isinstance(data, ABCMultiIndex): raise AttributeError('Can only use .str accessor with Index, not MultiIndex') allowed_types = ['string', 'empty', 'bytes', 'mixed', 'mixed-integer'] data = extract_array(data) values = getattr(data, 'categories', data) inferred_dtype = lib.infer_dtype(values, skipna=True) if inferred_dtype not in allowed_types: raise AttributeError('Can only use .str accessor with string values!') return inferred_dtype def __getitem__(self, key): result = self._data.array._str_getitem(key) return self._wrap_result(result) def __iter__(self) -> Iterator: raise TypeError(f"'{type(self).__name__}' object is not iterable") def _wrap_result(self, result, name=None, expand: bool | None=None, fill_value=np.nan, returns_string: bool=True, dtype=None): from pandas import Index, MultiIndex if not hasattr(result, 'ndim') or not hasattr(result, 'dtype'): if isinstance(result, ABCDataFrame): result = result.__finalize__(self._orig, name='str') return result assert result.ndim < 3 if expand is None: expand = result.ndim != 1 elif expand is True and (not isinstance(self._orig, ABCIndex)): if isinstance(result.dtype, ArrowDtype): import pyarrow as pa from pandas.compat import pa_version_under11p0 from pandas.core.arrays.arrow.array import ArrowExtensionArray value_lengths = pa.compute.list_value_length(result._pa_array) max_len = pa.compute.max(value_lengths).as_py() min_len = pa.compute.min(value_lengths).as_py() if result._hasna: result = ArrowExtensionArray(result._pa_array.fill_null([None] * max_len)) if min_len < max_len: if not pa_version_under11p0: result = ArrowExtensionArray(pa.compute.list_slice(result._pa_array, start=0, stop=max_len, return_fixed_size_list=True)) else: all_null = np.full(max_len, fill_value=None, dtype=object) values = result.to_numpy() new_values = [] for row in values: if len(row) < max_len: nulls = all_null[:max_len - len(row)] row = np.append(row, nulls) new_values.append(row) pa_type = result._pa_array.type result = ArrowExtensionArray(pa.array(new_values, type=pa_type)) if name is None: name = range(max_len) result = pa.compute.list_flatten(result._pa_array).to_numpy().reshape(len(result), max_len) 
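# At this point each row has been padded or sliced to `max_len`, so the
# flattened Arrow values can be reshaped to (len(result), max_len); the dict
# comprehension below then wraps each column of that block in its own
# ArrowExtensionArray, keyed by `name` (range(max_len) when no name was given).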
result = {label: ArrowExtensionArray(pa.array(res)) for (label, res) in zip(name, result.T)} elif is_object_dtype(result): def cons_row(x): if is_list_like(x): return x else: return [x] result = [cons_row(x) for x in result] if result and (not self._is_string): max_len = max((len(x) for x in result)) result = [x * max_len if len(x) == 0 or x[0] is np.nan else x for x in result] if not isinstance(expand, bool): raise ValueError('expand must be True or False') if expand is False: if name is None: name = getattr(result, 'name', None) if name is None: name = self._orig.name if isinstance(self._orig, ABCIndex): if is_bool_dtype(result): return result if expand: result = list(result) out: Index = MultiIndex.from_tuples(result, names=name) if out.nlevels == 1: out = out.get_level_values(0) return out else: return Index(result, name=name, dtype=dtype) else: index = self._orig.index _dtype: DtypeObj | str | None = dtype vdtype = getattr(result, 'dtype', None) if self._is_string: if is_bool_dtype(vdtype): _dtype = result.dtype elif returns_string: _dtype = self._orig.dtype else: _dtype = vdtype elif vdtype is not None: _dtype = vdtype if expand: cons = self._orig._constructor_expanddim result = cons(result, columns=name, index=index, dtype=_dtype) else: cons = self._orig._constructor result = cons(result, name=name, index=index, dtype=_dtype) result = result.__finalize__(self._orig, method='str') if name is not None and result.ndim == 1: result.name = name return result def _get_series_list(self, others): from pandas import DataFrame, Series idx = self._orig if isinstance(self._orig, ABCIndex) else self._orig.index if isinstance(others, ABCSeries): return [others] elif isinstance(others, ABCIndex): return [Series(others, index=idx, dtype=others.dtype)] elif isinstance(others, ABCDataFrame): return [others[x] for x in others] elif isinstance(others, np.ndarray) and others.ndim == 2: others = DataFrame(others, index=idx) return [others[x] for x in others] elif is_list_like(others, allow_sets=False): try: others = list(others) except TypeError: pass else: if all((isinstance(x, (ABCSeries, ABCIndex, ExtensionArray)) or (isinstance(x, np.ndarray) and x.ndim == 1) for x in others)): los: list[Series] = [] while others: los = los + self._get_series_list(others.pop(0)) return los elif all((not is_list_like(x) for x in others)): return [Series(others, index=idx)] raise TypeError('others must be Series, Index, DataFrame, np.ndarray or list-like (either containing only strings or containing only objects of type Series/Index/np.ndarray[1-dim])') @forbid_nonstring_types(['bytes', 'mixed', 'mixed-integer']) def cat(self, others=None, sep: str | None=None, na_rep=None, join: AlignJoin='left') -> str | Series | Index: from pandas import Index, Series, concat if isinstance(others, str): raise ValueError('Did you mean to supply a `sep` keyword?') if sep is None: sep = '' if isinstance(self._orig, ABCIndex): data = Series(self._orig, index=self._orig, dtype=self._orig.dtype) else: data = self._orig if others is None: data = ensure_object(data) na_mask = isna(data) if na_rep is None and na_mask.any(): return sep.join(data[~na_mask]) elif na_rep is not None and na_mask.any(): return sep.join(np.where(na_mask, na_rep, data)) else: return sep.join(data) try: others = self._get_series_list(others) except ValueError as err: raise ValueError('If `others` contains arrays or lists (or other list-likes without an index), these must all be of the same length as the calling Series/Index.') from err if any((not 
data.index.equals(x.index) for x in others)): others = concat(others, axis=1, join=join if join == 'inner' else 'outer', keys=range(len(others)), sort=False) (data, others) = data.align(others, join=join) others = [others[x] for x in others] all_cols = [ensure_object(x) for x in [data] + others] na_masks = np.array([isna(x) for x in all_cols]) union_mask = np.logical_or.reduce(na_masks, axis=0) if na_rep is None and union_mask.any(): result = np.empty(len(data), dtype=object) np.putmask(result, union_mask, np.nan) not_masked = ~union_mask result[not_masked] = cat_safe([x[not_masked] for x in all_cols], sep) elif na_rep is not None and union_mask.any(): all_cols = [np.where(nm, na_rep, col) for (nm, col) in zip(na_masks, all_cols)] result = cat_safe(all_cols, sep) else: result = cat_safe(all_cols, sep) out: Index | Series if isinstance(self._orig.dtype, CategoricalDtype): dtype = self._orig.dtype.categories.dtype else: dtype = self._orig.dtype if isinstance(self._orig, ABCIndex): if isna(result).all(): dtype = object out = Index(result, dtype=dtype, name=self._orig.name) else: res_ser = Series(result, dtype=dtype, index=data.index, name=self._orig.name, copy=False) out = res_ser.__finalize__(self._orig, method='str_cat') return out _shared_docs['str_split'] = '\n Split strings around given separator/delimiter.\n\n Splits the string in the Series/Index from the %(side)s,\n at the specified delimiter string.\n\n Parameters\n ----------\n pat : str%(pat_regex)s, optional\n %(pat_description)s.\n If not specified, split on whitespace.\n n : int, default -1 (all)\n Limit number of splits in output.\n ``None``, 0 and -1 will be interpreted as return all splits.\n expand : bool, default False\n Expand the split strings into separate columns.\n\n - If ``True``, return DataFrame/MultiIndex expanding dimensionality.\n - If ``False``, return Series/Index, containing lists of strings.\n %(regex_argument)s\n Returns\n -------\n Series, Index, DataFrame or MultiIndex\n Type matches caller unless ``expand=True`` (see Notes).\n %(raises_split)s\n See Also\n --------\n Series.str.split : Split strings around given separator/delimiter.\n Series.str.rsplit : Splits string around given separator/delimiter,\n starting from the right.\n Series.str.join : Join lists contained as elements in the Series/Index\n with passed delimiter.\n str.split : Standard library version for split.\n str.rsplit : Standard library version for rsplit.\n\n Notes\n -----\n The handling of the `n` keyword depends on the number of found splits:\n\n - If found splits > `n`, make first `n` splits only\n - If found splits <= `n`, make all splits\n - If for a certain row the number of found splits < `n`,\n append `None` for padding up to `n` if ``expand=True``\n\n If using ``expand=True``, Series and Index callers return DataFrame and\n MultiIndex objects, respectively.\n %(regex_pat_note)s\n Examples\n --------\n >>> s = pd.Series(\n ... [\n ... "this is a regular sentence",\n ... "https://docs.python.org/3/tutorial/index.html",\n ... np.nan\n ... ]\n ... 
)\n >>> s\n 0 this is a regular sentence\n 1 https://docs.python.org/3/tutorial/index.html\n 2 NaN\n dtype: object\n\n In the default setting, the string is split by whitespace.\n\n >>> s.str.split()\n 0 [this, is, a, regular, sentence]\n 1 [https://docs.python.org/3/tutorial/index.html]\n 2 NaN\n dtype: object\n\n Without the `n` parameter, the outputs of `rsplit` and `split`\n are identical.\n\n >>> s.str.rsplit()\n 0 [this, is, a, regular, sentence]\n 1 [https://docs.python.org/3/tutorial/index.html]\n 2 NaN\n dtype: object\n\n The `n` parameter can be used to limit the number of splits on the\n delimiter. The outputs of `split` and `rsplit` are different.\n\n >>> s.str.split(n=2)\n 0 [this, is, a regular sentence]\n 1 [https://docs.python.org/3/tutorial/index.html]\n 2 NaN\n dtype: object\n\n >>> s.str.rsplit(n=2)\n 0 [this is a, regular, sentence]\n 1 [https://docs.python.org/3/tutorial/index.html]\n 2 NaN\n dtype: object\n\n The `pat` parameter can be used to split by other characters.\n\n >>> s.str.split(pat="/")\n 0 [this is a regular sentence]\n 1 [https:, , docs.python.org, 3, tutorial, index...\n 2 NaN\n dtype: object\n\n When using ``expand=True``, the split elements will expand out into\n separate columns. If NaN is present, it is propagated throughout\n the columns during the split.\n\n >>> s.str.split(expand=True)\n 0 1 2 3 4\n 0 this is a regular sentence\n 1 https://docs.python.org/3/tutorial/index.html None None None None\n 2 NaN NaN NaN NaN NaN\n\n For slightly more complex use cases like splitting the html document name\n from a url, a combination of parameter settings can be used.\n\n >>> s.str.rsplit("/", n=1, expand=True)\n 0 1\n 0 this is a regular sentence None\n 1 https://docs.python.org/3/tutorial index.html\n 2 NaN NaN\n %(regex_examples)s' @Appender(_shared_docs['str_split'] % {'side': 'beginning', 'pat_regex': ' or compiled regex', 'pat_description': 'String or regular expression to split on', 'regex_argument': '\n regex : bool, default None\n Determines if the passed-in pattern is a regular expression:\n\n - If ``True``, assumes the passed-in pattern is a regular expression\n - If ``False``, treats the pattern as a literal string.\n - If ``None`` and `pat` length is 1, treats `pat` as a literal string.\n - If ``None`` and `pat` length is not 1, treats `pat` as a regular expression.\n - Cannot be set to False if `pat` is a compiled regex\n\n .. 
versionadded:: 1.4.0\n ', 'raises_split': '\n Raises\n ------\n ValueError\n * if `regex` is False and `pat` is a compiled regex\n ', 'regex_pat_note': '\n Use of `regex =False` with a `pat` as a compiled regex will raise an error.\n ', 'method': 'split', 'regex_examples': '\n Remember to escape special characters when explicitly using regular expressions.\n\n >>> s = pd.Series(["foo and bar plus baz"])\n >>> s.str.split(r"and|plus", expand=True)\n 0 1 2\n 0 foo bar baz\n\n Regular expressions can be used to handle urls or file names.\n When `pat` is a string and ``regex=None`` (the default), the given `pat` is compiled\n as a regex only if ``len(pat) != 1``.\n\n >>> s = pd.Series([\'foojpgbar.jpg\'])\n >>> s.str.split(r".", expand=True)\n 0 1\n 0 foojpgbar jpg\n\n >>> s.str.split(r"\\.jpg", expand=True)\n 0 1\n 0 foojpgbar\n\n When ``regex=True``, `pat` is interpreted as a regex\n\n >>> s.str.split(r"\\.jpg", regex=True, expand=True)\n 0 1\n 0 foojpgbar\n\n A compiled regex can be passed as `pat`\n\n >>> import re\n >>> s.str.split(re.compile(r"\\.jpg"), expand=True)\n 0 1\n 0 foojpgbar\n\n When ``regex=False``, `pat` is interpreted as the string itself\n\n >>> s.str.split(r"\\.jpg", regex=False, expand=True)\n 0\n 0 foojpgbar.jpg\n '}) @forbid_nonstring_types(['bytes']) def split(self, pat: str | re.Pattern | None=None, *, n=-1, expand: bool=False, regex: bool | None=None): if regex is False and is_re(pat): raise ValueError('Cannot use a compiled regex as replacement pattern with regex=False') if is_re(pat): regex = True result = self._data.array._str_split(pat, n, expand, regex) if self._data.dtype == 'category': dtype = self._data.dtype.categories.dtype else: dtype = object if self._data.dtype == object else None return self._wrap_result(result, expand=expand, returns_string=expand, dtype=dtype) @Appender(_shared_docs['str_split'] % {'side': 'end', 'pat_regex': '', 'pat_description': 'String to split on', 'regex_argument': '', 'raises_split': '', 'regex_pat_note': '', 'method': 'rsplit', 'regex_examples': ''}) @forbid_nonstring_types(['bytes']) def rsplit(self, pat=None, *, n=-1, expand: bool=False): result = self._data.array._str_rsplit(pat, n=n) dtype = object if self._data.dtype == object else None return self._wrap_result(result, expand=expand, returns_string=expand, dtype=dtype) _shared_docs['str_partition'] = "\n Split the string at the %(side)s occurrence of `sep`.\n\n This method splits the string at the %(side)s occurrence of `sep`,\n and returns 3 elements containing the part before the separator,\n the separator itself, and the part after the separator.\n If the separator is not found, return %(return)s.\n\n Parameters\n ----------\n sep : str, default whitespace\n String to split on.\n expand : bool, default True\n If True, return DataFrame/MultiIndex expanding dimensionality.\n If False, return Series/Index.\n\n Returns\n -------\n DataFrame/MultiIndex or Series/Index of objects\n Returns appropriate type based on `expand` parameter with strings\n split based on the `sep` parameter.\n\n See Also\n --------\n %(also)s\n Series.str.split : Split strings around given separators.\n str.partition : Standard library version.\n\n Examples\n --------\n\n >>> s = pd.Series(['Linda van der Berg', 'George Pitt-Rivers'])\n >>> s\n 0 Linda van der Berg\n 1 George Pitt-Rivers\n dtype: object\n\n >>> s.str.partition()\n 0 1 2\n 0 Linda van der Berg\n 1 George Pitt-Rivers\n\n To partition by the last space instead of the first one:\n\n >>> s.str.rpartition()\n 0 1 2\n 0 Linda van der 
Berg\n 1 George Pitt-Rivers\n\n To partition by something different than a space:\n\n >>> s.str.partition('-')\n 0 1 2\n 0 Linda van der Berg\n 1 George Pitt - Rivers\n\n To return a Series containing tuples instead of a DataFrame:\n\n >>> s.str.partition('-', expand=False)\n 0 (Linda van der Berg, , )\n 1 (George Pitt, -, Rivers)\n dtype: object\n\n Also available on indices:\n\n >>> idx = pd.Index(['X 123', 'Y 999'])\n >>> idx\n Index(['X 123', 'Y 999'], dtype='object')\n\n Which will create a MultiIndex:\n\n >>> idx.str.partition()\n MultiIndex([('X', ' ', '123'),\n ('Y', ' ', '999')],\n )\n\n Or an index with tuples with ``expand=False``:\n\n >>> idx.str.partition(expand=False)\n Index([('X', ' ', '123'), ('Y', ' ', '999')], dtype='object')\n " @Appender(_shared_docs['str_partition'] % {'side': 'first', 'return': '3 elements containing the string itself, followed by two empty strings', 'also': 'rpartition : Split the string at the last occurrence of `sep`.'}) @forbid_nonstring_types(['bytes']) def partition(self, sep: str=' ', expand: bool=True): result = self._data.array._str_partition(sep, expand) if self._data.dtype == 'category': dtype = self._data.dtype.categories.dtype else: dtype = object if self._data.dtype == object else None return self._wrap_result(result, expand=expand, returns_string=expand, dtype=dtype) @Appender(_shared_docs['str_partition'] % {'side': 'last', 'return': '3 elements containing two empty strings, followed by the string itself', 'also': 'partition : Split the string at the first occurrence of `sep`.'}) @forbid_nonstring_types(['bytes']) def rpartition(self, sep: str=' ', expand: bool=True): result = self._data.array._str_rpartition(sep, expand) if self._data.dtype == 'category': dtype = self._data.dtype.categories.dtype else: dtype = object if self._data.dtype == object else None return self._wrap_result(result, expand=expand, returns_string=expand, dtype=dtype) def get(self, i): result = self._data.array._str_get(i) return self._wrap_result(result) @forbid_nonstring_types(['bytes']) def join(self, sep: str): result = self._data.array._str_join(sep) return self._wrap_result(result) @forbid_nonstring_types(['bytes']) def contains(self, pat, case: bool=True, flags: int=0, na=None, regex: bool=True): if regex and re.compile(pat).groups: warnings.warn('This pattern is interpreted as a regular expression, and has match groups. 
To actually get the groups, use str.extract.', UserWarning, stacklevel=find_stack_level()) result = self._data.array._str_contains(pat, case, flags, na, regex) return self._wrap_result(result, fill_value=na, returns_string=False) @forbid_nonstring_types(['bytes']) def match(self, pat: str, case: bool=True, flags: int=0, na=None): result = self._data.array._str_match(pat, case=case, flags=flags, na=na) return self._wrap_result(result, fill_value=na, returns_string=False) @forbid_nonstring_types(['bytes']) def fullmatch(self, pat, case: bool=True, flags: int=0, na=None): result = self._data.array._str_fullmatch(pat, case=case, flags=flags, na=na) return self._wrap_result(result, fill_value=na, returns_string=False) @forbid_nonstring_types(['bytes']) def replace(self, pat: str | re.Pattern | dict, repl: str | Callable | None=None, n: int=-1, case: bool | None=None, flags: int=0, regex: bool=False): if isinstance(pat, dict) and repl is not None: raise ValueError('repl cannot be used when pat is a dictionary') if not isinstance(pat, dict) and (not (isinstance(repl, str) or callable(repl))): raise TypeError('repl must be a string or callable') is_compiled_re = is_re(pat) if regex or regex is None: if is_compiled_re and (case is not None or flags != 0): raise ValueError('case and flags cannot be set when pat is a compiled regex') elif is_compiled_re: raise ValueError('Cannot use a compiled regex as replacement pattern with regex=False') elif callable(repl): raise ValueError('Cannot use a callable replacement when regex=False') if case is None: case = True res_output = self._data if not isinstance(pat, dict): pat = {pat: repl} for (key, value) in pat.items(): result = res_output.array._str_replace(key, value, n=n, case=case, flags=flags, regex=regex) res_output = self._wrap_result(result) return res_output @forbid_nonstring_types(['bytes']) def repeat(self, repeats): result = self._data.array._str_repeat(repeats) return self._wrap_result(result) @forbid_nonstring_types(['bytes']) def pad(self, width: int, side: Literal['left', 'right', 'both']='left', fillchar: str=' '): if not isinstance(fillchar, str): msg = f'fillchar must be a character, not {type(fillchar).__name__}' raise TypeError(msg) if len(fillchar) != 1: raise TypeError('fillchar must be a character, not str') if not is_integer(width): msg = f'width must be of integer type, not {type(width).__name__}' raise TypeError(msg) result = self._data.array._str_pad(width, side=side, fillchar=fillchar) return self._wrap_result(result) _shared_docs['str_pad'] = "\n Pad %(side)s side of strings in the Series/Index.\n\n Equivalent to :meth:`str.%(method)s`.\n\n Parameters\n ----------\n width : int\n Minimum width of resulting string; additional characters will be filled\n with ``fillchar``.\n fillchar : str\n Additional character for filling, default is whitespace.\n\n Returns\n -------\n Series/Index of objects.\n A Series or Index where the strings are modified by :meth:`str.%(method)s`.\n\n See Also\n --------\n Series.str.rjust : Fills the left side of strings with an arbitrary\n character.\n Series.str.ljust : Fills the right side of strings with an arbitrary\n character.\n Series.str.center : Fills both sides of strings with an arbitrary\n character.\n Series.str.zfill : Pad strings in the Series/Index by prepending '0'\n character.\n\n Examples\n --------\n For Series.str.center:\n\n >>> ser = pd.Series(['dog', 'bird', 'mouse'])\n >>> ser.str.center(8, fillchar='.')\n 0 ..dog...\n 1 ..bird..\n 2 .mouse..\n dtype: object\n\n For 
Series.str.ljust:\n\n >>> ser = pd.Series(['dog', 'bird', 'mouse'])\n >>> ser.str.ljust(8, fillchar='.')\n 0 dog.....\n 1 bird....\n 2 mouse...\n dtype: object\n\n For Series.str.rjust:\n\n >>> ser = pd.Series(['dog', 'bird', 'mouse'])\n >>> ser.str.rjust(8, fillchar='.')\n 0 .....dog\n 1 ....bird\n 2 ...mouse\n dtype: object\n " @Appender(_shared_docs['str_pad'] % {'side': 'left and right', 'method': 'center'}) @forbid_nonstring_types(['bytes']) def center(self, width: int, fillchar: str=' '): return self.pad(width, side='both', fillchar=fillchar) @Appender(_shared_docs['str_pad'] % {'side': 'right', 'method': 'ljust'}) @forbid_nonstring_types(['bytes']) def ljust(self, width: int, fillchar: str=' '): return self.pad(width, side='right', fillchar=fillchar) @Appender(_shared_docs['str_pad'] % {'side': 'left', 'method': 'rjust'}) @forbid_nonstring_types(['bytes']) def rjust(self, width: int, fillchar: str=' '): return self.pad(width, side='left', fillchar=fillchar) @forbid_nonstring_types(['bytes']) def zfill(self, width: int): if not is_integer(width): msg = f'width must be of integer type, not {type(width).__name__}' raise TypeError(msg) f = lambda x: x.zfill(width) result = self._data.array._str_map(f) return self._wrap_result(result) def slice(self, start=None, stop=None, step=None): result = self._data.array._str_slice(start, stop, step) return self._wrap_result(result) @forbid_nonstring_types(['bytes']) def slice_replace(self, start=None, stop=None, repl=None): result = self._data.array._str_slice_replace(start, stop, repl) return self._wrap_result(result) def decode(self, encoding, errors: str='strict'): if encoding in _cpython_optimized_decoders: f = lambda x: x.decode(encoding, errors) else: decoder = codecs.getdecoder(encoding) f = lambda x: decoder(x, errors)[0] arr = self._data.array result = arr._str_map(f) return self._wrap_result(result) @forbid_nonstring_types(['bytes']) def encode(self, encoding, errors: str='strict'): result = self._data.array._str_encode(encoding, errors) return self._wrap_result(result, returns_string=False) _shared_docs['str_strip'] = "\n Remove %(position)s characters.\n\n Strip whitespaces (including newlines) or a set of specified characters\n from each string in the Series/Index from %(side)s.\n Replaces any non-strings in Series with NaNs.\n Equivalent to :meth:`str.%(method)s`.\n\n Parameters\n ----------\n to_strip : str or None, default None\n Specifying the set of characters to be removed.\n All combinations of this set of characters will be stripped.\n If None then whitespaces are removed.\n\n Returns\n -------\n Series or Index of object\n Series or Index with the strings being stripped from the %(side)s.\n\n See Also\n --------\n Series.str.strip : Remove leading and trailing characters in Series/Index.\n Series.str.lstrip : Remove leading characters in Series/Index.\n Series.str.rstrip : Remove trailing characters in Series/Index.\n\n Examples\n --------\n >>> s = pd.Series(['1. Ant. ', '2. Bee!\\n', '3. Cat?\\t', np.nan, 10, True])\n >>> s\n 0 1. Ant.\n 1 2. Bee!\\n\n 2 3. Cat?\\t\n 3 NaN\n 4 10\n 5 True\n dtype: object\n\n >>> s.str.strip()\n 0 1. Ant.\n 1 2. Bee!\n 2 3. Cat?\n 3 NaN\n 4 NaN\n 5 NaN\n dtype: object\n\n >>> s.str.lstrip('123.')\n 0 Ant.\n 1 Bee!\\n\n 2 Cat?\\t\n 3 NaN\n 4 NaN\n 5 NaN\n dtype: object\n\n >>> s.str.rstrip('.!? \\n\\t')\n 0 1. Ant\n 1 2. Bee\n 2 3. Cat\n 3 NaN\n 4 NaN\n 5 NaN\n dtype: object\n\n >>> s.str.strip('123.!? 
\\n\\t')\n 0 Ant\n 1 Bee\n 2 Cat\n 3 NaN\n 4 NaN\n 5 NaN\n dtype: object\n " @Appender(_shared_docs['str_strip'] % {'side': 'left and right sides', 'method': 'strip', 'position': 'leading and trailing'}) @forbid_nonstring_types(['bytes']) def strip(self, to_strip=None): result = self._data.array._str_strip(to_strip) return self._wrap_result(result) @Appender(_shared_docs['str_strip'] % {'side': 'left side', 'method': 'lstrip', 'position': 'leading'}) @forbid_nonstring_types(['bytes']) def lstrip(self, to_strip=None): result = self._data.array._str_lstrip(to_strip) return self._wrap_result(result) @Appender(_shared_docs['str_strip'] % {'side': 'right side', 'method': 'rstrip', 'position': 'trailing'}) @forbid_nonstring_types(['bytes']) def rstrip(self, to_strip=None): result = self._data.array._str_rstrip(to_strip) return self._wrap_result(result) _shared_docs['str_removefix'] = '\n Remove a %(side)s from an object series.\n\n If the %(side)s is not present, the original string will be returned.\n\n Parameters\n ----------\n %(side)s : str\n Remove the %(side)s of the string.\n\n Returns\n -------\n Series/Index: object\n The Series or Index with given %(side)s removed.\n\n See Also\n --------\n Series.str.remove%(other_side)s : Remove a %(other_side)s from an object series.\n\n Examples\n --------\n >>> s = pd.Series(["str_foo", "str_bar", "no_prefix"])\n >>> s\n 0 str_foo\n 1 str_bar\n 2 no_prefix\n dtype: object\n >>> s.str.removeprefix("str_")\n 0 foo\n 1 bar\n 2 no_prefix\n dtype: object\n\n >>> s = pd.Series(["foo_str", "bar_str", "no_suffix"])\n >>> s\n 0 foo_str\n 1 bar_str\n 2 no_suffix\n dtype: object\n >>> s.str.removesuffix("_str")\n 0 foo\n 1 bar\n 2 no_suffix\n dtype: object\n ' @Appender(_shared_docs['str_removefix'] % {'side': 'prefix', 'other_side': 'suffix'}) @forbid_nonstring_types(['bytes']) def removeprefix(self, prefix: str): result = self._data.array._str_removeprefix(prefix) return self._wrap_result(result) @Appender(_shared_docs['str_removefix'] % {'side': 'suffix', 'other_side': 'prefix'}) @forbid_nonstring_types(['bytes']) def removesuffix(self, suffix: str): result = self._data.array._str_removesuffix(suffix) return self._wrap_result(result) @forbid_nonstring_types(['bytes']) def wrap(self, width: int, expand_tabs: bool=True, tabsize: int=8, replace_whitespace: bool=True, drop_whitespace: bool=True, initial_indent: str='', subsequent_indent: str='', fix_sentence_endings: bool=False, break_long_words: bool=True, break_on_hyphens: bool=True, max_lines: int | None=None, placeholder: str=' [...]'): result = self._data.array._str_wrap(width=width, expand_tabs=expand_tabs, tabsize=tabsize, replace_whitespace=replace_whitespace, drop_whitespace=drop_whitespace, initial_indent=initial_indent, subsequent_indent=subsequent_indent, fix_sentence_endings=fix_sentence_endings, break_long_words=break_long_words, break_on_hyphens=break_on_hyphens, max_lines=max_lines, placeholder=placeholder) return self._wrap_result(result) @forbid_nonstring_types(['bytes']) def get_dummies(self, sep: str='|', dtype: NpDtype | None=None): from pandas.core.frame import DataFrame (result, name) = self._data.array._str_get_dummies(sep, dtype) if is_extension_array_dtype(dtype) or isinstance(dtype, ArrowDtype): return self._wrap_result(DataFrame(result, columns=name, dtype=dtype), name=name, returns_string=False) return self._wrap_result(result, name=name, expand=True, returns_string=False) @forbid_nonstring_types(['bytes']) def translate(self, table): result = 
self._data.array._str_translate(table) dtype = object if self._data.dtype == 'object' else None return self._wrap_result(result, dtype=dtype) @forbid_nonstring_types(['bytes']) def count(self, pat, flags: int=0): result = self._data.array._str_count(pat, flags) return self._wrap_result(result, returns_string=False) @forbid_nonstring_types(['bytes']) def startswith(self, pat: str | tuple[str, ...], na: Scalar | None=None) -> Series | Index: if not isinstance(pat, (str, tuple)): msg = f'expected a string or tuple, not {type(pat).__name__}' raise TypeError(msg) result = self._data.array._str_startswith(pat, na=na) return self._wrap_result(result, returns_string=False) @forbid_nonstring_types(['bytes']) def endswith(self, pat: str | tuple[str, ...], na: Scalar | None=None) -> Series | Index: if not isinstance(pat, (str, tuple)): msg = f'expected a string or tuple, not {type(pat).__name__}' raise TypeError(msg) result = self._data.array._str_endswith(pat, na=na) return self._wrap_result(result, returns_string=False) @forbid_nonstring_types(['bytes']) def findall(self, pat, flags: int=0): result = self._data.array._str_findall(pat, flags) return self._wrap_result(result, returns_string=False) @forbid_nonstring_types(['bytes']) def extract(self, pat: str, flags: int=0, expand: bool=True) -> DataFrame | Series | Index: from pandas import DataFrame if not isinstance(expand, bool): raise ValueError('expand must be True or False') regex = re.compile(pat, flags=flags) if regex.groups == 0: raise ValueError('pattern contains no capture groups') if not expand and regex.groups > 1 and isinstance(self._data, ABCIndex): raise ValueError('only one regex group is supported with Index') obj = self._data result_dtype = _result_dtype(obj) returns_df = regex.groups > 1 or expand if returns_df: name = None columns = _get_group_names(regex) if obj.array.size == 0: result = DataFrame(columns=columns, dtype=result_dtype) else: result_list = self._data.array._str_extract(pat, flags=flags, expand=returns_df) result_index: Index | None if isinstance(obj, ABCSeries): result_index = obj.index else: result_index = None result = DataFrame(result_list, columns=columns, index=result_index, dtype=result_dtype) else: name = _get_single_group_name(regex) result = self._data.array._str_extract(pat, flags=flags, expand=returns_df) return self._wrap_result(result, name=name, dtype=result_dtype) @forbid_nonstring_types(['bytes']) def extractall(self, pat, flags: int=0) -> DataFrame: return str_extractall(self._orig, pat, flags) _shared_docs['find'] = '\n Return %(side)s indexes in each strings in the Series/Index.\n\n Each of returned indexes corresponds to the position where the\n substring is fully contained between [start:end]. Return -1 on\n failure. 
Equivalent to standard :meth:`str.%(method)s`.\n\n Parameters\n ----------\n sub : str\n Substring being searched.\n start : int\n Left edge index.\n end : int\n Right edge index.\n\n Returns\n -------\n Series or Index of int.\n A Series (if the input is a Series) or an Index (if the input is an\n Index) of the %(side)s indexes corresponding to the positions where the\n substring is found in each string of the input.\n\n See Also\n --------\n %(also)s\n\n Examples\n --------\n For Series.str.find:\n\n >>> ser = pd.Series(["_cow_", "duck_", "do_v_e"])\n >>> ser.str.find("_")\n 0 0\n 1 4\n 2 2\n dtype: int64\n\n For Series.str.rfind:\n\n >>> ser = pd.Series(["_cow_", "duck_", "do_v_e"])\n >>> ser.str.rfind("_")\n 0 4\n 1 4\n 2 4\n dtype: int64\n ' @Appender(_shared_docs['find'] % {'side': 'lowest', 'method': 'find', 'also': 'rfind : Return highest indexes in each strings.'}) @forbid_nonstring_types(['bytes']) def find(self, sub, start: int=0, end=None): if not isinstance(sub, str): msg = f'expected a string object, not {type(sub).__name__}' raise TypeError(msg) result = self._data.array._str_find(sub, start, end) return self._wrap_result(result, returns_string=False) @Appender(_shared_docs['find'] % {'side': 'highest', 'method': 'rfind', 'also': 'find : Return lowest indexes in each strings.'}) @forbid_nonstring_types(['bytes']) def rfind(self, sub, start: int=0, end=None): if not isinstance(sub, str): msg = f'expected a string object, not {type(sub).__name__}' raise TypeError(msg) result = self._data.array._str_rfind(sub, start=start, end=end) return self._wrap_result(result, returns_string=False) @forbid_nonstring_types(['bytes']) def normalize(self, form): result = self._data.array._str_normalize(form) return self._wrap_result(result) _shared_docs['index'] = '\n Return %(side)s indexes in each string in Series/Index.\n\n Each of the returned indexes corresponds to the position where the\n substring is fully contained between [start:end]. This is the same\n as ``str.%(similar)s`` except instead of returning -1, it raises a\n ValueError when the substring is not found. 
Equivalent to standard\n ``str.%(method)s``.\n\n Parameters\n ----------\n sub : str\n Substring being searched.\n start : int\n Left edge index.\n end : int\n Right edge index.\n\n Returns\n -------\n Series or Index of object\n Returns a Series or an Index of the %(side)s indexes\n in each string of the input.\n\n See Also\n --------\n %(also)s\n\n Examples\n --------\n For Series.str.index:\n\n >>> ser = pd.Series(["horse", "eagle", "donkey"])\n >>> ser.str.index("e")\n 0 4\n 1 0\n 2 4\n dtype: int64\n\n For Series.str.rindex:\n\n >>> ser = pd.Series(["Deer", "eagle", "Sheep"])\n >>> ser.str.rindex("e")\n 0 2\n 1 4\n 2 3\n dtype: int64\n ' @Appender(_shared_docs['index'] % {'side': 'lowest', 'similar': 'find', 'method': 'index', 'also': 'rindex : Return highest indexes in each strings.'}) @forbid_nonstring_types(['bytes']) def index(self, sub, start: int=0, end=None): if not isinstance(sub, str): msg = f'expected a string object, not {type(sub).__name__}' raise TypeError(msg) result = self._data.array._str_index(sub, start=start, end=end) return self._wrap_result(result, returns_string=False) @Appender(_shared_docs['index'] % {'side': 'highest', 'similar': 'rfind', 'method': 'rindex', 'also': 'index : Return lowest indexes in each strings.'}) @forbid_nonstring_types(['bytes']) def rindex(self, sub, start: int=0, end=None): if not isinstance(sub, str): msg = f'expected a string object, not {type(sub).__name__}' raise TypeError(msg) result = self._data.array._str_rindex(sub, start=start, end=end) return self._wrap_result(result, returns_string=False) def len(self): result = self._data.array._str_len() return self._wrap_result(result, returns_string=False) _shared_docs['casemethods'] = "\n Convert strings in the Series/Index to %(type)s.\n %(version)s\n Equivalent to :meth:`str.%(method)s`.\n\n Returns\n -------\n Series or Index of objects\n A Series or Index where the strings are modified by :meth:`str.%(method)s`.\n\n See Also\n --------\n Series.str.lower : Converts all characters to lowercase.\n Series.str.upper : Converts all characters to uppercase.\n Series.str.title : Converts first character of each word to uppercase and\n remaining to lowercase.\n Series.str.capitalize : Converts first character to uppercase and\n remaining to lowercase.\n Series.str.swapcase : Converts uppercase to lowercase and lowercase to\n uppercase.\n Series.str.casefold: Removes all case distinctions in the string.\n\n Examples\n --------\n >>> s = pd.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe'])\n >>> s\n 0 lower\n 1 CAPITALS\n 2 this is a sentence\n 3 SwApCaSe\n dtype: object\n\n >>> s.str.lower()\n 0 lower\n 1 capitals\n 2 this is a sentence\n 3 swapcase\n dtype: object\n\n >>> s.str.upper()\n 0 LOWER\n 1 CAPITALS\n 2 THIS IS A SENTENCE\n 3 SWAPCASE\n dtype: object\n\n >>> s.str.title()\n 0 Lower\n 1 Capitals\n 2 This Is A Sentence\n 3 Swapcase\n dtype: object\n\n >>> s.str.capitalize()\n 0 Lower\n 1 Capitals\n 2 This is a sentence\n 3 Swapcase\n dtype: object\n\n >>> s.str.swapcase()\n 0 LOWER\n 1 capitals\n 2 THIS IS A SENTENCE\n 3 sWaPcAsE\n dtype: object\n " _doc_args: dict[str, dict[str, str]] = {} _doc_args['lower'] = {'type': 'lowercase', 'method': 'lower', 'version': ''} _doc_args['upper'] = {'type': 'uppercase', 'method': 'upper', 'version': ''} _doc_args['title'] = {'type': 'titlecase', 'method': 'title', 'version': ''} _doc_args['capitalize'] = {'type': 'be capitalized', 'method': 'capitalize', 'version': ''} _doc_args['swapcase'] = {'type': 'be swapcased', 'method': 
'swapcase', 'version': ''} _doc_args['casefold'] = {'type': 'be casefolded', 'method': 'casefold', 'version': ''} @Appender(_shared_docs['casemethods'] % _doc_args['lower']) @forbid_nonstring_types(['bytes']) def lower(self): result = self._data.array._str_lower() return self._wrap_result(result) @Appender(_shared_docs['casemethods'] % _doc_args['upper']) @forbid_nonstring_types(['bytes']) def upper(self): result = self._data.array._str_upper() return self._wrap_result(result) @Appender(_shared_docs['casemethods'] % _doc_args['title']) @forbid_nonstring_types(['bytes']) def title(self): result = self._data.array._str_title() return self._wrap_result(result) @Appender(_shared_docs['casemethods'] % _doc_args['capitalize']) @forbid_nonstring_types(['bytes']) def capitalize(self): result = self._data.array._str_capitalize() return self._wrap_result(result) @Appender(_shared_docs['casemethods'] % _doc_args['swapcase']) @forbid_nonstring_types(['bytes']) def swapcase(self): result = self._data.array._str_swapcase() return self._wrap_result(result) @Appender(_shared_docs['casemethods'] % _doc_args['casefold']) @forbid_nonstring_types(['bytes']) def casefold(self): result = self._data.array._str_casefold() return self._wrap_result(result) _shared_docs['ismethods'] = "\n Check whether all characters in each string are %(type)s.\n\n This is equivalent to running the Python string method\n :meth:`str.%(method)s` for each element of the Series/Index. If a string\n has zero characters, ``False`` is returned for that check.\n\n Returns\n -------\n Series or Index of bool\n Series or Index of boolean values with the same length as the original\n Series/Index.\n\n See Also\n --------\n Series.str.isalpha : Check whether all characters are alphabetic.\n Series.str.isnumeric : Check whether all characters are numeric.\n Series.str.isalnum : Check whether all characters are alphanumeric.\n Series.str.isdigit : Check whether all characters are digits.\n Series.str.isdecimal : Check whether all characters are decimal.\n Series.str.isspace : Check whether all characters are whitespace.\n Series.str.islower : Check whether all characters are lowercase.\n Series.str.isupper : Check whether all characters are uppercase.\n Series.str.istitle : Check whether all characters are titlecase.\n\n Examples\n --------\n **Checks for Alphabetic and Numeric Characters**\n\n >>> s1 = pd.Series(['one', 'one1', '1', ''])\n\n >>> s1.str.isalpha()\n 0 True\n 1 False\n 2 False\n 3 False\n dtype: bool\n\n >>> s1.str.isnumeric()\n 0 False\n 1 False\n 2 True\n 3 False\n dtype: bool\n\n >>> s1.str.isalnum()\n 0 True\n 1 True\n 2 True\n 3 False\n dtype: bool\n\n Note that checks against characters mixed with any additional punctuation\n or whitespace will evaluate to false for an alphanumeric check.\n\n >>> s2 = pd.Series(['A B', '1.5', '3,000'])\n >>> s2.str.isalnum()\n 0 False\n 1 False\n 2 False\n dtype: bool\n\n **More Detailed Checks for Numeric Characters**\n\n There are several different but overlapping sets of numeric characters that\n can be checked for.\n\n >>> s3 = pd.Series(['23', '³', '⅕', ''])\n\n The ``s3.str.isdecimal`` method checks for characters used to form numbers\n in base 10.\n\n >>> s3.str.isdecimal()\n 0 True\n 1 False\n 2 False\n 3 False\n dtype: bool\n\n The ``s.str.isdigit`` method is the same as ``s3.str.isdecimal`` but also\n includes special digits, like superscripted and subscripted digits in\n unicode.\n\n >>> s3.str.isdigit()\n 0 True\n 1 True\n 2 False\n 3 False\n dtype: bool\n\n The 
``s.str.isnumeric`` method is the same as ``s3.str.isdigit`` but also\n includes other characters that can represent quantities such as unicode\n fractions.\n\n >>> s3.str.isnumeric()\n 0 True\n 1 True\n 2 True\n 3 False\n dtype: bool\n\n **Checks for Whitespace**\n\n >>> s4 = pd.Series([' ', '\\t\\r\\n ', ''])\n >>> s4.str.isspace()\n 0 True\n 1 True\n 2 False\n dtype: bool\n\n **Checks for Character Case**\n\n >>> s5 = pd.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])\n\n >>> s5.str.islower()\n 0 True\n 1 False\n 2 False\n 3 False\n dtype: bool\n\n >>> s5.str.isupper()\n 0 False\n 1 False\n 2 True\n 3 False\n dtype: bool\n\n The ``s5.str.istitle`` method checks for whether all words are in title\n case (whether only the first letter of each word is capitalized). Words are\n assumed to be as any sequence of non-numeric characters separated by\n whitespace characters.\n\n >>> s5.str.istitle()\n 0 False\n 1 True\n 2 False\n 3 False\n dtype: bool\n " _doc_args['isalnum'] = {'type': 'alphanumeric', 'method': 'isalnum'} _doc_args['isalpha'] = {'type': 'alphabetic', 'method': 'isalpha'} _doc_args['isdigit'] = {'type': 'digits', 'method': 'isdigit'} _doc_args['isspace'] = {'type': 'whitespace', 'method': 'isspace'} _doc_args['islower'] = {'type': 'lowercase', 'method': 'islower'} _doc_args['isupper'] = {'type': 'uppercase', 'method': 'isupper'} _doc_args['istitle'] = {'type': 'titlecase', 'method': 'istitle'} _doc_args['isnumeric'] = {'type': 'numeric', 'method': 'isnumeric'} _doc_args['isdecimal'] = {'type': 'decimal', 'method': 'isdecimal'} isalnum = _map_and_wrap('isalnum', docstring=_shared_docs['ismethods'] % _doc_args['isalnum']) isalpha = _map_and_wrap('isalpha', docstring=_shared_docs['ismethods'] % _doc_args['isalpha']) isdigit = _map_and_wrap('isdigit', docstring=_shared_docs['ismethods'] % _doc_args['isdigit']) isspace = _map_and_wrap('isspace', docstring=_shared_docs['ismethods'] % _doc_args['isspace']) islower = _map_and_wrap('islower', docstring=_shared_docs['ismethods'] % _doc_args['islower']) isupper = _map_and_wrap('isupper', docstring=_shared_docs['ismethods'] % _doc_args['isupper']) istitle = _map_and_wrap('istitle', docstring=_shared_docs['ismethods'] % _doc_args['istitle']) isnumeric = _map_and_wrap('isnumeric', docstring=_shared_docs['ismethods'] % _doc_args['isnumeric']) isdecimal = _map_and_wrap('isdecimal', docstring=_shared_docs['ismethods'] % _doc_args['isdecimal']) def cat_safe(list_of_columns: list[npt.NDArray[np.object_]], sep: str): try: result = cat_core(list_of_columns, sep) except TypeError: for column in list_of_columns: dtype = lib.infer_dtype(column, skipna=True) if dtype not in ['string', 'empty']: raise TypeError(f'Concatenation requires list-likes containing only strings (or missing values). 
Offending values found in column {dtype}') from None return result def cat_core(list_of_columns: list, sep: str): if sep == '': arr_of_cols = np.asarray(list_of_columns, dtype=object) return np.sum(arr_of_cols, axis=0) list_with_sep = [sep] * (2 * len(list_of_columns) - 1) list_with_sep[::2] = list_of_columns arr_with_sep = np.asarray(list_with_sep, dtype=object) return np.sum(arr_with_sep, axis=0) def _result_dtype(arr): from pandas.core.arrays.string_ import StringDtype if isinstance(arr.dtype, (ArrowDtype, StringDtype)): return arr.dtype return object def _get_single_group_name(regex: re.Pattern) -> Hashable: if regex.groupindex: return next(iter(regex.groupindex)) else: return None def _get_group_names(regex: re.Pattern) -> list[Hashable] | range: rng = range(regex.groups) names = {v: k for (k, v) in regex.groupindex.items()} if not names: return rng result: list[Hashable] = [names.get(1 + i, i) for i in rng] arr = np.array(result) if arr.dtype.kind == 'i' and lib.is_range_indexer(arr, len(arr)): return rng return result def str_extractall(arr, pat, flags: int=0) -> DataFrame: regex = re.compile(pat, flags=flags) if regex.groups == 0: raise ValueError('pattern contains no capture groups') if isinstance(arr, ABCIndex): arr = arr.to_series().reset_index(drop=True).astype(arr.dtype) columns = _get_group_names(regex) match_list = [] index_list = [] is_mi = arr.index.nlevels > 1 for (subject_key, subject) in arr.items(): if isinstance(subject, str): if not is_mi: subject_key = (subject_key,) for (match_i, match_tuple) in enumerate(regex.findall(subject)): if isinstance(match_tuple, str): match_tuple = (match_tuple,) na_tuple = [np.nan if group == '' else group for group in match_tuple] match_list.append(na_tuple) result_key = tuple(subject_key + (match_i,)) index_list.append(result_key) from pandas import MultiIndex index = MultiIndex.from_tuples(index_list, names=arr.index.names + ['match']) dtype = _result_dtype(arr) result = arr._constructor_expanddim(match_list, index=index, columns=columns, dtype=dtype) return result # File: pandas-main/pandas/core/strings/base.py from __future__ import annotations import abc from typing import TYPE_CHECKING, Literal import numpy as np if TYPE_CHECKING: from collections.abc import Callable, Sequence import re from pandas._typing import NpDtype, Scalar, Self class BaseStringArrayMethods(abc.ABC): def _str_getitem(self, key): if isinstance(key, slice): return self._str_slice(start=key.start, stop=key.stop, step=key.step) else: return self._str_get(key) @abc.abstractmethod def _str_count(self, pat, flags: int=0): pass @abc.abstractmethod def _str_pad(self, width: int, side: Literal['left', 'right', 'both']='left', fillchar: str=' '): pass @abc.abstractmethod def _str_contains(self, pat, case: bool=True, flags: int=0, na=None, regex: bool=True): pass @abc.abstractmethod def _str_startswith(self, pat, na=None): pass @abc.abstractmethod def _str_endswith(self, pat, na=None): pass @abc.abstractmethod def _str_replace(self, pat: str | re.Pattern, repl: str | Callable, n: int=-1, case: bool=True, flags: int=0, regex: bool=True): pass @abc.abstractmethod def _str_repeat(self, repeats: int | Sequence[int]): pass @abc.abstractmethod def _str_match(self, pat: str, case: bool=True, flags: int=0, na: Scalar=np.nan): pass @abc.abstractmethod def _str_fullmatch(self, pat: str | re.Pattern, case: bool=True, flags: int=0, na: Scalar=np.nan): pass @abc.abstractmethod def _str_encode(self, encoding, errors: str='strict'): pass @abc.abstractmethod def _str_find(self, sub, 
start: int=0, end=None): pass @abc.abstractmethod def _str_rfind(self, sub, start: int=0, end=None): pass @abc.abstractmethod def _str_findall(self, pat, flags: int=0): pass @abc.abstractmethod def _str_get(self, i): pass @abc.abstractmethod def _str_index(self, sub, start: int=0, end=None): pass @abc.abstractmethod def _str_rindex(self, sub, start: int=0, end=None): pass @abc.abstractmethod def _str_join(self, sep: str): pass @abc.abstractmethod def _str_partition(self, sep: str, expand): pass @abc.abstractmethod def _str_rpartition(self, sep: str, expand): pass @abc.abstractmethod def _str_len(self): pass @abc.abstractmethod def _str_slice(self, start=None, stop=None, step=None): pass @abc.abstractmethod def _str_slice_replace(self, start=None, stop=None, repl=None): pass @abc.abstractmethod def _str_translate(self, table): pass @abc.abstractmethod def _str_wrap(self, width: int, **kwargs): pass @abc.abstractmethod def _str_get_dummies(self, sep: str='|', dtype: NpDtype | None=None): pass @abc.abstractmethod def _str_isalnum(self): pass @abc.abstractmethod def _str_isalpha(self): pass @abc.abstractmethod def _str_isdecimal(self): pass @abc.abstractmethod def _str_isdigit(self): pass @abc.abstractmethod def _str_islower(self): pass @abc.abstractmethod def _str_isnumeric(self): pass @abc.abstractmethod def _str_isspace(self): pass @abc.abstractmethod def _str_istitle(self): pass @abc.abstractmethod def _str_isupper(self): pass @abc.abstractmethod def _str_capitalize(self): pass @abc.abstractmethod def _str_casefold(self): pass @abc.abstractmethod def _str_title(self): pass @abc.abstractmethod def _str_swapcase(self): pass @abc.abstractmethod def _str_lower(self): pass @abc.abstractmethod def _str_upper(self): pass @abc.abstractmethod def _str_normalize(self, form): pass @abc.abstractmethod def _str_strip(self, to_strip=None): pass @abc.abstractmethod def _str_lstrip(self, to_strip=None): pass @abc.abstractmethod def _str_rstrip(self, to_strip=None): pass @abc.abstractmethod def _str_removeprefix(self, prefix: str) -> Self: pass @abc.abstractmethod def _str_removesuffix(self, suffix: str) -> Self: pass @abc.abstractmethod def _str_split(self, pat=None, n=-1, expand: bool=False, regex: bool | None=None): pass @abc.abstractmethod def _str_rsplit(self, pat=None, n=-1): pass @abc.abstractmethod def _str_extract(self, pat: str, flags: int=0, expand: bool=True): pass # File: pandas-main/pandas/core/strings/object_array.py from __future__ import annotations import functools import re import textwrap from typing import TYPE_CHECKING, Literal, cast import unicodedata import warnings import numpy as np from pandas._libs import lib import pandas._libs.missing as libmissing import pandas._libs.ops as libops from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import pandas_dtype from pandas.core.dtypes.missing import isna from pandas.core.strings.base import BaseStringArrayMethods if TYPE_CHECKING: from collections.abc import Callable, Sequence from pandas._typing import NpDtype, Scalar class ObjectStringArrayMixin(BaseStringArrayMethods): def __len__(self) -> int: raise NotImplementedError def _str_map(self, f, na_value=None, dtype: NpDtype | None=None, convert: bool=True): if dtype is None: dtype = np.dtype('object') if na_value is None: na_value = self.dtype.na_value if not len(self): return np.array([], dtype=dtype) arr = np.asarray(self, dtype=object) mask = isna(arr) map_convert = convert and (not np.all(mask)) try: result = lib.map_infer_mask(arr, f, 
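# The remaining arguments pass the NA mask (viewed as uint8) plus the convert
# flag, so f need not handle missing elements itself; masked positions are
# filled afterwards via np.putmask when na_value is not np.nan.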
mask.view(np.uint8), convert=map_convert) except (TypeError, AttributeError) as err: p_err = '((takes)|(missing)) (?(2)from \\d+ to )?\\d+ (?(3)required )positional arguments?' if len(err.args) >= 1 and re.search(p_err, err.args[0]): raise err def g(x): try: return f(x) except (TypeError, AttributeError): return na_value return self._str_map(g, na_value=na_value, dtype=dtype) if not isinstance(result, np.ndarray): return result if na_value is not np.nan: np.putmask(result, mask, na_value) if convert and result.dtype == object: result = lib.maybe_convert_objects(result) return result def _str_count(self, pat, flags: int=0): regex = re.compile(pat, flags=flags) f = lambda x: len(regex.findall(x)) return self._str_map(f, dtype='int64') def _str_pad(self, width: int, side: Literal['left', 'right', 'both']='left', fillchar: str=' '): if side == 'left': f = lambda x: x.rjust(width, fillchar) elif side == 'right': f = lambda x: x.ljust(width, fillchar) elif side == 'both': f = lambda x: x.center(width, fillchar) else: raise ValueError('Invalid side') return self._str_map(f) def _str_contains(self, pat, case: bool=True, flags: int=0, na=np.nan, regex: bool=True): if regex: if not case: flags |= re.IGNORECASE pat = re.compile(pat, flags=flags) f = lambda x: pat.search(x) is not None elif case: f = lambda x: pat in x else: upper_pat = pat.upper() f = lambda x: upper_pat in x.upper() if not isna(na) and (not isinstance(na, bool)): warnings.warn("Allowing a non-bool 'na' in obj.str.contains is deprecated and will raise in a future version.", FutureWarning, stacklevel=find_stack_level()) return self._str_map(f, na, dtype=np.dtype('bool')) def _str_startswith(self, pat, na=None): f = lambda x: x.startswith(pat) if not isna(na) and (not isinstance(na, bool)): warnings.warn("Allowing a non-bool 'na' in obj.str.startswith is deprecated and will raise in a future version.", FutureWarning, stacklevel=find_stack_level()) return self._str_map(f, na_value=na, dtype=np.dtype(bool)) def _str_endswith(self, pat, na=None): f = lambda x: x.endswith(pat) if not isna(na) and (not isinstance(na, bool)): warnings.warn("Allowing a non-bool 'na' in obj.str.endswith is deprecated and will raise in a future version.", FutureWarning, stacklevel=find_stack_level()) return self._str_map(f, na_value=na, dtype=np.dtype(bool)) def _str_replace(self, pat: str | re.Pattern, repl: str | Callable, n: int=-1, case: bool=True, flags: int=0, regex: bool=True): if case is False: flags |= re.IGNORECASE if regex or flags or callable(repl): if not isinstance(pat, re.Pattern): if regex is False: pat = re.escape(pat) pat = re.compile(pat, flags=flags) n = n if n >= 0 else 0 f = lambda x: pat.sub(repl=repl, string=x, count=n) else: f = lambda x: x.replace(pat, repl, n) return self._str_map(f, dtype=str) def _str_repeat(self, repeats: int | Sequence[int]): if lib.is_integer(repeats): rint = cast(int, repeats) def scalar_rep(x): try: return bytes.__mul__(x, rint) except TypeError: return str.__mul__(x, rint) return self._str_map(scalar_rep, dtype=str) else: from pandas.core.arrays.string_ import BaseStringArray def rep(x, r): if x is libmissing.NA: return x try: return bytes.__mul__(x, r) except TypeError: return str.__mul__(x, r) result = libops.vec_binop(np.asarray(self), np.asarray(repeats, dtype=object), rep) if not isinstance(self, BaseStringArray): return result return type(self)._from_sequence(result, dtype=self.dtype) def _str_match(self, pat: str, case: bool=True, flags: int=0, na: Scalar | None=None): if not case: flags |= 
re.IGNORECASE regex = re.compile(pat, flags=flags) f = lambda x: regex.match(x) is not None return self._str_map(f, na_value=na, dtype=np.dtype(bool)) def _str_fullmatch(self, pat: str | re.Pattern, case: bool=True, flags: int=0, na: Scalar | None=None): if not case: flags |= re.IGNORECASE regex = re.compile(pat, flags=flags) f = lambda x: regex.fullmatch(x) is not None return self._str_map(f, na_value=na, dtype=np.dtype(bool)) def _str_encode(self, encoding, errors: str='strict'): f = lambda x: x.encode(encoding, errors=errors) return self._str_map(f, dtype=object) def _str_find(self, sub, start: int=0, end=None): return self._str_find_(sub, start, end, side='left') def _str_rfind(self, sub, start: int=0, end=None): return self._str_find_(sub, start, end, side='right') def _str_find_(self, sub, start, end, side): if side == 'left': method = 'find' elif side == 'right': method = 'rfind' else: raise ValueError('Invalid side') if end is None: f = lambda x: getattr(x, method)(sub, start) else: f = lambda x: getattr(x, method)(sub, start, end) return self._str_map(f, dtype='int64') def _str_findall(self, pat, flags: int=0): regex = re.compile(pat, flags=flags) return self._str_map(regex.findall, dtype='object') def _str_get(self, i): def f(x): if isinstance(x, dict): return x.get(i) elif len(x) > i >= -len(x): return x[i] return self.dtype.na_value return self._str_map(f) def _str_index(self, sub, start: int=0, end=None): if end: f = lambda x: x.index(sub, start, end) else: f = lambda x: x.index(sub, start, end) return self._str_map(f, dtype='int64') def _str_rindex(self, sub, start: int=0, end=None): if end: f = lambda x: x.rindex(sub, start, end) else: f = lambda x: x.rindex(sub, start, end) return self._str_map(f, dtype='int64') def _str_join(self, sep: str): return self._str_map(sep.join) def _str_partition(self, sep: str, expand): result = self._str_map(lambda x: x.partition(sep), dtype='object') return result def _str_rpartition(self, sep: str, expand): return self._str_map(lambda x: x.rpartition(sep), dtype='object') def _str_len(self): return self._str_map(len, dtype='int64') def _str_slice(self, start=None, stop=None, step=None): obj = slice(start, stop, step) return self._str_map(lambda x: x[obj]) def _str_slice_replace(self, start=None, stop=None, repl=None): if repl is None: repl = '' def f(x): if x[start:stop] == '': local_stop = start else: local_stop = stop y = '' if start is not None: y += x[:start] y += repl if stop is not None: y += x[local_stop:] return y return self._str_map(f) def _str_split(self, pat: str | re.Pattern | None=None, n=-1, expand: bool=False, regex: bool | None=None): if pat is None: if n is None or n == 0: n = -1 f = lambda x: x.split(pat, n) else: new_pat: str | re.Pattern if regex is True or isinstance(pat, re.Pattern): new_pat = re.compile(pat) elif regex is False: new_pat = pat elif len(pat) == 1: new_pat = pat else: new_pat = re.compile(pat) if isinstance(new_pat, re.Pattern): if n is None or n == -1: n = 0 f = lambda x: new_pat.split(x, maxsplit=n) else: if n is None or n == 0: n = -1 f = lambda x: x.split(pat, n) return self._str_map(f, dtype=object) def _str_rsplit(self, pat=None, n=-1): if n is None or n == 0: n = -1 f = lambda x: x.rsplit(pat, n) return self._str_map(f, dtype='object') def _str_translate(self, table): return self._str_map(lambda x: x.translate(table)) def _str_wrap(self, width: int, **kwargs): kwargs['width'] = width tw = textwrap.TextWrapper(**kwargs) return self._str_map(lambda s: '\n'.join(tw.wrap(s))) def 
_str_get_dummies(self, sep: str='|', dtype: NpDtype | None=None): from pandas import Series if dtype is None: dtype = np.int64 arr = Series(self).fillna('') try: arr = sep + arr + sep except (TypeError, NotImplementedError): arr = sep + arr.astype(str) + sep tags: set[str] = set() for ts in Series(arr, copy=False).str.split(sep): tags.update(ts) tags2 = sorted(tags - {''}) _dtype = pandas_dtype(dtype) dummies_dtype: NpDtype if isinstance(_dtype, np.dtype): dummies_dtype = _dtype else: dummies_dtype = np.bool_ dummies = np.empty((len(arr), len(tags2)), dtype=dummies_dtype) def _isin(test_elements: str, element: str) -> bool: return element in test_elements for (i, t) in enumerate(tags2): pat = sep + t + sep dummies[:, i] = lib.map_infer(arr.to_numpy(), functools.partial(_isin, element=pat)) return (dummies, tags2) def _str_upper(self): return self._str_map(lambda x: x.upper()) def _str_isalnum(self): return self._str_map(str.isalnum, dtype='bool') def _str_isalpha(self): return self._str_map(str.isalpha, dtype='bool') def _str_isdecimal(self): return self._str_map(str.isdecimal, dtype='bool') def _str_isdigit(self): return self._str_map(str.isdigit, dtype='bool') def _str_islower(self): return self._str_map(str.islower, dtype='bool') def _str_isnumeric(self): return self._str_map(str.isnumeric, dtype='bool') def _str_isspace(self): return self._str_map(str.isspace, dtype='bool') def _str_istitle(self): return self._str_map(str.istitle, dtype='bool') def _str_isupper(self): return self._str_map(str.isupper, dtype='bool') def _str_capitalize(self): return self._str_map(str.capitalize) def _str_casefold(self): return self._str_map(str.casefold) def _str_title(self): return self._str_map(str.title) def _str_swapcase(self): return self._str_map(str.swapcase) def _str_lower(self): return self._str_map(str.lower) def _str_normalize(self, form): f = lambda x: unicodedata.normalize(form, x) return self._str_map(f) def _str_strip(self, to_strip=None): return self._str_map(lambda x: x.strip(to_strip)) def _str_lstrip(self, to_strip=None): return self._str_map(lambda x: x.lstrip(to_strip)) def _str_rstrip(self, to_strip=None): return self._str_map(lambda x: x.rstrip(to_strip)) def _str_removeprefix(self, prefix: str): return self._str_map(lambda x: x.removeprefix(prefix)) def _str_removesuffix(self, suffix: str): return self._str_map(lambda x: x.removesuffix(suffix)) def _str_extract(self, pat: str, flags: int=0, expand: bool=True): regex = re.compile(pat, flags=flags) na_value = self.dtype.na_value if not expand: def g(x): m = regex.search(x) return m.groups()[0] if m else na_value return self._str_map(g, convert=False) empty_row = [na_value] * regex.groups def f(x): if not isinstance(x, str): return empty_row m = regex.search(x) if m: return [na_value if item is None else item for item in m.groups()] else: return empty_row return [f(val) for val in np.asarray(self)] # File: pandas-main/pandas/core/tools/datetimes.py from __future__ import annotations from collections import abc from datetime import date from functools import partial from itertools import islice from typing import TYPE_CHECKING, TypedDict, Union, cast, overload import warnings import numpy as np from pandas._libs import lib, tslib from pandas._libs.tslibs import OutOfBoundsDatetime, Timedelta, Timestamp, astype_overflowsafe, is_supported_dtype, timezones as libtimezones from pandas._libs.tslibs.conversion import cast_from_unit_vectorized from pandas._libs.tslibs.dtypes import NpyDatetimeUnit from pandas._libs.tslibs.parsing import 
DateParseError, guess_datetime_format from pandas._libs.tslibs.strptime import array_strptime from pandas._typing import AnyArrayLike, ArrayLike, DateTimeErrorChoices from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ensure_object, is_float, is_integer, is_integer_dtype, is_list_like, is_numeric_dtype from pandas.core.dtypes.dtypes import ArrowDtype, DatetimeTZDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries from pandas.arrays import DatetimeArray, IntegerArray, NumpyExtensionArray from pandas.core.algorithms import unique from pandas.core.arrays import ArrowExtensionArray from pandas.core.arrays.base import ExtensionArray from pandas.core.arrays.datetimes import maybe_convert_dtype, objects_to_datetime64, tz_to_dtype from pandas.core.construction import extract_array from pandas.core.indexes.base import Index from pandas.core.indexes.datetimes import DatetimeIndex if TYPE_CHECKING: from collections.abc import Callable, Hashable from pandas._libs.tslibs.nattype import NaTType from pandas._libs.tslibs.timedeltas import UnitChoices from pandas import DataFrame, Series ArrayConvertible = Union[list, tuple, AnyArrayLike] Scalar = Union[float, str] DatetimeScalar = Union[Scalar, date, np.datetime64] DatetimeScalarOrArrayConvertible = Union[DatetimeScalar, ArrayConvertible] DatetimeDictArg = Union[list[Scalar], tuple[Scalar, ...], AnyArrayLike] class YearMonthDayDict(TypedDict, total=True): year: DatetimeDictArg month: DatetimeDictArg day: DatetimeDictArg class FulldatetimeDict(YearMonthDayDict, total=False): hour: DatetimeDictArg hours: DatetimeDictArg minute: DatetimeDictArg minutes: DatetimeDictArg second: DatetimeDictArg seconds: DatetimeDictArg ms: DatetimeDictArg us: DatetimeDictArg ns: DatetimeDictArg DictConvertible = Union[FulldatetimeDict, 'DataFrame'] start_caching_at = 50 def _guess_datetime_format_for_array(arr, dayfirst: bool | None=False) -> str | None: if (first_non_null := tslib.first_non_null(arr)) != -1: if type((first_non_nan_element := arr[first_non_null])) is str: guessed_format = guess_datetime_format(first_non_nan_element, dayfirst=dayfirst) if guessed_format is not None: return guessed_format if tslib.first_non_null(arr[first_non_null + 1:]) != -1: warnings.warn('Could not infer format, so each element will be parsed individually, falling back to `dateutil`. 
To ensure parsing is consistent and as-expected, please specify a format.', UserWarning, stacklevel=find_stack_level()) return None def should_cache(arg: ArrayConvertible, unique_share: float=0.7, check_count: int | None=None) -> bool: do_caching = True if check_count is None: if len(arg) <= start_caching_at: return False if len(arg) <= 5000: check_count = len(arg) // 10 else: check_count = 500 else: assert 0 <= check_count <= len(arg), 'check_count must be in next bounds: [0; len(arg)]' if check_count == 0: return False assert 0 < unique_share < 1, 'unique_share must be in next bounds: (0; 1)' try: unique_elements = set(islice(arg, check_count)) except TypeError: return False if len(unique_elements) > check_count * unique_share: do_caching = False return do_caching def _maybe_cache(arg: ArrayConvertible, format: str | None, cache: bool, convert_listlike: Callable) -> Series: from pandas import Series cache_array = Series(dtype=object) if cache: if not should_cache(arg): return cache_array if not isinstance(arg, (np.ndarray, ExtensionArray, Index, ABCSeries)): arg = np.array(arg) unique_dates = unique(arg) if len(unique_dates) < len(arg): cache_dates = convert_listlike(unique_dates, format) try: cache_array = Series(cache_dates, index=unique_dates, copy=False) except OutOfBoundsDatetime: return cache_array if not cache_array.index.is_unique: cache_array = cache_array[~cache_array.index.duplicated()] return cache_array def _box_as_indexlike(dt_array: ArrayLike, utc: bool=False, name: Hashable | None=None) -> Index: if lib.is_np_dtype(dt_array.dtype, 'M'): tz = 'utc' if utc else None return DatetimeIndex(dt_array, tz=tz, name=name) return Index(dt_array, name=name, dtype=dt_array.dtype) def _convert_and_box_cache(arg: DatetimeScalarOrArrayConvertible, cache_array: Series, name: Hashable | None=None) -> Index: from pandas import Series result = Series(arg, dtype=cache_array.index.dtype).map(cache_array) return _box_as_indexlike(result._values, utc=False, name=name) def _convert_listlike_datetimes(arg, format: str | None, name: Hashable | None=None, utc: bool=False, unit: str | None=None, errors: DateTimeErrorChoices='raise', dayfirst: bool | None=None, yearfirst: bool | None=None, exact: bool=True): if isinstance(arg, (list, tuple)): arg = np.array(arg, dtype='O') elif isinstance(arg, NumpyExtensionArray): arg = np.array(arg) arg_dtype = getattr(arg, 'dtype', None) tz = 'utc' if utc else None if isinstance(arg_dtype, DatetimeTZDtype): if not isinstance(arg, (DatetimeArray, DatetimeIndex)): return DatetimeIndex(arg, tz=tz, name=name) if utc: arg = arg.tz_convert(None).tz_localize('utc') return arg elif isinstance(arg_dtype, ArrowDtype) and arg_dtype.type is Timestamp: if utc: if isinstance(arg, Index): arg_array = cast(ArrowExtensionArray, arg.array) if arg_dtype.pyarrow_dtype.tz is not None: arg_array = arg_array._dt_tz_convert('UTC') else: arg_array = arg_array._dt_tz_localize('UTC') arg = Index(arg_array) elif arg_dtype.pyarrow_dtype.tz is not None: arg = arg._dt_tz_convert('UTC') else: arg = arg._dt_tz_localize('UTC') return arg elif lib.is_np_dtype(arg_dtype, 'M'): if not is_supported_dtype(arg_dtype): arg = astype_overflowsafe(np.asarray(arg), np.dtype('M8[s]'), is_coerce=errors == 'coerce') if not isinstance(arg, (DatetimeArray, DatetimeIndex)): return DatetimeIndex(arg, tz=tz, name=name) elif utc: return arg.tz_localize('utc') return arg elif unit is not None: if format is not None: raise ValueError('cannot specify both format and unit') return _to_datetime_with_unit(arg, unit, name, 
utc, errors) elif getattr(arg, 'ndim', 1) > 1: raise TypeError('arg must be a string, datetime, list, tuple, 1-d array, or Series') try: (arg, _) = maybe_convert_dtype(arg, copy=False, tz=libtimezones.maybe_get_tz(tz)) except TypeError: if errors == 'coerce': npvalues = np.full(len(arg), np.datetime64('NaT', 'ns')) return DatetimeIndex(npvalues, name=name) raise arg = ensure_object(arg) if format is None: format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst) if format is not None and format != 'mixed': return _array_strptime_with_fallback(arg, name, utc, format, exact, errors) (result, tz_parsed) = objects_to_datetime64(arg, dayfirst=dayfirst, yearfirst=yearfirst, utc=utc, errors=errors, allow_object=True) if tz_parsed is not None: out_unit = np.datetime_data(result.dtype)[0] dtype = tz_to_dtype(tz_parsed, out_unit) dt64_values = result.view(f'M8[{dtype.unit}]') dta = DatetimeArray._simple_new(dt64_values, dtype=dtype) return DatetimeIndex._simple_new(dta, name=name) return _box_as_indexlike(result, utc=utc, name=name) def _array_strptime_with_fallback(arg, name, utc: bool, fmt: str, exact: bool, errors: str) -> Index: (result, tz_out) = array_strptime(arg, fmt, exact=exact, errors=errors, utc=utc) if tz_out is not None: unit = np.datetime_data(result.dtype)[0] dtype = DatetimeTZDtype(tz=tz_out, unit=unit) dta = DatetimeArray._simple_new(result, dtype=dtype) if utc: dta = dta.tz_convert('UTC') return Index(dta, name=name) elif result.dtype != object and utc: unit = np.datetime_data(result.dtype)[0] res = Index(result, dtype=f'M8[{unit}, UTC]', name=name) return res return Index(result, dtype=result.dtype, name=name) def _to_datetime_with_unit(arg, unit, name, utc: bool, errors: str) -> Index: arg = extract_array(arg, extract_numpy=True) if isinstance(arg, IntegerArray): arr = arg.astype(f'datetime64[{unit}]') tz_parsed = None else: arg = np.asarray(arg) if arg.dtype.kind in 'iu': arr = arg.astype(f'datetime64[{unit}]', copy=False) try: arr = astype_overflowsafe(arr, np.dtype('M8[ns]'), copy=False) except OutOfBoundsDatetime: if errors == 'raise': raise arg = arg.astype(object) return _to_datetime_with_unit(arg, unit, name, utc, errors) tz_parsed = None elif arg.dtype.kind == 'f': with np.errstate(over='raise'): try: arr = cast_from_unit_vectorized(arg, unit=unit) except OutOfBoundsDatetime as err: if errors != 'raise': return _to_datetime_with_unit(arg.astype(object), unit, name, utc, errors) raise OutOfBoundsDatetime(f"cannot convert input with unit '{unit}'") from err arr = arr.view('M8[ns]') tz_parsed = None else: arg = arg.astype(object, copy=False) (arr, tz_parsed) = tslib.array_to_datetime(arg, utc=utc, errors=errors, unit_for_numerics=unit, creso=NpyDatetimeUnit.NPY_FR_ns.value) result = DatetimeIndex(arr, name=name) if not isinstance(result, DatetimeIndex): return result result = result.tz_localize('UTC').tz_convert(tz_parsed) if utc: if result.tz is None: result = result.tz_localize('utc') else: result = result.tz_convert('utc') return result def _adjust_to_origin(arg, origin, unit): if origin == 'julian': original = arg j0 = Timestamp(0).to_julian_date() if unit != 'D': raise ValueError("unit must be 'D' for origin='julian'") try: arg = arg - j0 except TypeError as err: raise ValueError("incompatible 'arg' type for given 'origin'='julian'") from err j_max = Timestamp.max.to_julian_date() - j0 j_min = Timestamp.min.to_julian_date() - j0 if np.any(arg > j_max) or np.any(arg < j_min): raise OutOfBoundsDatetime(f"{original} is Out of Bounds for origin='julian'") else: if 
not ((is_integer(arg) or is_float(arg)) or is_numeric_dtype(np.asarray(arg))): raise ValueError(f"'{arg}' is not compatible with origin='{origin}'; it must be numeric with a unit specified") try: offset = Timestamp(origin, unit=unit) except OutOfBoundsDatetime as err: raise OutOfBoundsDatetime(f'origin {origin} is Out of Bounds') from err except ValueError as err: raise ValueError(f'origin {origin} cannot be converted to a Timestamp') from err if offset.tz is not None: raise ValueError(f'origin offset {offset} must be tz-naive') td_offset = offset - Timestamp(0) ioffset = td_offset // Timedelta(1, unit=unit) if is_list_like(arg) and (not isinstance(arg, (ABCSeries, Index, np.ndarray))): arg = np.asarray(arg) arg = arg + ioffset return arg @overload def to_datetime(arg: DatetimeScalar, errors: DateTimeErrorChoices=..., dayfirst: bool=..., yearfirst: bool=..., utc: bool=..., format: str | None=..., exact: bool=..., unit: str | None=..., origin=..., cache: bool=...) -> Timestamp: ... @overload def to_datetime(arg: Series | DictConvertible, errors: DateTimeErrorChoices=..., dayfirst: bool=..., yearfirst: bool=..., utc: bool=..., format: str | None=..., exact: bool=..., unit: str | None=..., origin=..., cache: bool=...) -> Series: ... @overload def to_datetime(arg: list | tuple | Index | ArrayLike, errors: DateTimeErrorChoices=..., dayfirst: bool=..., yearfirst: bool=..., utc: bool=..., format: str | None=..., exact: bool=..., unit: str | None=..., origin=..., cache: bool=...) -> DatetimeIndex: ... def to_datetime(arg: DatetimeScalarOrArrayConvertible | DictConvertible, errors: DateTimeErrorChoices='raise', dayfirst: bool=False, yearfirst: bool=False, utc: bool=False, format: str | None=None, exact: bool | lib.NoDefault=lib.no_default, unit: str | None=None, origin: str='unix', cache: bool=True) -> DatetimeIndex | Series | DatetimeScalar | NaTType | None: if exact is not lib.no_default and format in {'mixed', 'ISO8601'}: raise ValueError("Cannot use 'exact' when 'format' is 'mixed' or 'ISO8601'") if arg is None: return None if origin != 'unix': arg = _adjust_to_origin(arg, origin, unit) convert_listlike = partial(_convert_listlike_datetimes, utc=utc, unit=unit, dayfirst=dayfirst, yearfirst=yearfirst, errors=errors, exact=exact) result: Timestamp | NaTType | Series | Index if isinstance(arg, Timestamp): result = arg if utc: if arg.tz is not None: result = arg.tz_convert('utc') else: result = arg.tz_localize('utc') elif isinstance(arg, ABCSeries): cache_array = _maybe_cache(arg, format, cache, convert_listlike) if not cache_array.empty: result = arg.map(cache_array) else: values = convert_listlike(arg._values, format) result = arg._constructor(values, index=arg.index, name=arg.name) elif isinstance(arg, (ABCDataFrame, abc.MutableMapping)): result = _assemble_from_unit_mappings(arg, errors, utc) elif isinstance(arg, Index): cache_array = _maybe_cache(arg, format, cache, convert_listlike) if not cache_array.empty: result = _convert_and_box_cache(arg, cache_array, name=arg.name) else: result = convert_listlike(arg, format, name=arg.name) elif is_list_like(arg): try: argc = cast(Union[list, tuple, ExtensionArray, np.ndarray, 'Series', Index], arg) cache_array = _maybe_cache(argc, format, cache, convert_listlike) except OutOfBoundsDatetime: if errors == 'raise': raise from pandas import Series cache_array = Series([], dtype=object) if not cache_array.empty: result = _convert_and_box_cache(argc, cache_array) else: result = convert_listlike(argc, format) else: result = convert_listlike(np.array([arg]), 
format)[0] if isinstance(arg, bool) and isinstance(result, np.bool_): result = bool(result) return result _unit_map = {'year': 'year', 'years': 'year', 'month': 'month', 'months': 'month', 'day': 'day', 'days': 'day', 'hour': 'h', 'hours': 'h', 'minute': 'm', 'minutes': 'm', 'second': 's', 'seconds': 's', 'ms': 'ms', 'millisecond': 'ms', 'milliseconds': 'ms', 'us': 'us', 'microsecond': 'us', 'microseconds': 'us', 'ns': 'ns', 'nanosecond': 'ns', 'nanoseconds': 'ns'} def _assemble_from_unit_mappings(arg, errors: DateTimeErrorChoices, utc: bool) -> Series: from pandas import DataFrame, to_numeric, to_timedelta arg = DataFrame(arg) if not arg.columns.is_unique: raise ValueError('cannot assemble with duplicate keys') def f(value): if value in _unit_map: return _unit_map[value] if value.lower() in _unit_map: return _unit_map[value.lower()] return value unit = {k: f(k) for k in arg.keys()} unit_rev = {v: k for (k, v) in unit.items()} required = ['year', 'month', 'day'] req = set(required) - set(unit_rev.keys()) if len(req): _required = ','.join(sorted(req)) raise ValueError(f'to assemble mappings requires at least that [year, month, day] be specified: [{_required}] is missing') excess = set(unit_rev.keys()) - set(_unit_map.values()) if len(excess): _excess = ','.join(sorted(excess)) raise ValueError(f'extra keys have been passed to the datetime assemblage: [{_excess}]') def coerce(values): values = to_numeric(values, errors=errors) if is_integer_dtype(values.dtype): values = values.astype('int64') return values values = coerce(arg[unit_rev['year']]) * 10000 + coerce(arg[unit_rev['month']]) * 100 + coerce(arg[unit_rev['day']]) try: values = to_datetime(values, format='%Y%m%d', errors=errors, utc=utc) except (TypeError, ValueError) as err: raise ValueError(f'cannot assemble the datetimes: {err}') from err units: list[UnitChoices] = ['h', 'm', 's', 'ms', 'us', 'ns'] for u in units: value = unit_rev.get(u) if value is not None and value in arg: try: values += to_timedelta(coerce(arg[value]), unit=u, errors=errors) except (TypeError, ValueError) as err: raise ValueError(f'cannot assemble the datetimes [{value}]: {err}') from err return values __all__ = ['DateParseError', 'should_cache', 'to_datetime'] # File: pandas-main/pandas/core/tools/numeric.py from __future__ import annotations from typing import TYPE_CHECKING, Literal import numpy as np from pandas._libs import lib, missing as libmissing from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.cast import maybe_downcast_numeric from pandas.core.dtypes.common import ensure_object, is_bool_dtype, is_decimal, is_integer_dtype, is_number, is_numeric_dtype, is_scalar, is_string_dtype, needs_i8_conversion from pandas.core.dtypes.dtypes import ArrowDtype from pandas.core.dtypes.generic import ABCIndex, ABCSeries from pandas.core.arrays import BaseMaskedArray from pandas.core.arrays.string_ import StringDtype if TYPE_CHECKING: from pandas._typing import DateTimeErrorChoices, DtypeBackend, npt def to_numeric(arg, errors: DateTimeErrorChoices='raise', downcast: Literal['integer', 'signed', 'unsigned', 'float'] | None=None, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default): if downcast not in (None, 'integer', 'signed', 'unsigned', 'float'): raise ValueError('invalid downcasting method provided') if errors not in ('raise', 'coerce'): raise ValueError('invalid error value specified') check_dtype_backend(dtype_backend) is_series = False is_index = False is_scalars = False if isinstance(arg, ABCSeries): is_series = True 
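# Added commentary (not in the original pandas source): to_numeric records the input
# container kind at this point (is_series / is_index / is_scalars) so that, once the
# values have been converted, the result can be re-wrapped in the same kind of object
# further down (arg._constructor for Series, Index(...) for Index, values[0] for scalars).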
values = arg.values elif isinstance(arg, ABCIndex): is_index = True if needs_i8_conversion(arg.dtype): values = arg.view('i8') else: values = arg.values elif isinstance(arg, (list, tuple)): values = np.array(arg, dtype='O') elif is_scalar(arg): if is_decimal(arg): return float(arg) if is_number(arg): return arg is_scalars = True values = np.array([arg], dtype='O') elif getattr(arg, 'ndim', 1) > 1: raise TypeError('arg must be a list, tuple, 1-d array, or Series') else: values = arg mask: npt.NDArray[np.bool_] | None = None if isinstance(values, BaseMaskedArray): mask = values._mask values = values._data[~mask] values_dtype = getattr(values, 'dtype', None) if isinstance(values_dtype, ArrowDtype): mask = values.isna() values = values.dropna().to_numpy() new_mask: np.ndarray | None = None if is_numeric_dtype(values_dtype): pass elif lib.is_np_dtype(values_dtype, 'mM'): values = values.view(np.int64) else: values = ensure_object(values) coerce_numeric = errors != 'raise' (values, new_mask) = lib.maybe_convert_numeric(values, set(), coerce_numeric=coerce_numeric, convert_to_masked_nullable=dtype_backend is not lib.no_default or (isinstance(values_dtype, StringDtype) and values_dtype.na_value is libmissing.NA)) if new_mask is not None: values = values[~new_mask] elif dtype_backend is not lib.no_default and new_mask is None or (isinstance(values_dtype, StringDtype) and values_dtype.na_value is libmissing.NA): new_mask = np.zeros(values.shape, dtype=np.bool_) if downcast is not None and is_numeric_dtype(values.dtype): typecodes: str | None = None if downcast in ('integer', 'signed'): typecodes = np.typecodes['Integer'] elif downcast == 'unsigned' and (not len(values) or np.min(values) >= 0): typecodes = np.typecodes['UnsignedInteger'] elif downcast == 'float': typecodes = np.typecodes['Float'] float_32_char = np.dtype(np.float32).char float_32_ind = typecodes.index(float_32_char) typecodes = typecodes[float_32_ind:] if typecodes is not None: for typecode in typecodes: dtype = np.dtype(typecode) if dtype.itemsize <= values.dtype.itemsize: values = maybe_downcast_numeric(values, dtype) if values.dtype == dtype: break if (mask is not None or new_mask is not None) and (not is_string_dtype(values.dtype)): if mask is None or (new_mask is not None and new_mask.shape == mask.shape): mask = new_mask else: mask = mask.copy() assert isinstance(mask, np.ndarray) data = np.zeros(mask.shape, dtype=values.dtype) data[~mask] = values from pandas.core.arrays import ArrowExtensionArray, BooleanArray, FloatingArray, IntegerArray klass: type[IntegerArray | BooleanArray | FloatingArray] if is_integer_dtype(data.dtype): klass = IntegerArray elif is_bool_dtype(data.dtype): klass = BooleanArray else: klass = FloatingArray values = klass(data, mask) if dtype_backend == 'pyarrow' or isinstance(values_dtype, ArrowDtype): values = ArrowExtensionArray(values.__arrow_array__()) if is_series: return arg._constructor(values, index=arg.index, name=arg.name) elif is_index: from pandas import Index return Index(values, name=arg.name) elif is_scalars: return values[0] else: return values # File: pandas-main/pandas/core/tools/timedeltas.py """""" from __future__ import annotations from typing import TYPE_CHECKING, Any, overload import numpy as np from pandas._libs import lib from pandas._libs.tslibs import NaT, NaTType from pandas._libs.tslibs.timedeltas import Timedelta, disallow_ambiguous_unit, parse_timedelta_unit from pandas.core.dtypes.common import is_list_like from pandas.core.dtypes.dtypes import ArrowDtype from 
pandas.core.dtypes.generic import ABCIndex, ABCSeries from pandas.core.arrays.timedeltas import sequence_to_td64ns if TYPE_CHECKING: from collections.abc import Hashable from datetime import timedelta from pandas._libs.tslibs.timedeltas import UnitChoices from pandas._typing import ArrayLike, DateTimeErrorChoices from pandas import Index, Series, TimedeltaIndex @overload def to_timedelta(arg: str | float | timedelta, unit: UnitChoices | None=..., errors: DateTimeErrorChoices=...) -> Timedelta: ... @overload def to_timedelta(arg: Series, unit: UnitChoices | None=..., errors: DateTimeErrorChoices=...) -> Series: ... @overload def to_timedelta(arg: list | tuple | range | ArrayLike | Index, unit: UnitChoices | None=..., errors: DateTimeErrorChoices=...) -> TimedeltaIndex: ... def to_timedelta(arg: str | int | float | timedelta | list | tuple | range | ArrayLike | Index | Series, unit: UnitChoices | None=None, errors: DateTimeErrorChoices='raise') -> Timedelta | TimedeltaIndex | Series | NaTType | Any: if unit is not None: unit = parse_timedelta_unit(unit) disallow_ambiguous_unit(unit) if errors not in ('raise', 'coerce'): raise ValueError("errors must be one of 'raise', or 'coerce'.") if arg is None: return arg elif isinstance(arg, ABCSeries): values = _convert_listlike(arg._values, unit=unit, errors=errors) return arg._constructor(values, index=arg.index, name=arg.name) elif isinstance(arg, ABCIndex): return _convert_listlike(arg, unit=unit, errors=errors, name=arg.name) elif isinstance(arg, np.ndarray) and arg.ndim == 0: arg = lib.item_from_zerodim(arg) elif is_list_like(arg) and getattr(arg, 'ndim', 1) == 1: return _convert_listlike(arg, unit=unit, errors=errors) elif getattr(arg, 'ndim', 1) > 1: raise TypeError('arg must be a string, timedelta, list, tuple, 1-d array, or Series') if isinstance(arg, str) and unit is not None: raise ValueError('unit must not be specified if the input is/contains a str') return _coerce_scalar_to_timedelta_type(arg, unit=unit, errors=errors) def _coerce_scalar_to_timedelta_type(r, unit: UnitChoices | None='ns', errors: DateTimeErrorChoices='raise') -> Timedelta | NaTType: result: Timedelta | NaTType try: result = Timedelta(r, unit) except ValueError: if errors == 'raise': raise result = NaT return result def _convert_listlike(arg, unit: UnitChoices | None=None, errors: DateTimeErrorChoices='raise', name: Hashable | None=None): arg_dtype = getattr(arg, 'dtype', None) if isinstance(arg, (list, tuple)) or arg_dtype is None: arg = np.array(arg, dtype=object) elif isinstance(arg_dtype, ArrowDtype) and arg_dtype.kind == 'm': return arg td64arr = sequence_to_td64ns(arg, unit=unit, errors=errors, copy=False)[0] from pandas import TimedeltaIndex value = TimedeltaIndex(td64arr, name=name) return value # File: pandas-main/pandas/core/tools/times.py from __future__ import annotations from datetime import datetime, time from typing import TYPE_CHECKING import numpy as np from pandas._libs.lib import is_list_like from pandas.core.dtypes.generic import ABCIndex, ABCSeries from pandas.core.dtypes.missing import notna if TYPE_CHECKING: from pandas._typing import DateTimeErrorChoices def to_time(arg, format: str | None=None, infer_time_format: bool=False, errors: DateTimeErrorChoices='raise'): if errors not in ('raise', 'coerce'): raise ValueError("errors must be one of 'raise', or 'coerce'.") def _convert_listlike(arg, format): if isinstance(arg, (list, tuple)): arg = np.array(arg, dtype='O') elif getattr(arg, 'ndim', 1) > 1: raise TypeError('arg must be a string, datetime, 
list, tuple, 1-d array, or Series') arg = np.asarray(arg, dtype='O') if infer_time_format and format is None: format = _guess_time_format_for_array(arg) times: list[time | None] = [] if format is not None: for element in arg: try: times.append(datetime.strptime(element, format).time()) except (ValueError, TypeError) as err: if errors == 'raise': msg = f'Cannot convert {element} to a time with given format {format}' raise ValueError(msg) from err times.append(None) else: formats = _time_formats[:] format_found = False for element in arg: time_object = None try: time_object = time.fromisoformat(element) except (ValueError, TypeError): for time_format in formats: try: time_object = datetime.strptime(element, time_format).time() if not format_found: fmt = formats.pop(formats.index(time_format)) formats.insert(0, fmt) format_found = True break except (ValueError, TypeError): continue if time_object is not None: times.append(time_object) elif errors == 'raise': raise ValueError(f'Cannot convert arg {arg} to a time') else: times.append(None) return times if arg is None: return arg elif isinstance(arg, time): return arg elif isinstance(arg, ABCSeries): values = _convert_listlike(arg._values, format) return arg._constructor(values, index=arg.index, name=arg.name) elif isinstance(arg, ABCIndex): return _convert_listlike(arg, format) elif is_list_like(arg): return _convert_listlike(arg, format) return _convert_listlike(np.array([arg]), format)[0] _time_formats = ['%H:%M', '%H%M', '%I:%M%p', '%I%M%p', '%H:%M:%S', '%H%M%S', '%I:%M:%S%p', '%I%M%S%p'] def _guess_time_format_for_array(arr): non_nan_elements = notna(arr).nonzero()[0] if len(non_nan_elements): element = arr[non_nan_elements[0]] for time_format in _time_formats: try: datetime.strptime(element, time_format) return time_format except ValueError: pass return None # File: pandas-main/pandas/core/util/hashing.py """""" from __future__ import annotations import itertools from typing import TYPE_CHECKING import numpy as np from pandas._libs.hashing import hash_object_array from pandas.core.dtypes.common import is_list_like from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCExtensionArray, ABCIndex, ABCMultiIndex, ABCSeries if TYPE_CHECKING: from collections.abc import Hashable, Iterable, Iterator from pandas._typing import ArrayLike, npt from pandas import DataFrame, Index, MultiIndex, Series _default_hash_key = '0123456789123456' def combine_hash_arrays(arrays: Iterator[np.ndarray], num_items: int) -> npt.NDArray[np.uint64]: try: first = next(arrays) except StopIteration: return np.array([], dtype=np.uint64) arrays = itertools.chain([first], arrays) mult = np.uint64(1000003) out = np.zeros_like(first) + np.uint64(3430008) last_i = 0 for (i, a) in enumerate(arrays): inverse_i = num_items - i out ^= a out *= mult mult += np.uint64(82520 + inverse_i + inverse_i) last_i = i assert last_i + 1 == num_items, 'Fed in wrong num_items' out += np.uint64(97531) return out def hash_pandas_object(obj: Index | DataFrame | Series, index: bool=True, encoding: str='utf8', hash_key: str | None=_default_hash_key, categorize: bool=True) -> Series: from pandas import Series if hash_key is None: hash_key = _default_hash_key if isinstance(obj, ABCMultiIndex): return Series(hash_tuples(obj, encoding, hash_key), dtype='uint64', copy=False) elif isinstance(obj, ABCIndex): h = hash_array(obj._values, encoding, hash_key, categorize).astype('uint64', copy=False) ser = Series(h, index=obj, dtype='uint64', copy=False) 
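# Added commentary (not in the original pandas source): in the Series and DataFrame
# branches below, hash_pandas_object optionally hashes the row index as well and folds it
# into the value hashes via combine_hash_arrays, so index=True (the default) produces
# different hashes than hashing obj._values alone.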
elif isinstance(obj, ABCSeries): h = hash_array(obj._values, encoding, hash_key, categorize).astype('uint64', copy=False) if index: index_iter = (hash_pandas_object(obj.index, index=False, encoding=encoding, hash_key=hash_key, categorize=categorize)._values for _ in [None]) arrays = itertools.chain([h], index_iter) h = combine_hash_arrays(arrays, 2) ser = Series(h, index=obj.index, dtype='uint64', copy=False) elif isinstance(obj, ABCDataFrame): hashes = (hash_array(series._values, encoding, hash_key, categorize) for (_, series) in obj.items()) num_items = len(obj.columns) if index: index_hash_generator = (hash_pandas_object(obj.index, index=False, encoding=encoding, hash_key=hash_key, categorize=categorize)._values for _ in [None]) num_items += 1 _hashes = itertools.chain(hashes, index_hash_generator) hashes = (x for x in _hashes) h = combine_hash_arrays(hashes, num_items) ser = Series(h, index=obj.index, dtype='uint64', copy=False) else: raise TypeError(f'Unexpected type for hashing {type(obj)}') return ser def hash_tuples(vals: MultiIndex | Iterable[tuple[Hashable, ...]], encoding: str='utf8', hash_key: str=_default_hash_key) -> npt.NDArray[np.uint64]: if not is_list_like(vals): raise TypeError('must be convertible to a list-of-tuples') from pandas import Categorical, MultiIndex if not isinstance(vals, ABCMultiIndex): mi = MultiIndex.from_tuples(vals) else: mi = vals cat_vals = [Categorical._simple_new(mi.codes[level], CategoricalDtype(categories=mi.levels[level], ordered=False)) for level in range(mi.nlevels)] hashes = (cat._hash_pandas_object(encoding=encoding, hash_key=hash_key, categorize=False) for cat in cat_vals) h = combine_hash_arrays(hashes, len(cat_vals)) return h def hash_array(vals: ArrayLike, encoding: str='utf8', hash_key: str=_default_hash_key, categorize: bool=True) -> npt.NDArray[np.uint64]: if not hasattr(vals, 'dtype'): raise TypeError('must pass a ndarray-like') if isinstance(vals, ABCExtensionArray): return vals._hash_pandas_object(encoding=encoding, hash_key=hash_key, categorize=categorize) if not isinstance(vals, np.ndarray): raise TypeError(f'hash_array requires np.ndarray or ExtensionArray, not {type(vals).__name__}. 
Use hash_pandas_object instead.') return _hash_ndarray(vals, encoding, hash_key, categorize) def _hash_ndarray(vals: np.ndarray, encoding: str='utf8', hash_key: str=_default_hash_key, categorize: bool=True) -> npt.NDArray[np.uint64]: dtype = vals.dtype if np.issubdtype(dtype, np.complex128): hash_real = _hash_ndarray(vals.real, encoding, hash_key, categorize) hash_imag = _hash_ndarray(vals.imag, encoding, hash_key, categorize) return hash_real + 23 * hash_imag if dtype == bool: vals = vals.astype('u8') elif issubclass(dtype.type, (np.datetime64, np.timedelta64)): vals = vals.view('i8').astype('u8', copy=False) elif issubclass(dtype.type, np.number) and dtype.itemsize <= 8: vals = vals.view(f'u{vals.dtype.itemsize}').astype('u8') else: if categorize: from pandas import Categorical, Index, factorize (codes, categories) = factorize(vals, sort=False) dtype = CategoricalDtype(categories=Index(categories), ordered=False) cat = Categorical._simple_new(codes, dtype) return cat._hash_pandas_object(encoding=encoding, hash_key=hash_key, categorize=False) try: vals = hash_object_array(vals, hash_key, encoding) except TypeError: vals = hash_object_array(vals.astype(str).astype(object), hash_key, encoding) vals ^= vals >> 30 vals *= np.uint64(13787848793156543929) vals ^= vals >> 27 vals *= np.uint64(10723151780598845931) vals ^= vals >> 31 return vals # File: pandas-main/pandas/core/util/numba_.py """""" from __future__ import annotations import inspect import types from typing import TYPE_CHECKING import numpy as np from pandas.compat._optional import import_optional_dependency from pandas.errors import NumbaUtilError if TYPE_CHECKING: from collections.abc import Callable GLOBAL_USE_NUMBA: bool = False def maybe_use_numba(engine: str | None) -> bool: return engine == 'numba' or (engine is None and GLOBAL_USE_NUMBA) def set_use_numba(enable: bool=False) -> None: global GLOBAL_USE_NUMBA if enable: import_optional_dependency('numba') GLOBAL_USE_NUMBA = enable def get_jit_arguments(engine_kwargs: dict[str, bool] | None=None, kwargs: dict | None=None) -> dict[str, bool]: if engine_kwargs is None: engine_kwargs = {} nopython = engine_kwargs.get('nopython', True) if kwargs: raise NumbaUtilError('numba does not support keyword-only argumentshttps://github.com/numba/numba/issues/2916, https://github.com/numba/numba/issues/6846') nogil = engine_kwargs.get('nogil', False) parallel = engine_kwargs.get('parallel', False) return {'nopython': nopython, 'nogil': nogil, 'parallel': parallel} def jit_user_function(func: Callable) -> Callable: if TYPE_CHECKING: import numba else: numba = import_optional_dependency('numba') if numba.extending.is_jitted(func): numba_func = func elif getattr(np, func.__name__, False) is func or isinstance(func, types.BuiltinFunctionType): numba_func = func else: numba_func = numba.extending.register_jitable(func) return numba_func _sentinel = object() def prepare_function_arguments(func: Callable, args: tuple, kwargs: dict) -> tuple[tuple, dict]: if not kwargs: return (args, kwargs) signature = inspect.signature(func) arguments = signature.bind(_sentinel, *args, **kwargs) arguments.apply_defaults() args = arguments.args kwargs = arguments.kwargs assert args[0] is _sentinel args = args[1:] return (args, kwargs) # File: pandas-main/pandas/core/window/__init__.py from pandas.core.window.ewm import ExponentialMovingWindow, ExponentialMovingWindowGroupby from pandas.core.window.expanding import Expanding, ExpandingGroupby from pandas.core.window.rolling import Rolling, RollingGroupby, Window 
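# Illustrative note (added commentary, not part of the original module): the classes
# re-exported from pandas.core.window back the .rolling(), .expanding() and .ewm()
# accessors on Series/DataFrame. For example (values taken from the ewm docstrings below):
#
#     >>> import pandas as pd
#     >>> ser = pd.Series([1, 2, 3, 4])
#     >>> ser.ewm(alpha=0.2).mean()   # dispatches to ExponentialMovingWindow.mean
#     0    1.000000
#     1    1.555556
#     2    2.147541
#     3    2.775068
#     dtype: float64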
__all__ = ['Expanding', 'ExpandingGroupby', 'ExponentialMovingWindow', 'ExponentialMovingWindowGroupby', 'Rolling', 'RollingGroupby', 'Window'] # File: pandas-main/pandas/core/window/common.py """""" from __future__ import annotations from collections import defaultdict from typing import cast import numpy as np from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries from pandas.core.indexes.api import MultiIndex def flex_binary_moment(arg1, arg2, f, pairwise: bool=False): if isinstance(arg1, ABCSeries) and isinstance(arg2, ABCSeries): (X, Y) = prep_binary(arg1, arg2) return f(X, Y) elif isinstance(arg1, ABCDataFrame): from pandas import DataFrame def dataframe_from_int_dict(data, frame_template) -> DataFrame: result = DataFrame(data, index=frame_template.index) if len(result.columns) > 0: result.columns = frame_template.columns[result.columns] else: result.columns = frame_template.columns.copy() return result results = {} if isinstance(arg2, ABCDataFrame): if pairwise is False: if arg1 is arg2: for i in range(len(arg1.columns)): results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i]) return dataframe_from_int_dict(results, arg1) else: if not arg1.columns.is_unique: raise ValueError("'arg1' columns are not unique") if not arg2.columns.is_unique: raise ValueError("'arg2' columns are not unique") (X, Y) = arg1.align(arg2, join='outer') (X, Y) = prep_binary(X, Y) res_columns = arg1.columns.union(arg2.columns) for col in res_columns: if col in X and col in Y: results[col] = f(X[col], Y[col]) return DataFrame(results, index=X.index, columns=res_columns) elif pairwise is True: results = defaultdict(dict) for i in range(len(arg1.columns)): for j in range(len(arg2.columns)): if j < i and arg2 is arg1: results[i][j] = results[j][i] else: results[i][j] = f(*prep_binary(arg1.iloc[:, i], arg2.iloc[:, j])) from pandas import concat result_index = arg1.index.union(arg2.index) if len(result_index): result = concat([concat([results[i][j] for j in range(len(arg2.columns))], ignore_index=True) for i in range(len(arg1.columns))], ignore_index=True, axis=1) result.columns = arg1.columns if arg2.columns.nlevels > 1: arg2.columns = cast(MultiIndex, arg2.columns) result_level = np.tile(result_index, len(result) // len(result_index)) arg2_levels = (np.repeat(arg2.columns.get_level_values(i), len(result) // len(arg2.columns)) for i in range(arg2.columns.nlevels)) result_names = list(arg2.columns.names) + [result_index.name] result.index = MultiIndex.from_arrays([*arg2_levels, result_level], names=result_names) num_levels = len(result.index.levels) new_order = [num_levels - 1] + list(range(num_levels - 1)) result = result.reorder_levels(new_order).sort_index() else: result.index = MultiIndex.from_product([range(len(arg2.columns)), range(len(result_index))]) result = result.swaplevel(1, 0).sort_index() result.index = MultiIndex.from_product([result_index] + [arg2.columns]) else: result = DataFrame(index=MultiIndex(levels=[arg1.index, arg2.columns], codes=[[], []]), columns=arg2.columns, dtype='float64') result.columns = result.columns.set_names(arg1.columns.names) result.index = result.index.set_names(result_index.names + arg2.columns.names) return result else: results = {i: f(*prep_binary(arg1.iloc[:, i], arg2)) for i in range(len(arg1.columns))} return dataframe_from_int_dict(results, arg1) else: return flex_binary_moment(arg2, arg1, f) def zsqrt(x): with np.errstate(all='ignore'): result = np.sqrt(x) mask = x < 0 if isinstance(x, ABCDataFrame): if mask._values.any(): result[mask] = 0 elif mask.any(): result[mask] 
= 0 return result def prep_binary(arg1, arg2): X = arg1 + 0 * arg2 Y = arg2 + 0 * arg1 return (X, Y) # File: pandas-main/pandas/core/window/doc.py """""" from __future__ import annotations from textwrap import dedent from pandas.core.shared_docs import _shared_docs _shared_docs = dict(**_shared_docs) def create_section_header(header: str) -> str: return f"{header}\n{'-' * len(header)}\n" template_header = '\nCalculate the {window_method} {aggregation_description}.\n\n' template_returns = dedent('\n Series or DataFrame\n Return type is the same as the original object with ``np.float64`` dtype.\n\n ').replace('\n', '', 1) template_see_also = dedent('\n Series.{window_method} : Calling {window_method} with Series data.\n DataFrame.{window_method} : Calling {window_method} with DataFrames.\n Series.{agg_method} : Aggregating {agg_method} for Series.\n DataFrame.{agg_method} : Aggregating {agg_method} for DataFrame.\n\n ').replace('\n', '', 1) kwargs_numeric_only = dedent('\n numeric_only : bool, default False\n Include only float, int, boolean columns.\n\n .. versionadded:: 1.5.0\n\n ').replace('\n', '', 1) kwargs_scipy = dedent('\n **kwargs\n Keyword arguments to configure the ``SciPy`` weighted window type.\n\n ').replace('\n', '', 1) window_apply_parameters = dedent("\n func : function\n Must produce a single value from an ndarray input if ``raw=True``\n or a single value from a Series if ``raw=False``. Can also accept a\n Numba JIT function with ``engine='numba'`` specified.\n\n raw : bool, default False\n * ``False`` : passes each row or column as a Series to the\n function.\n * ``True`` : the passed function will receive ndarray\n objects instead.\n If you are just applying a NumPy reduction function this will\n achieve much better performance.\n\n engine : str, default None\n * ``'cython'`` : Runs rolling apply through C-extensions from cython.\n * ``'numba'`` : Runs rolling apply through JIT compiled code from numba.\n Only available when ``raw`` is set to ``True``.\n * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``\n\n engine_kwargs : dict, default None\n * For ``'cython'`` engine, there are no accepted ``engine_kwargs``\n * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``\n and ``parallel`` dictionary keys. The values must either be ``True`` or\n ``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is\n ``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be\n applied to both the ``func`` and the ``apply`` rolling aggregation.\n\n args : tuple, default None\n Positional arguments to be passed into func.\n\n kwargs : dict, default None\n Keyword arguments to be passed into func.\n\n ").replace('\n', '', 1) numba_notes = 'See :ref:`window.numba_engine` and :ref:`enhancingperf.numba` for extended documentation and performance considerations for the Numba engine.\n\n' def window_agg_numba_parameters(version: str='1.3') -> str: return dedent("\n engine : str, default None\n * ``'cython'`` : Runs the operation through C-extensions from cython.\n * ``'numba'`` : Runs the operation through JIT compiled code from numba.\n * ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``\n\n .. versionadded:: {version}.0\n\n engine_kwargs : dict, default None\n * For ``'cython'`` engine, there are no accepted ``engine_kwargs``\n * For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``\n and ``parallel`` dictionary keys. The values must either be ``True`` or\n ``False``. 
The default ``engine_kwargs`` for the ``'numba'`` engine is\n ``{{'nopython': True, 'nogil': False, 'parallel': False}}``\n\n .. versionadded:: {version}.0\n\n ").replace('\n', '', 1).replace('{version}', version) # File: pandas-main/pandas/core/window/ewm.py from __future__ import annotations import datetime from functools import partial from textwrap import dedent from typing import TYPE_CHECKING import numpy as np from pandas._libs.tslibs import Timedelta import pandas._libs.window.aggregations as window_aggregations from pandas.util._decorators import doc from pandas.core.dtypes.common import is_datetime64_dtype, is_numeric_dtype from pandas.core.dtypes.dtypes import DatetimeTZDtype from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import isna from pandas.core import common from pandas.core.arrays.datetimelike import dtype_to_unit from pandas.core.indexers.objects import BaseIndexer, ExponentialMovingWindowIndexer, GroupbyIndexer from pandas.core.util.numba_ import get_jit_arguments, maybe_use_numba from pandas.core.window.common import zsqrt from pandas.core.window.doc import _shared_docs, create_section_header, kwargs_numeric_only, numba_notes, template_header, template_returns, template_see_also, window_agg_numba_parameters from pandas.core.window.numba_ import generate_numba_ewm_func, generate_numba_ewm_table_func from pandas.core.window.online import EWMMeanState, generate_online_numba_ewma_func from pandas.core.window.rolling import BaseWindow, BaseWindowGroupby if TYPE_CHECKING: from pandas._typing import TimedeltaConvertibleTypes, npt from pandas import DataFrame, Series from pandas.core.generic import NDFrame def get_center_of_mass(comass: float | None, span: float | None, halflife: float | None, alpha: float | None) -> float: valid_count = common.count_not_none(comass, span, halflife, alpha) if valid_count > 1: raise ValueError('comass, span, halflife, and alpha are mutually exclusive') if comass is not None: if comass < 0: raise ValueError('comass must satisfy: comass >= 0') elif span is not None: if span < 1: raise ValueError('span must satisfy: span >= 1') comass = (span - 1) / 2 elif halflife is not None: if halflife <= 0: raise ValueError('halflife must satisfy: halflife > 0') decay = 1 - np.exp(np.log(0.5) / halflife) comass = 1 / decay - 1 elif alpha is not None: if alpha <= 0 or alpha > 1: raise ValueError('alpha must satisfy: 0 < alpha <= 1') comass = (1 - alpha) / alpha else: raise ValueError('Must pass one of comass, span, halflife, or alpha') return float(comass) def _calculate_deltas(times: np.ndarray | NDFrame, halflife: float | TimedeltaConvertibleTypes | None) -> npt.NDArray[np.float64]: unit = dtype_to_unit(times.dtype) if isinstance(times, ABCSeries): times = times._values _times = np.asarray(times.view(np.int64), dtype=np.float64) _halflife = float(Timedelta(halflife).as_unit(unit)._value) return np.diff(_times) / _halflife class ExponentialMovingWindow(BaseWindow): _attributes = ['com', 'span', 'halflife', 'alpha', 'min_periods', 'adjust', 'ignore_na', 'times', 'method'] def __init__(self, obj: NDFrame, com: float | None=None, span: float | None=None, halflife: float | TimedeltaConvertibleTypes | None=None, alpha: float | None=None, min_periods: int | None=0, adjust: bool=True, ignore_na: bool=False, times: np.ndarray | NDFrame | None=None, method: str='single', *, selection=None) -> None: super().__init__(obj=obj, min_periods=1 if min_periods is None else max(int(min_periods), 1), on=None, center=False, closed=None, 
method=method, selection=selection) self.com = com self.span = span self.halflife = halflife self.alpha = alpha self.adjust = adjust self.ignore_na = ignore_na self.times = times if self.times is not None: times_dtype = getattr(self.times, 'dtype', None) if not (is_datetime64_dtype(times_dtype) or isinstance(times_dtype, DatetimeTZDtype)): raise ValueError('times must be datetime64 dtype.') if len(self.times) != len(obj): raise ValueError('times must be the same length as the object.') if not isinstance(self.halflife, (str, datetime.timedelta, np.timedelta64)): raise ValueError('halflife must be a timedelta convertible object') if isna(self.times).any(): raise ValueError('Cannot convert NaT values to integer') self._deltas = _calculate_deltas(self.times, self.halflife) if common.count_not_none(self.com, self.span, self.alpha) > 0: if not self.adjust: raise NotImplementedError('None of com, span, or alpha can be specified if times is provided and adjust=False') self._com = get_center_of_mass(self.com, self.span, None, self.alpha) else: self._com = 1.0 else: if self.halflife is not None and isinstance(self.halflife, (str, datetime.timedelta, np.timedelta64)): raise ValueError('halflife can only be a timedelta convertible argument if times is not None.') self._deltas = np.ones(max(self.obj.shape[0] - 1, 0), dtype=np.float64) self._com = get_center_of_mass(self.com, self.span, self.halflife, self.alpha) def _check_window_bounds(self, start: np.ndarray, end: np.ndarray, num_vals: int) -> None: pass def _get_window_indexer(self) -> BaseIndexer: return ExponentialMovingWindowIndexer() def online(self, engine: str='numba', engine_kwargs=None) -> OnlineExponentialMovingWindow: return OnlineExponentialMovingWindow(obj=self.obj, com=self.com, span=self.span, halflife=self.halflife, alpha=self.alpha, min_periods=self.min_periods, adjust=self.adjust, ignore_na=self.ignore_na, times=self.times, engine=engine, engine_kwargs=engine_kwargs, selection=self._selection) @doc(_shared_docs['aggregate'], see_also=dedent('\n See Also\n --------\n pandas.DataFrame.rolling.aggregate\n '), examples=dedent('\n Examples\n --------\n >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})\n >>> df\n A B C\n 0 1 4 7\n 1 2 5 8\n 2 3 6 9\n\n >>> df.ewm(alpha=0.5).mean()\n A B C\n 0 1.000000 4.000000 7.000000\n 1 1.666667 4.666667 7.666667\n 2 2.428571 5.428571 8.428571\n '), klass='Series/Dataframe', axis='') def aggregate(self, func, *args, **kwargs): return super().aggregate(func, *args, **kwargs) agg = aggregate @doc(template_header, create_section_header('Parameters'), kwargs_numeric_only, window_agg_numba_parameters(), create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Notes'), numba_notes, create_section_header('Examples'), dedent(' >>> ser = pd.Series([1, 2, 3, 4])\n >>> ser.ewm(alpha=.2).mean()\n 0 1.000000\n 1 1.555556\n 2 2.147541\n 3 2.775068\n dtype: float64\n '), window_method='ewm', aggregation_description='(exponential weighted moment) mean', agg_method='mean') def mean(self, numeric_only: bool=False, engine=None, engine_kwargs=None): if maybe_use_numba(engine): if self.method == 'single': func = generate_numba_ewm_func else: func = generate_numba_ewm_table_func ewm_func = func(**get_jit_arguments(engine_kwargs), com=self._com, adjust=self.adjust, ignore_na=self.ignore_na, deltas=tuple(self._deltas), normalize=True) return self._apply(ewm_func, name='mean') elif engine in ('cython', None): if engine_kwargs is not 
None: raise ValueError('cython engine does not accept engine_kwargs') deltas = None if self.times is None else self._deltas window_func = partial(window_aggregations.ewm, com=self._com, adjust=self.adjust, ignore_na=self.ignore_na, deltas=deltas, normalize=True) return self._apply(window_func, name='mean', numeric_only=numeric_only) else: raise ValueError("engine must be either 'numba' or 'cython'") @doc(template_header, create_section_header('Parameters'), kwargs_numeric_only, window_agg_numba_parameters(), create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Notes'), numba_notes, create_section_header('Examples'), dedent(' >>> ser = pd.Series([1, 2, 3, 4])\n >>> ser.ewm(alpha=.2).sum()\n 0 1.000\n 1 2.800\n 2 5.240\n 3 8.192\n dtype: float64\n '), window_method='ewm', aggregation_description='(exponential weighted moment) sum', agg_method='sum') def sum(self, numeric_only: bool=False, engine=None, engine_kwargs=None): if not self.adjust: raise NotImplementedError('sum is not implemented with adjust=False') if self.times is not None: raise NotImplementedError('sum is not implemented with times') if maybe_use_numba(engine): if self.method == 'single': func = generate_numba_ewm_func else: func = generate_numba_ewm_table_func ewm_func = func(**get_jit_arguments(engine_kwargs), com=self._com, adjust=self.adjust, ignore_na=self.ignore_na, deltas=tuple(self._deltas), normalize=False) return self._apply(ewm_func, name='sum') elif engine in ('cython', None): if engine_kwargs is not None: raise ValueError('cython engine does not accept engine_kwargs') deltas = None if self.times is None else self._deltas window_func = partial(window_aggregations.ewm, com=self._com, adjust=self.adjust, ignore_na=self.ignore_na, deltas=deltas, normalize=False) return self._apply(window_func, name='sum', numeric_only=numeric_only) else: raise ValueError("engine must be either 'numba' or 'cython'") @doc(template_header, create_section_header('Parameters'), dedent(' bias : bool, default False\n Use a standard estimation bias correction.\n '), kwargs_numeric_only, create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Examples'), dedent(' >>> ser = pd.Series([1, 2, 3, 4])\n >>> ser.ewm(alpha=.2).std()\n 0 NaN\n 1 0.707107\n 2 0.995893\n 3 1.277320\n dtype: float64\n '), window_method='ewm', aggregation_description='(exponential weighted moment) standard deviation', agg_method='std') def std(self, bias: bool=False, numeric_only: bool=False): if numeric_only and self._selected_obj.ndim == 1 and (not is_numeric_dtype(self._selected_obj.dtype)): raise NotImplementedError(f'{type(self).__name__}.std does not implement numeric_only') if self.times is not None: raise NotImplementedError('std is not implemented with times') return zsqrt(self.var(bias=bias, numeric_only=numeric_only)) @doc(template_header, create_section_header('Parameters'), dedent(' bias : bool, default False\n Use a standard estimation bias correction.\n '), kwargs_numeric_only, create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Examples'), dedent(' >>> ser = pd.Series([1, 2, 3, 4])\n >>> ser.ewm(alpha=.2).var()\n 0 NaN\n 1 0.500000\n 2 0.991803\n 3 1.631547\n dtype: float64\n '), window_method='ewm', aggregation_description='(exponential weighted moment) variance', agg_method='var') def var(self, bias: bool=False, numeric_only: 
bool=False): if self.times is not None: raise NotImplementedError('var is not implemented with times') window_func = window_aggregations.ewmcov wfunc = partial(window_func, com=self._com, adjust=self.adjust, ignore_na=self.ignore_na, bias=bias) def var_func(values, begin, end, min_periods): return wfunc(values, begin, end, min_periods, values) return self._apply(var_func, name='var', numeric_only=numeric_only) @doc(template_header, create_section_header('Parameters'), dedent(' other : Series or DataFrame , optional\n If not supplied then will default to self and produce pairwise\n output.\n pairwise : bool, default None\n If False then only matching columns between self and other will be\n used and the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the\n output will be a MultiIndex DataFrame in the case of DataFrame\n inputs. In the case of missing elements, only complete pairwise\n observations will be used.\n bias : bool, default False\n Use a standard estimation bias correction.\n '), kwargs_numeric_only, create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Examples'), dedent(' >>> ser1 = pd.Series([1, 2, 3, 4])\n >>> ser2 = pd.Series([10, 11, 13, 16])\n >>> ser1.ewm(alpha=.2).cov(ser2)\n 0 NaN\n 1 0.500000\n 2 1.524590\n 3 3.408836\n dtype: float64\n '), window_method='ewm', aggregation_description='(exponential weighted moment) sample covariance', agg_method='cov') def cov(self, other: DataFrame | Series | None=None, pairwise: bool | None=None, bias: bool=False, numeric_only: bool=False): if self.times is not None: raise NotImplementedError('cov is not implemented with times') from pandas import Series self._validate_numeric_only('cov', numeric_only) def cov_func(x, y): x_array = self._prep_values(x) y_array = self._prep_values(y) window_indexer = self._get_window_indexer() min_periods = self.min_periods if self.min_periods is not None else window_indexer.window_size (start, end) = window_indexer.get_window_bounds(num_values=len(x_array), min_periods=min_periods, center=self.center, closed=self.closed, step=self.step) result = window_aggregations.ewmcov(x_array, start, end, self.min_periods, y_array, self._com, self.adjust, self.ignore_na, bias) return Series(result, index=x.index, name=x.name, copy=False) return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func, numeric_only) @doc(template_header, create_section_header('Parameters'), dedent(' other : Series or DataFrame, optional\n If not supplied then will default to self and produce pairwise\n output.\n pairwise : bool, default None\n If False then only matching columns between self and other will be\n used and the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the\n output will be a MultiIndex DataFrame in the case of DataFrame\n inputs. 
In the case of missing elements, only complete pairwise\n observations will be used.\n '), kwargs_numeric_only, create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Examples'), dedent(' >>> ser1 = pd.Series([1, 2, 3, 4])\n >>> ser2 = pd.Series([10, 11, 13, 16])\n >>> ser1.ewm(alpha=.2).corr(ser2)\n 0 NaN\n 1 1.000000\n 2 0.982821\n 3 0.977802\n dtype: float64\n '), window_method='ewm', aggregation_description='(exponential weighted moment) sample correlation', agg_method='corr') def corr(self, other: DataFrame | Series | None=None, pairwise: bool | None=None, numeric_only: bool=False): if self.times is not None: raise NotImplementedError('corr is not implemented with times') from pandas import Series self._validate_numeric_only('corr', numeric_only) def cov_func(x, y): x_array = self._prep_values(x) y_array = self._prep_values(y) window_indexer = self._get_window_indexer() min_periods = self.min_periods if self.min_periods is not None else window_indexer.window_size (start, end) = window_indexer.get_window_bounds(num_values=len(x_array), min_periods=min_periods, center=self.center, closed=self.closed, step=self.step) def _cov(X, Y): return window_aggregations.ewmcov(X, start, end, min_periods, Y, self._com, self.adjust, self.ignore_na, True) with np.errstate(all='ignore'): cov = _cov(x_array, y_array) x_var = _cov(x_array, x_array) y_var = _cov(y_array, y_array) result = cov / zsqrt(x_var * y_var) return Series(result, index=x.index, name=x.name, copy=False) return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func, numeric_only) class ExponentialMovingWindowGroupby(BaseWindowGroupby, ExponentialMovingWindow): _attributes = ExponentialMovingWindow._attributes + BaseWindowGroupby._attributes def __init__(self, obj, *args, _grouper=None, **kwargs) -> None: super().__init__(obj, *args, _grouper=_grouper, **kwargs) if not obj.empty and self.times is not None: groupby_order = np.concatenate(list(self._grouper.indices.values())) self._deltas = _calculate_deltas(self.times.take(groupby_order), self.halflife) def _get_window_indexer(self) -> GroupbyIndexer: window_indexer = GroupbyIndexer(groupby_indices=self._grouper.indices, window_indexer=ExponentialMovingWindowIndexer) return window_indexer class OnlineExponentialMovingWindow(ExponentialMovingWindow): def __init__(self, obj: NDFrame, com: float | None=None, span: float | None=None, halflife: float | TimedeltaConvertibleTypes | None=None, alpha: float | None=None, min_periods: int | None=0, adjust: bool=True, ignore_na: bool=False, times: np.ndarray | NDFrame | None=None, engine: str='numba', engine_kwargs: dict[str, bool] | None=None, *, selection=None) -> None: if times is not None: raise NotImplementedError('times is not implemented with online operations.') super().__init__(obj=obj, com=com, span=span, halflife=halflife, alpha=alpha, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, times=times, selection=selection) self._mean = EWMMeanState(self._com, self.adjust, self.ignore_na, obj.shape) if maybe_use_numba(engine): self.engine = engine self.engine_kwargs = engine_kwargs else: raise ValueError("'numba' is the only supported engine") def reset(self) -> None: self._mean.reset() def aggregate(self, func, *args, **kwargs): raise NotImplementedError('aggregate is not implemented.') def std(self, bias: bool=False, *args, **kwargs): raise NotImplementedError('std is not implemented.') def corr(self, other: DataFrame | Series | None=None, 
pairwise: bool | None=None, numeric_only: bool=False): raise NotImplementedError('corr is not implemented.') def cov(self, other: DataFrame | Series | None=None, pairwise: bool | None=None, bias: bool=False, numeric_only: bool=False): raise NotImplementedError('cov is not implemented.') def var(self, bias: bool=False, numeric_only: bool=False): raise NotImplementedError('var is not implemented.') def mean(self, *args, update=None, update_times=None, **kwargs): result_kwargs = {} is_frame = self._selected_obj.ndim == 2 if update_times is not None: raise NotImplementedError('update_times is not implemented.') update_deltas = np.ones(max(self._selected_obj.shape[-1] - 1, 0), dtype=np.float64) if update is not None: if self._mean.last_ewm is None: raise ValueError('Must call mean with update=None first before passing update') result_from = 1 result_kwargs['index'] = update.index if is_frame: last_value = self._mean.last_ewm[np.newaxis, :] result_kwargs['columns'] = update.columns else: last_value = self._mean.last_ewm result_kwargs['name'] = update.name np_array = np.concatenate((last_value, update.to_numpy())) else: result_from = 0 result_kwargs['index'] = self._selected_obj.index if is_frame: result_kwargs['columns'] = self._selected_obj.columns else: result_kwargs['name'] = self._selected_obj.name np_array = self._selected_obj.astype(np.float64).to_numpy() ewma_func = generate_online_numba_ewma_func(**get_jit_arguments(self.engine_kwargs)) result = self._mean.run_ewm(np_array if is_frame else np_array[:, np.newaxis], update_deltas, self.min_periods, ewma_func) if not is_frame: result = result.squeeze() result = result[result_from:] result = self._selected_obj._constructor(result, **result_kwargs) return result # File: pandas-main/pandas/core/window/expanding.py from __future__ import annotations from textwrap import dedent from typing import TYPE_CHECKING, Any, Literal from pandas.util._decorators import doc from pandas.core.indexers.objects import BaseIndexer, ExpandingIndexer, GroupbyIndexer from pandas.core.window.doc import _shared_docs, create_section_header, kwargs_numeric_only, numba_notes, template_header, template_returns, template_see_also, window_agg_numba_parameters, window_apply_parameters from pandas.core.window.rolling import BaseWindowGroupby, RollingAndExpandingMixin if TYPE_CHECKING: from collections.abc import Callable from pandas._typing import QuantileInterpolation, WindowingRankType from pandas import DataFrame, Series from pandas.core.generic import NDFrame class Expanding(RollingAndExpandingMixin): _attributes: list[str] = ['min_periods', 'method'] def __init__(self, obj: NDFrame, min_periods: int=1, method: str='single', selection=None) -> None: super().__init__(obj=obj, min_periods=min_periods, method=method, selection=selection) def _get_window_indexer(self) -> BaseIndexer: return ExpandingIndexer() @doc(_shared_docs['aggregate'], see_also=dedent('\n See Also\n --------\n DataFrame.aggregate : Similar DataFrame method.\n Series.aggregate : Similar Series method.\n '), examples=dedent('\n Examples\n --------\n >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})\n >>> df\n A B C\n 0 1 4 7\n 1 2 5 8\n 2 3 6 9\n\n >>> df.expanding().mean()\n A B C\n 0 1.0 4.0 7.0\n 1 1.5 4.5 7.5\n 2 2.0 5.0 8.0\n '), klass='Series/DataFrame', axis='') def aggregate(self, func, *args, **kwargs): return super().aggregate(func, *args, **kwargs) agg = aggregate @doc(template_header, create_section_header('Parameters'),
kwargs_numeric_only, create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Examples'), dedent(" >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])\n >>> ser.expanding().count()\n a 1.0\n b 2.0\n c 3.0\n d 4.0\n dtype: float64\n "), window_method='expanding', aggregation_description='count of non NaN observations', agg_method='count') def count(self, numeric_only: bool=False): return super().count(numeric_only=numeric_only) @doc(template_header, create_section_header('Parameters'), window_apply_parameters, create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Examples'), dedent(" >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])\n >>> ser.expanding().apply(lambda s: s.max() - 2 * s.min())\n a -1.0\n b 0.0\n c 1.0\n d 2.0\n dtype: float64\n "), window_method='expanding', aggregation_description='custom aggregation function', agg_method='apply') def apply(self, func: Callable[..., Any], raw: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None, args: tuple[Any, ...] | None=None, kwargs: dict[str, Any] | None=None): return super().apply(func, raw=raw, engine=engine, engine_kwargs=engine_kwargs, args=args, kwargs=kwargs) @doc(template_header, create_section_header('Parameters'), kwargs_numeric_only, window_agg_numba_parameters(), create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Notes'), numba_notes, create_section_header('Examples'), dedent(" >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])\n >>> ser.expanding().sum()\n a 1.0\n b 3.0\n c 6.0\n d 10.0\n dtype: float64\n "), window_method='expanding', aggregation_description='sum', agg_method='sum') def sum(self, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): return super().sum(numeric_only=numeric_only, engine=engine, engine_kwargs=engine_kwargs) @doc(template_header, create_section_header('Parameters'), kwargs_numeric_only, window_agg_numba_parameters(), create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Notes'), numba_notes, create_section_header('Examples'), dedent(" >>> ser = pd.Series([3, 2, 1, 4], index=['a', 'b', 'c', 'd'])\n >>> ser.expanding().max()\n a 3.0\n b 3.0\n c 3.0\n d 4.0\n dtype: float64\n "), window_method='expanding', aggregation_description='maximum', agg_method='max') def max(self, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): return super().max(numeric_only=numeric_only, engine=engine, engine_kwargs=engine_kwargs) @doc(template_header, create_section_header('Parameters'), kwargs_numeric_only, window_agg_numba_parameters(), create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Notes'), numba_notes, create_section_header('Examples'), dedent(" >>> ser = pd.Series([2, 3, 4, 1], index=['a', 'b', 'c', 'd'])\n >>> ser.expanding().min()\n a 2.0\n b 2.0\n c 2.0\n d 1.0\n dtype: float64\n "), window_method='expanding', aggregation_description='minimum', agg_method='min') def min(self, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): return 
super().min(numeric_only=numeric_only, engine=engine, engine_kwargs=engine_kwargs) @doc(template_header, create_section_header('Parameters'), kwargs_numeric_only, window_agg_numba_parameters(), create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Notes'), numba_notes, create_section_header('Examples'), dedent(" >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])\n >>> ser.expanding().mean()\n a 1.0\n b 1.5\n c 2.0\n d 2.5\n dtype: float64\n "), window_method='expanding', aggregation_description='mean', agg_method='mean') def mean(self, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): return super().mean(numeric_only=numeric_only, engine=engine, engine_kwargs=engine_kwargs) @doc(template_header, create_section_header('Parameters'), kwargs_numeric_only, window_agg_numba_parameters(), create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Notes'), numba_notes, create_section_header('Examples'), dedent(" >>> ser = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])\n >>> ser.expanding().median()\n a 1.0\n b 1.5\n c 2.0\n d 2.5\n dtype: float64\n "), window_method='expanding', aggregation_description='median', agg_method='median') def median(self, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): return super().median(numeric_only=numeric_only, engine=engine, engine_kwargs=engine_kwargs) @doc(template_header, create_section_header('Parameters'), dedent('\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n\n ').replace('\n', '', 1), kwargs_numeric_only, window_agg_numba_parameters('1.4'), create_section_header('Returns'), template_returns, create_section_header('See Also'), 'numpy.std : Equivalent method for NumPy array.\n', template_see_also, create_section_header('Notes'), dedent('\n The default ``ddof`` of 1 used in :meth:`Series.std` is different\n than the default ``ddof`` of 0 in :func:`numpy.std`.\n\n A minimum of one period is required for the rolling calculation.\n\n ').replace('\n', '', 1), create_section_header('Examples'), dedent('\n >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])\n\n >>> s.expanding(3).std()\n 0 NaN\n 1 NaN\n 2 0.577350\n 3 0.957427\n 4 0.894427\n 5 0.836660\n 6 0.786796\n dtype: float64\n ').replace('\n', '', 1), window_method='expanding', aggregation_description='standard deviation', agg_method='std') def std(self, ddof: int=1, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): return super().std(ddof=ddof, numeric_only=numeric_only, engine=engine, engine_kwargs=engine_kwargs) @doc(template_header, create_section_header('Parameters'), dedent('\n ddof : int, default 1\n Delta Degrees of Freedom. 
The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n\n ').replace('\n', '', 1), kwargs_numeric_only, window_agg_numba_parameters('1.4'), create_section_header('Returns'), template_returns, create_section_header('See Also'), 'numpy.var : Equivalent method for NumPy array.\n', template_see_also, create_section_header('Notes'), dedent('\n The default ``ddof`` of 1 used in :meth:`Series.var` is different\n than the default ``ddof`` of 0 in :func:`numpy.var`.\n\n A minimum of one period is required for the rolling calculation.\n\n ').replace('\n', '', 1), create_section_header('Examples'), dedent('\n >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])\n\n >>> s.expanding(3).var()\n 0 NaN\n 1 NaN\n 2 0.333333\n 3 0.916667\n 4 0.800000\n 5 0.700000\n 6 0.619048\n dtype: float64\n ').replace('\n', '', 1), window_method='expanding', aggregation_description='variance', agg_method='var') def var(self, ddof: int=1, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): return super().var(ddof=ddof, numeric_only=numeric_only, engine=engine, engine_kwargs=engine_kwargs) @doc(template_header, create_section_header('Parameters'), dedent('\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n\n ').replace('\n', '', 1), kwargs_numeric_only, create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Notes'), 'A minimum of one period is required for the calculation.\n\n', create_section_header('Examples'), dedent('\n >>> s = pd.Series([0, 1, 2, 3])\n\n >>> s.expanding().sem()\n 0 NaN\n 1 0.707107\n 2 0.707107\n 3 0.745356\n dtype: float64\n ').replace('\n', '', 1), window_method='expanding', aggregation_description='standard error of mean', agg_method='sem') def sem(self, ddof: int=1, numeric_only: bool=False): return super().sem(ddof=ddof, numeric_only=numeric_only) @doc(template_header, create_section_header('Parameters'), kwargs_numeric_only, create_section_header('Returns'), template_returns, create_section_header('See Also'), 'scipy.stats.skew : Third moment of a probability density.\n', template_see_also, create_section_header('Notes'), 'A minimum of three periods is required for the rolling calculation.\n\n', create_section_header('Examples'), dedent(" >>> ser = pd.Series([-1, 0, 2, -1, 2], index=['a', 'b', 'c', 'd', 'e'])\n >>> ser.expanding().skew()\n a NaN\n b NaN\n c 0.935220\n d 1.414214\n e 0.315356\n dtype: float64\n "), window_method='expanding', aggregation_description='unbiased skewness', agg_method='skew') def skew(self, numeric_only: bool=False): return super().skew(numeric_only=numeric_only) @doc(template_header, create_section_header('Parameters'), kwargs_numeric_only, create_section_header('Returns'), template_returns, create_section_header('See Also'), 'scipy.stats.kurtosis : Reference SciPy method.\n', template_see_also, create_section_header('Notes'), 'A minimum of four periods is required for the calculation.\n\n', create_section_header('Examples'), dedent('\n The example below will show a rolling calculation with a window size of\n four matching the equivalent function call using `scipy.stats`.\n\n >>> arr = [1, 2, 3, 4, 999]\n >>> import scipy.stats\n >>> print(f"{{scipy.stats.kurtosis(arr[:-1], bias=False):.6f}}")\n -1.200000\n >>> print(f"{{scipy.stats.kurtosis(arr, bias=False):.6f}}")\n 4.999874\n >>> s = 
pd.Series(arr)\n >>> s.expanding(4).kurt()\n 0 NaN\n 1 NaN\n 2 NaN\n 3 -1.200000\n 4 4.999874\n dtype: float64\n ').replace('\n', '', 1), window_method='expanding', aggregation_description="Fisher's definition of kurtosis without bias", agg_method='kurt') def kurt(self, numeric_only: bool=False): return super().kurt(numeric_only=numeric_only) @doc(template_header, create_section_header('Parameters'), dedent("\n q : float\n Quantile to compute. 0 <= quantile <= 1.\n\n .. deprecated:: 2.1.0\n This was renamed from 'quantile' to 'q' in version 2.1.0.\n interpolation : {{'linear', 'lower', 'higher', 'midpoint', 'nearest'}}\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`:\n\n * linear: `i + (j - i) * fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n * lower: `i`.\n * higher: `j`.\n * nearest: `i` or `j` whichever is nearest.\n * midpoint: (`i` + `j`) / 2.\n ").replace('\n', '', 1), kwargs_numeric_only, create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Examples'), dedent(" >>> ser = pd.Series([1, 2, 3, 4, 5, 6], index=['a', 'b', 'c', 'd', 'e', 'f'])\n >>> ser.expanding(min_periods=4).quantile(.25)\n a NaN\n b NaN\n c NaN\n d 1.75\n e 2.00\n f 2.25\n dtype: float64\n "), window_method='expanding', aggregation_description='quantile', agg_method='quantile') def quantile(self, q: float, interpolation: QuantileInterpolation='linear', numeric_only: bool=False): return super().quantile(q=q, interpolation=interpolation, numeric_only=numeric_only) @doc(template_header, '.. versionadded:: 1.4.0 \n\n', create_section_header('Parameters'), dedent("\n method : {{'average', 'min', 'max'}}, default 'average'\n How to rank the group of records that have the same value (i.e. ties):\n\n * average: average rank of the group\n * min: lowest rank in the group\n * max: highest rank in the group\n\n ascending : bool, default True\n Whether or not the elements should be ranked in ascending order.\n pct : bool, default False\n Whether or not to display the returned rankings in percentile\n form.\n ").replace('\n', '', 1), kwargs_numeric_only, create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Examples'), dedent('\n >>> s = pd.Series([1, 4, 2, 3, 5, 3])\n >>> s.expanding().rank()\n 0 1.0\n 1 2.0\n 2 2.0\n 3 3.0\n 4 5.0\n 5 3.5\n dtype: float64\n\n >>> s.expanding().rank(method="max")\n 0 1.0\n 1 2.0\n 2 2.0\n 3 3.0\n 4 5.0\n 5 4.0\n dtype: float64\n\n >>> s.expanding().rank(method="min")\n 0 1.0\n 1 2.0\n 2 2.0\n 3 3.0\n 4 5.0\n 5 3.0\n dtype: float64\n ').replace('\n', '', 1), window_method='expanding', aggregation_description='rank', agg_method='rank') def rank(self, method: WindowingRankType='average', ascending: bool=True, pct: bool=False, numeric_only: bool=False): return super().rank(method=method, ascending=ascending, pct=pct, numeric_only=numeric_only) @doc(template_header, create_section_header('Parameters'), dedent('\n other : Series or DataFrame, optional\n If not supplied then will default to self and produce pairwise\n output.\n pairwise : bool, default None\n If False then only matching columns between self and other will be\n used and the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the\n output will be a MultiIndexed DataFrame in the case of DataFrame\n inputs. 
In the case of missing elements, only complete pairwise\n observations will be used.\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n ').replace('\n', '', 1), kwargs_numeric_only, create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Examples'), dedent(" >>> ser1 = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])\n >>> ser2 = pd.Series([10, 11, 13, 16], index=['a', 'b', 'c', 'd'])\n >>> ser1.expanding().cov(ser2)\n a NaN\n b 0.500000\n c 1.500000\n d 3.333333\n dtype: float64\n "), window_method='expanding', aggregation_description='sample covariance', agg_method='cov') def cov(self, other: DataFrame | Series | None=None, pairwise: bool | None=None, ddof: int=1, numeric_only: bool=False): return super().cov(other=other, pairwise=pairwise, ddof=ddof, numeric_only=numeric_only) @doc(template_header, create_section_header('Parameters'), dedent('\n other : Series or DataFrame, optional\n If not supplied then will default to self and produce pairwise\n output.\n pairwise : bool, default None\n If False then only matching columns between self and other will be\n used and the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the\n output will be a MultiIndexed DataFrame in the case of DataFrame\n inputs. In the case of missing elements, only complete pairwise\n observations will be used.\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n\n ').replace('\n', '', 1), kwargs_numeric_only, create_section_header('Returns'), template_returns, create_section_header('See Also'), dedent("\n cov : Similar method to calculate covariance.\n numpy.corrcoef : NumPy Pearson's correlation calculation.\n ").replace('\n', '', 1), template_see_also, create_section_header('Notes'), dedent("\n This function uses Pearson's definition of correlation\n (https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).\n\n When `other` is not specified, the output will be self correlation (e.g.\n all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`\n set to `True`.\n\n Function will return ``NaN`` for correlations of equal valued sequences;\n this is the result of a 0/0 division error.\n\n When `pairwise` is set to `False`, only matching columns between `self` and\n `other` will be used.\n\n When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame\n with the original index on the first level, and the `other` DataFrame\n columns on the second level.\n\n In the case of missing elements, only complete pairwise observations\n will be used.\n\n "), create_section_header('Examples'), dedent(" >>> ser1 = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])\n >>> ser2 = pd.Series([10, 11, 13, 16], index=['a', 'b', 'c', 'd'])\n >>> ser1.expanding().corr(ser2)\n a NaN\n b 1.000000\n c 0.981981\n d 0.975900\n dtype: float64\n "), window_method='expanding', aggregation_description='correlation', agg_method='corr') def corr(self, other: DataFrame | Series | None=None, pairwise: bool | None=None, ddof: int=1, numeric_only: bool=False): return super().corr(other=other, pairwise=pairwise, ddof=ddof, numeric_only=numeric_only) class ExpandingGroupby(BaseWindowGroupby, Expanding): _attributes = Expanding._attributes + BaseWindowGroupby._attributes def _get_window_indexer(self) -> 
GroupbyIndexer: window_indexer = GroupbyIndexer(groupby_indices=self._grouper.indices, window_indexer=ExpandingIndexer) return window_indexer # File: pandas-main/pandas/core/window/numba_.py from __future__ import annotations import functools from typing import TYPE_CHECKING, Any import numpy as np from pandas.compat._optional import import_optional_dependency from pandas.core.util.numba_ import jit_user_function if TYPE_CHECKING: from collections.abc import Callable from pandas._typing import Scalar @functools.cache def generate_numba_apply_func(func: Callable[..., Scalar], nopython: bool, nogil: bool, parallel: bool): numba_func = jit_user_function(func) if TYPE_CHECKING: import numba else: numba = import_optional_dependency('numba') @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def roll_apply(values: np.ndarray, begin: np.ndarray, end: np.ndarray, minimum_periods: int, *args: Any) -> np.ndarray: result = np.empty(len(begin)) for i in numba.prange(len(result)): start = begin[i] stop = end[i] window = values[start:stop] count_nan = np.sum(np.isnan(window)) if len(window) - count_nan >= minimum_periods: result[i] = numba_func(window, *args) else: result[i] = np.nan return result return roll_apply @functools.cache def generate_numba_ewm_func(nopython: bool, nogil: bool, parallel: bool, com: float, adjust: bool, ignore_na: bool, deltas: tuple, normalize: bool): if TYPE_CHECKING: import numba else: numba = import_optional_dependency('numba') @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def ewm(values: np.ndarray, begin: np.ndarray, end: np.ndarray, minimum_periods: int) -> np.ndarray: result = np.empty(len(values)) alpha = 1.0 / (1.0 + com) old_wt_factor = 1.0 - alpha new_wt = 1.0 if adjust else alpha for i in numba.prange(len(begin)): start = begin[i] stop = end[i] window = values[start:stop] sub_result = np.empty(len(window)) weighted = window[0] nobs = int(not np.isnan(weighted)) sub_result[0] = weighted if nobs >= minimum_periods else np.nan old_wt = 1.0 for j in range(1, len(window)): cur = window[j] is_observation = not np.isnan(cur) nobs += is_observation if not np.isnan(weighted): if is_observation or not ignore_na: if normalize: old_wt *= old_wt_factor ** deltas[start + j - 1] if not adjust and com == 1: new_wt = 1.0 - old_wt else: weighted = old_wt_factor * weighted if is_observation: if normalize: if weighted != cur: weighted = old_wt * weighted + new_wt * cur if normalize: weighted = weighted / (old_wt + new_wt) if adjust: old_wt += new_wt else: old_wt = 1.0 else: weighted += cur elif is_observation: weighted = cur sub_result[j] = weighted if nobs >= minimum_periods else np.nan result[start:stop] = sub_result return result return ewm @functools.cache def generate_numba_table_func(func: Callable[..., np.ndarray], nopython: bool, nogil: bool, parallel: bool): numba_func = jit_user_function(func) if TYPE_CHECKING: import numba else: numba = import_optional_dependency('numba') @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def roll_table(values: np.ndarray, begin: np.ndarray, end: np.ndarray, minimum_periods: int, *args: Any): result = np.empty((len(begin), values.shape[1])) min_periods_mask = np.empty(result.shape) for i in numba.prange(len(result)): start = begin[i] stop = end[i] window = values[start:stop] count_nan = np.sum(np.isnan(window), axis=0) nan_mask = len(window) - count_nan >= minimum_periods if nan_mask.any(): result[i, :] = numba_func(window, *args) min_periods_mask[i, :] = nan_mask result = np.where(min_periods_mask, 
result, np.nan) return result return roll_table @functools.cache def generate_manual_numpy_nan_agg_with_axis(nan_func): if TYPE_CHECKING: import numba else: numba = import_optional_dependency('numba') @numba.jit(nopython=True, nogil=True, parallel=True) def nan_agg_with_axis(table): result = np.empty(table.shape[1]) for i in numba.prange(table.shape[1]): partition = table[:, i] result[i] = nan_func(partition) return result return nan_agg_with_axis @functools.cache def generate_numba_ewm_table_func(nopython: bool, nogil: bool, parallel: bool, com: float, adjust: bool, ignore_na: bool, deltas: tuple, normalize: bool): if TYPE_CHECKING: import numba else: numba = import_optional_dependency('numba') @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def ewm_table(values: np.ndarray, begin: np.ndarray, end: np.ndarray, minimum_periods: int) -> np.ndarray: alpha = 1.0 / (1.0 + com) old_wt_factor = 1.0 - alpha new_wt = 1.0 if adjust else alpha old_wt = np.ones(values.shape[1]) result = np.empty(values.shape) weighted = values[0].copy() nobs = (~np.isnan(weighted)).astype(np.int64) result[0] = np.where(nobs >= minimum_periods, weighted, np.nan) for i in range(1, len(values)): cur = values[i] is_observations = ~np.isnan(cur) nobs += is_observations.astype(np.int64) for j in numba.prange(len(cur)): if not np.isnan(weighted[j]): if is_observations[j] or not ignore_na: if normalize: old_wt[j] *= old_wt_factor ** deltas[i - 1] if not adjust and com == 1: new_wt = 1.0 - old_wt[j] else: weighted[j] = old_wt_factor * weighted[j] if is_observations[j]: if normalize: if weighted[j] != cur[j]: weighted[j] = old_wt[j] * weighted[j] + new_wt * cur[j] if normalize: weighted[j] = weighted[j] / (old_wt[j] + new_wt) if adjust: old_wt[j] += new_wt else: old_wt[j] = 1.0 else: weighted[j] += cur[j] elif is_observations[j]: weighted[j] = cur[j] result[i] = np.where(nobs >= minimum_periods, weighted, np.nan) return result return ewm_table # File: pandas-main/pandas/core/window/online.py from __future__ import annotations from typing import TYPE_CHECKING import numpy as np from pandas.compat._optional import import_optional_dependency def generate_online_numba_ewma_func(nopython: bool, nogil: bool, parallel: bool): if TYPE_CHECKING: import numba else: numba = import_optional_dependency('numba') @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) def online_ewma(values: np.ndarray, deltas: np.ndarray, minimum_periods: int, old_wt_factor: float, new_wt: float, old_wt: np.ndarray, adjust: bool, ignore_na: bool): result = np.empty(values.shape) weighted_avg = values[0].copy() nobs = (~np.isnan(weighted_avg)).astype(np.int64) result[0] = np.where(nobs >= minimum_periods, weighted_avg, np.nan) for i in range(1, len(values)): cur = values[i] is_observations = ~np.isnan(cur) nobs += is_observations.astype(np.int64) for j in numba.prange(len(cur)): if not np.isnan(weighted_avg[j]): if is_observations[j] or not ignore_na: old_wt[j] *= old_wt_factor ** deltas[j - 1] if is_observations[j]: if weighted_avg[j] != cur[j]: weighted_avg[j] = (old_wt[j] * weighted_avg[j] + new_wt * cur[j]) / (old_wt[j] + new_wt) if adjust: old_wt[j] += new_wt else: old_wt[j] = 1.0 elif is_observations[j]: weighted_avg[j] = cur[j] result[i] = np.where(nobs >= minimum_periods, weighted_avg, np.nan) return (result, old_wt) return online_ewma class EWMMeanState: def __init__(self, com, adjust, ignore_na, shape) -> None: alpha = 1.0 / (1.0 + com) self.shape = shape self.adjust = adjust self.ignore_na = ignore_na self.new_wt = 1.0 if 
adjust else alpha self.old_wt_factor = 1.0 - alpha self.old_wt = np.ones(self.shape[-1]) self.last_ewm = None def run_ewm(self, weighted_avg, deltas, min_periods, ewm_func): (result, old_wt) = ewm_func(weighted_avg, deltas, min_periods, self.old_wt_factor, self.new_wt, self.old_wt, self.adjust, self.ignore_na) self.old_wt = old_wt self.last_ewm = result[-1] return result def reset(self) -> None: self.old_wt = np.ones(self.shape[-1]) self.last_ewm = None # File: pandas-main/pandas/core/window/rolling.py """""" from __future__ import annotations import copy from datetime import timedelta from functools import partial import inspect from textwrap import dedent from typing import TYPE_CHECKING, Any, Literal import numpy as np from pandas._libs.tslibs import BaseOffset, Timedelta, to_offset import pandas._libs.window.aggregations as window_aggregations from pandas.compat._optional import import_optional_dependency from pandas.errors import DataError from pandas.util._decorators import doc from pandas.core.dtypes.common import ensure_float64, is_bool, is_integer, is_numeric_dtype, needs_i8_conversion from pandas.core.dtypes.dtypes import ArrowDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries from pandas.core.dtypes.missing import notna from pandas.core._numba import executor from pandas.core.algorithms import factorize from pandas.core.apply import ResamplerWindowApply from pandas.core.arrays import ExtensionArray from pandas.core.base import SelectionMixin import pandas.core.common as com from pandas.core.indexers.objects import BaseIndexer, FixedWindowIndexer, GroupbyIndexer, VariableWindowIndexer from pandas.core.indexes.api import DatetimeIndex, Index, MultiIndex, PeriodIndex, TimedeltaIndex from pandas.core.reshape.concat import concat from pandas.core.util.numba_ import get_jit_arguments, maybe_use_numba from pandas.core.window.common import flex_binary_moment, zsqrt from pandas.core.window.doc import _shared_docs, create_section_header, kwargs_numeric_only, kwargs_scipy, numba_notes, template_header, template_returns, template_see_also, window_agg_numba_parameters, window_apply_parameters from pandas.core.window.numba_ import generate_manual_numpy_nan_agg_with_axis, generate_numba_apply_func, generate_numba_table_func if TYPE_CHECKING: from collections.abc import Callable from collections.abc import Hashable, Iterator, Sized from pandas._typing import ArrayLike, NDFrameT, QuantileInterpolation, WindowingRankType, npt from pandas import DataFrame, Series from pandas.core.generic import NDFrame from pandas.core.groupby.ops import BaseGrouper from pandas.core.arrays.datetimelike import dtype_to_unit class BaseWindow(SelectionMixin): _attributes: list[str] = [] exclusions: frozenset[Hashable] = frozenset() _on: Index def __init__(self, obj: NDFrame, window=None, min_periods: int | None=None, center: bool | None=False, win_type: str | None=None, on: str | Index | None=None, closed: str | None=None, step: int | None=None, method: str='single', *, selection=None) -> None: self.obj = obj self.on = on self.closed = closed self.step = step self.window = window self.min_periods = min_periods self.center = center self.win_type = win_type self.method = method self._win_freq_i8: int | None = None if self.on is None: self._on = self.obj.index elif isinstance(self.on, Index): self._on = self.on elif isinstance(self.obj, ABCDataFrame) and self.on in self.obj.columns: self._on = Index(self.obj[self.on]) else: raise ValueError(f'invalid on specified as {self.on}, must be a column (of 
DataFrame), an Index or None') self._selection = selection self._validate() def _validate(self) -> None: if self.center is not None and (not is_bool(self.center)): raise ValueError('center must be a boolean') if self.min_periods is not None: if not is_integer(self.min_periods): raise ValueError('min_periods must be an integer') if self.min_periods < 0: raise ValueError('min_periods must be >= 0') if is_integer(self.window) and self.min_periods > self.window: raise ValueError(f'min_periods {self.min_periods} must be <= window {self.window}') if self.closed is not None and self.closed not in ['right', 'both', 'left', 'neither']: raise ValueError("closed must be 'right', 'left', 'both' or 'neither'") if not isinstance(self.obj, (ABCSeries, ABCDataFrame)): raise TypeError(f'invalid type: {type(self)}') if isinstance(self.window, BaseIndexer): get_window_bounds_signature = inspect.signature(self.window.get_window_bounds).parameters.keys() expected_signature = inspect.signature(BaseIndexer().get_window_bounds).parameters.keys() if get_window_bounds_signature != expected_signature: raise ValueError(f'{type(self.window).__name__} does not implement the correct signature for get_window_bounds') if self.method not in ['table', 'single']: raise ValueError("method must be 'table' or 'single'") if self.step is not None: if not is_integer(self.step): raise ValueError('step must be an integer') if self.step < 0: raise ValueError('step must be >= 0') def _check_window_bounds(self, start: np.ndarray, end: np.ndarray, num_vals: int) -> None: if len(start) != len(end): raise ValueError(f'start ({len(start)}) and end ({len(end)}) bounds must be the same length') if len(start) != (num_vals + (self.step or 1) - 1) // (self.step or 1): raise ValueError(f'start and end bounds ({len(start)}) must be the same length as the object ({num_vals}) divided by the step ({self.step}) if given and rounded up') def _slice_axis_for_step(self, index: Index, result: Sized | None=None) -> Index: return index if result is None or len(result) == len(index) else index[::self.step] def _validate_numeric_only(self, name: str, numeric_only: bool) -> None: if self._selected_obj.ndim == 1 and numeric_only and (not is_numeric_dtype(self._selected_obj.dtype)): raise NotImplementedError(f'{type(self).__name__}.{name} does not implement numeric_only') def _make_numeric_only(self, obj: NDFrameT) -> NDFrameT: result = obj.select_dtypes(include=['number'], exclude=['timedelta']) return result def _create_data(self, obj: NDFrameT, numeric_only: bool=False) -> NDFrameT: if self.on is not None and (not isinstance(self.on, Index)) and (obj.ndim == 2): obj = obj.reindex(columns=obj.columns.difference([self.on])) if obj.ndim > 1 and numeric_only: obj = self._make_numeric_only(obj) return obj def _gotitem(self, key, ndim, subset=None): if subset is None: subset = self.obj kwargs = {attr: getattr(self, attr) for attr in self._attributes} selection = self._infer_selection(key, subset) new_win = type(self)(subset, selection=selection, **kwargs) return new_win def __getattr__(self, attr: str): if attr in self._internal_names_set: return object.__getattribute__(self, attr) if attr in self.obj: return self[attr] raise AttributeError(f"'{type(self).__name__}' object has no attribute '{attr}'") def _dir_additions(self): return self.obj._dir_additions() def __repr__(self) -> str: attrs_list = (f'{attr_name}={getattr(self, attr_name)}' for attr_name in self._attributes if getattr(self, attr_name, None) is not None and attr_name[0] != '_') attrs =
','.join(attrs_list) return f'{type(self).__name__} [{attrs}]' def __iter__(self) -> Iterator: obj = self._selected_obj.set_axis(self._on) obj = self._create_data(obj) indexer = self._get_window_indexer() (start, end) = indexer.get_window_bounds(num_values=len(obj), min_periods=self.min_periods, center=self.center, closed=self.closed, step=self.step) self._check_window_bounds(start, end, len(obj)) for (s, e) in zip(start, end): result = obj.iloc[slice(s, e)] yield result def _prep_values(self, values: ArrayLike) -> np.ndarray: if needs_i8_conversion(values.dtype): raise NotImplementedError(f'ops for {type(self).__name__} for this dtype {values.dtype} are not implemented') try: if isinstance(values, ExtensionArray): values = values.to_numpy(np.float64, na_value=np.nan) else: values = ensure_float64(values) except (ValueError, TypeError) as err: raise TypeError(f'cannot handle this type -> {values.dtype}') from err inf = np.isinf(values) if inf.any(): values = np.where(inf, np.nan, values) return values def _insert_on_column(self, result: DataFrame, obj: DataFrame) -> None: from pandas import Series if self.on is not None and (not self._on.equals(obj.index)): name = self._on.name extra_col = Series(self._on, index=self.obj.index, name=name, copy=False) if name in result.columns: result[name] = extra_col elif name in result.index.names: pass elif name in self._selected_obj.columns: old_cols = self._selected_obj.columns new_cols = result.columns old_loc = old_cols.get_loc(name) overlap = new_cols.intersection(old_cols[:old_loc]) new_loc = len(overlap) result.insert(new_loc, name, extra_col) else: result[name] = extra_col @property def _index_array(self) -> npt.NDArray[np.int64] | None: if isinstance(self._on, (PeriodIndex, DatetimeIndex, TimedeltaIndex)): return self._on.asi8 elif isinstance(self._on.dtype, ArrowDtype) and self._on.dtype.kind in 'mM': return self._on.to_numpy(dtype=np.int64) return None def _resolve_output(self, out: DataFrame, obj: DataFrame) -> DataFrame: if out.shape[1] == 0 and obj.shape[1] > 0: raise DataError('No numeric types to aggregate') if out.shape[1] == 0: return obj.astype('float64') self._insert_on_column(out, obj) return out def _get_window_indexer(self) -> BaseIndexer: if isinstance(self.window, BaseIndexer): return self.window if self._win_freq_i8 is not None: return VariableWindowIndexer(index_array=self._index_array, window_size=self._win_freq_i8, center=self.center) return FixedWindowIndexer(window_size=self.window) def _apply_series(self, homogeneous_func: Callable[..., ArrayLike], name: str | None=None) -> Series: obj = self._create_data(self._selected_obj) if name == 'count': obj = notna(obj).astype(int) try: values = self._prep_values(obj._values) except (TypeError, NotImplementedError) as err: raise DataError('No numeric types to aggregate') from err result = homogeneous_func(values) index = self._slice_axis_for_step(obj.index, result) return obj._constructor(result, index=index, name=obj.name) def _apply_columnwise(self, homogeneous_func: Callable[..., ArrayLike], name: str, numeric_only: bool=False) -> DataFrame | Series: self._validate_numeric_only(name, numeric_only) if self._selected_obj.ndim == 1: return self._apply_series(homogeneous_func, name) obj = self._create_data(self._selected_obj, numeric_only) if name == 'count': obj = notna(obj).astype(int) obj._mgr = obj._mgr.consolidate() taker = [] res_values = [] for (i, arr) in enumerate(obj._iter_column_arrays()): try: arr = self._prep_values(arr) except (TypeError, NotImplementedError) as err: 
raise DataError(f'Cannot aggregate non-numeric type: {arr.dtype}') from err res = homogeneous_func(arr) res_values.append(res) taker.append(i) index = self._slice_axis_for_step(obj.index, res_values[0] if len(res_values) > 0 else None) df = type(obj)._from_arrays(res_values, index=index, columns=obj.columns.take(taker), verify_integrity=False) return self._resolve_output(df, obj) def _apply_tablewise(self, homogeneous_func: Callable[..., ArrayLike], name: str | None=None, numeric_only: bool=False) -> DataFrame | Series: if self._selected_obj.ndim == 1: raise ValueError("method='table' not applicable for Series objects.") obj = self._create_data(self._selected_obj, numeric_only) values = self._prep_values(obj.to_numpy()) result = homogeneous_func(values) index = self._slice_axis_for_step(obj.index, result) columns = obj.columns if result.shape[1] == len(obj.columns) else obj.columns[::self.step] out = obj._constructor(result, index=index, columns=columns) return self._resolve_output(out, obj) def _apply_pairwise(self, target: DataFrame | Series, other: DataFrame | Series | None, pairwise: bool | None, func: Callable[[DataFrame | Series, DataFrame | Series], DataFrame | Series], numeric_only: bool) -> DataFrame | Series: target = self._create_data(target, numeric_only) if other is None: other = target pairwise = True if pairwise is None else pairwise elif not isinstance(other, (ABCDataFrame, ABCSeries)): raise ValueError('other must be a DataFrame or Series') elif other.ndim == 2 and numeric_only: other = self._make_numeric_only(other) return flex_binary_moment(target, other, func, pairwise=bool(pairwise)) def _apply(self, func: Callable[..., Any], name: str, numeric_only: bool=False, numba_args: tuple[Any, ...]=(), **kwargs): window_indexer = self._get_window_indexer() min_periods = self.min_periods if self.min_periods is not None else window_indexer.window_size def homogeneous_func(values: np.ndarray): if values.size == 0: return values.copy() def calc(x): (start, end) = window_indexer.get_window_bounds(num_values=len(x), min_periods=min_periods, center=self.center, closed=self.closed, step=self.step) self._check_window_bounds(start, end, len(x)) return func(x, start, end, min_periods, *numba_args) with np.errstate(all='ignore'): result = calc(values) return result if self.method == 'single': return self._apply_columnwise(homogeneous_func, name, numeric_only) else: return self._apply_tablewise(homogeneous_func, name, numeric_only) def _numba_apply(self, func: Callable[..., Any], engine_kwargs: dict[str, bool] | None=None, **func_kwargs): window_indexer = self._get_window_indexer() min_periods = self.min_periods if self.min_periods is not None else window_indexer.window_size obj = self._create_data(self._selected_obj) values = self._prep_values(obj.to_numpy()) if values.ndim == 1: values = values.reshape(-1, 1) (start, end) = window_indexer.get_window_bounds(num_values=len(values), min_periods=min_periods, center=self.center, closed=self.closed, step=self.step) self._check_window_bounds(start, end, len(values)) dtype_mapping = executor.float_dtype_mapping aggregator = executor.generate_shared_aggregator(func, dtype_mapping, is_grouped_kernel=False, **get_jit_arguments(engine_kwargs)) result = aggregator(values.T, start=start, end=end, min_periods=min_periods, **func_kwargs).T index = self._slice_axis_for_step(obj.index, result) if obj.ndim == 1: result = result.squeeze() out = obj._constructor(result, index=index, name=obj.name) return out else: columns = 
self._slice_axis_for_step(obj.columns, result.T) out = obj._constructor(result, index=index, columns=columns) return self._resolve_output(out, obj) def aggregate(self, func, *args, **kwargs): result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg() if result is None: return self.apply(func, raw=False, args=args, kwargs=kwargs) return result agg = aggregate class BaseWindowGroupby(BaseWindow): _grouper: BaseGrouper _as_index: bool _attributes: list[str] = ['_grouper'] def __init__(self, obj: DataFrame | Series, *args, _grouper: BaseGrouper, _as_index: bool=True, **kwargs) -> None: from pandas.core.groupby.ops import BaseGrouper if not isinstance(_grouper, BaseGrouper): raise ValueError('Must pass a BaseGrouper object.') self._grouper = _grouper self._as_index = _as_index obj = obj.drop(columns=self._grouper.names, errors='ignore') if kwargs.get('step') is not None: raise NotImplementedError('step not implemented for groupby') super().__init__(obj, *args, **kwargs) def _apply(self, func: Callable[..., Any], name: str, numeric_only: bool=False, numba_args: tuple[Any, ...]=(), **kwargs) -> DataFrame | Series: result = super()._apply(func, name, numeric_only, numba_args, **kwargs) grouped_object_index = self.obj.index grouped_index_name = [*grouped_object_index.names] groupby_keys = copy.copy(self._grouper.names) result_index_names = groupby_keys + grouped_index_name drop_columns = [key for key in self._grouper.names if key not in self.obj.index.names or key is None] if len(drop_columns) != len(groupby_keys): result = result.drop(columns=drop_columns, errors='ignore') codes = self._grouper.codes levels = copy.copy(self._grouper.levels) group_indices = self._grouper.indices.values() if group_indices: indexer = np.concatenate(list(group_indices)) else: indexer = np.array([], dtype=np.intp) codes = [c.take(indexer) for c in codes] if grouped_object_index is not None: idx = grouped_object_index.take(indexer) if not isinstance(idx, MultiIndex): idx = MultiIndex.from_arrays([idx]) codes.extend(list(idx.codes)) levels.extend(list(idx.levels)) result_index = MultiIndex(levels, codes, names=result_index_names, verify_integrity=False) result.index = result_index if not self._as_index: result = result.reset_index(level=list(range(len(groupby_keys)))) return result def _apply_pairwise(self, target: DataFrame | Series, other: DataFrame | Series | None, pairwise: bool | None, func: Callable[[DataFrame | Series, DataFrame | Series], DataFrame | Series], numeric_only: bool) -> DataFrame | Series: target = target.drop(columns=self._grouper.names, errors='ignore') result = super()._apply_pairwise(target, other, pairwise, func, numeric_only) if other is not None and (not all((len(group) == len(other) for group in self._grouper.indices.values()))): old_result_len = len(result) result = concat([result.take(gb_indices).reindex(result.index) for gb_indices in self._grouper.indices.values()]) gb_pairs = (com.maybe_make_list(pair) for pair in self._grouper.indices.keys()) groupby_codes = [] groupby_levels = [] for gb_level_pair in map(list, zip(*gb_pairs)): labels = np.repeat(np.array(gb_level_pair), old_result_len) (codes, levels) = factorize(labels) groupby_codes.append(codes) groupby_levels.append(levels) else: groupby_codes = self._grouper.codes groupby_levels = self._grouper.levels group_indices = self._grouper.indices.values() if group_indices: indexer = np.concatenate(list(group_indices)) else: indexer = np.array([], dtype=np.intp) if target.ndim == 1: repeat_by = 1 else: repeat_by = 
len(target.columns) groupby_codes = [np.repeat(c.take(indexer), repeat_by) for c in groupby_codes] if isinstance(result.index, MultiIndex): result_codes = list(result.index.codes) result_levels = list(result.index.levels) result_names = list(result.index.names) else: (idx_codes, idx_levels) = factorize(result.index) result_codes = [idx_codes] result_levels = [idx_levels] result_names = [result.index.name] result_codes = groupby_codes + result_codes result_levels = groupby_levels + result_levels result_names = self._grouper.names + result_names result_index = MultiIndex(result_levels, result_codes, names=result_names, verify_integrity=False) result.index = result_index return result def _create_data(self, obj: NDFrameT, numeric_only: bool=False) -> NDFrameT: if not obj.empty: groupby_order = np.concatenate(list(self._grouper.indices.values())).astype(np.int64) obj = obj.take(groupby_order) return super()._create_data(obj, numeric_only) def _gotitem(self, key, ndim, subset=None): if self.on is not None: subset = self.obj.set_index(self._on) return super()._gotitem(key, ndim, subset=subset) class Window(BaseWindow): _attributes = ['window', 'min_periods', 'center', 'win_type', 'on', 'closed', 'step', 'method'] def _validate(self) -> None: super()._validate() if not isinstance(self.win_type, str): raise ValueError(f'Invalid win_type {self.win_type}') signal = import_optional_dependency('scipy.signal.windows', extra='Scipy is required to generate window weight.') self._scipy_weight_generator = getattr(signal, self.win_type, None) if self._scipy_weight_generator is None: raise ValueError(f'Invalid win_type {self.win_type}') if isinstance(self.window, BaseIndexer): raise NotImplementedError('BaseIndexer subclasses not implemented with win_types.') if not is_integer(self.window) or self.window < 0: raise ValueError('window must be an integer 0 or greater') if self.method != 'single': raise NotImplementedError("'single' is the only supported method type.") def _center_window(self, result: np.ndarray, offset: int) -> np.ndarray: if offset > 0: lead_indexer = [slice(offset, None)] result = np.copy(result[tuple(lead_indexer)]) return result def _apply(self, func: Callable[[np.ndarray, int, int], np.ndarray], name: str, numeric_only: bool=False, numba_args: tuple[Any, ...]=(), **kwargs): window = self._scipy_weight_generator(self.window, **kwargs) offset = (len(window) - 1) // 2 if self.center else 0 def homogeneous_func(values: np.ndarray): if values.size == 0: return values.copy() def calc(x): additional_nans = np.full(offset, np.nan) x = np.concatenate((x, additional_nans)) return func(x, window, self.min_periods if self.min_periods is not None else len(window)) with np.errstate(all='ignore'): result = np.asarray(calc(values)) if self.center: result = self._center_window(result, offset) return result return self._apply_columnwise(homogeneous_func, name, numeric_only)[::self.step] @doc(_shared_docs['aggregate'], see_also=dedent('\n See Also\n --------\n DataFrame.aggregate : Similar DataFrame method.\n Series.aggregate : Similar Series method.\n '), examples=dedent('\n Examples\n --------\n >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})\n >>> df\n A B C\n 0 1 4 7\n 1 2 5 8\n 2 3 6 9\n\n >>> df.rolling(2, win_type="boxcar").agg("mean")\n A B C\n 0 NaN NaN NaN\n 1 1.5 4.5 7.5\n 2 2.5 5.5 8.5\n '), klass='Series/DataFrame', axis='') def aggregate(self, func, *args, **kwargs): result = ResamplerWindowApply(self, func, args=args, kwargs=kwargs).agg() if result is None: result = 
func(self) return result agg = aggregate @doc(template_header, create_section_header('Parameters'), kwargs_numeric_only, kwargs_scipy, create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Examples'), dedent(" >>> ser = pd.Series([0, 1, 5, 2, 8])\n\n To get an instance of :class:`~pandas.core.window.rolling.Window` we need\n to pass the parameter `win_type`.\n\n >>> type(ser.rolling(2, win_type='gaussian'))\n <class 'pandas.core.window.rolling.Window'>\n\n In order to use the `SciPy` Gaussian window we need to provide the parameters\n `M` and `std`. The parameter `M` corresponds to 2 in our example.\n We pass the second parameter `std` as a parameter of the following method\n (`sum` in this case):\n\n >>> ser.rolling(2, win_type='gaussian').sum(std=3)\n 0 NaN\n 1 0.986207\n 2 5.917243\n 3 6.903450\n 4 9.862071\n dtype: float64\n "), window_method='rolling', aggregation_description='weighted window sum', agg_method='sum') def sum(self, numeric_only: bool=False, **kwargs): window_func = window_aggregations.roll_weighted_sum return self._apply(window_func, name='sum', numeric_only=numeric_only, **kwargs) @doc(template_header, create_section_header('Parameters'), kwargs_numeric_only, kwargs_scipy, create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Examples'), dedent(" >>> ser = pd.Series([0, 1, 5, 2, 8])\n\n To get an instance of :class:`~pandas.core.window.rolling.Window` we need\n to pass the parameter `win_type`.\n\n >>> type(ser.rolling(2, win_type='gaussian'))\n <class 'pandas.core.window.rolling.Window'>\n\n In order to use the `SciPy` Gaussian window we need to provide the parameters\n `M` and `std`. The parameter `M` corresponds to 2 in our example.\n We pass the second parameter `std` as a parameter of the following method:\n\n >>> ser.rolling(2, win_type='gaussian').mean(std=3)\n 0 NaN\n 1 0.5\n 2 3.0\n 3 3.5\n 4 5.0\n dtype: float64\n "), window_method='rolling', aggregation_description='weighted window mean', agg_method='mean') def mean(self, numeric_only: bool=False, **kwargs): window_func = window_aggregations.roll_weighted_mean return self._apply(window_func, name='mean', numeric_only=numeric_only, **kwargs) @doc(template_header, create_section_header('Parameters'), dedent('\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n ').replace('\n', '', 1), kwargs_numeric_only, kwargs_scipy, create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Examples'), dedent(" >>> ser = pd.Series([0, 1, 5, 2, 8])\n\n To get an instance of :class:`~pandas.core.window.rolling.Window` we need\n to pass the parameter `win_type`.\n\n >>> type(ser.rolling(2, win_type='gaussian'))\n <class 'pandas.core.window.rolling.Window'>\n\n In order to use the `SciPy` Gaussian window we need to provide the parameters\n `M` and `std`. 
The parameter `M` corresponds to 2 in our example.\n We pass the second parameter `std` as a parameter of the following method:\n\n >>> ser.rolling(2, win_type='gaussian').var(std=3)\n 0 NaN\n 1 0.5\n 2 8.0\n 3 4.5\n 4 18.0\n dtype: float64\n "), window_method='rolling', aggregation_description='weighted window variance', agg_method='var') def var(self, ddof: int=1, numeric_only: bool=False, **kwargs): window_func = partial(window_aggregations.roll_weighted_var, ddof=ddof) kwargs.pop('name', None) return self._apply(window_func, name='var', numeric_only=numeric_only, **kwargs) @doc(template_header, create_section_header('Parameters'), dedent('\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n ').replace('\n', '', 1), kwargs_numeric_only, kwargs_scipy, create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Examples'), dedent(" >>> ser = pd.Series([0, 1, 5, 2, 8])\n\n To get an instance of :class:`~pandas.core.window.rolling.Window` we need\n to pass the parameter `win_type`.\n\n >>> type(ser.rolling(2, win_type='gaussian'))\n <class 'pandas.core.window.rolling.Window'>\n\n In order to use the `SciPy` Gaussian window we need to provide the parameters\n `M` and `std`. The parameter `M` corresponds to 2 in our example.\n We pass the second parameter `std` as a parameter of the following method:\n\n >>> ser.rolling(2, win_type='gaussian').std(std=3)\n 0 NaN\n 1 0.707107\n 2 2.828427\n 3 2.121320\n 4 4.242641\n dtype: float64\n "), window_method='rolling', aggregation_description='weighted window standard deviation', agg_method='std') def std(self, ddof: int=1, numeric_only: bool=False, **kwargs): return zsqrt(self.var(ddof=ddof, name='std', numeric_only=numeric_only, **kwargs)) class RollingAndExpandingMixin(BaseWindow): def count(self, numeric_only: bool=False): window_func = window_aggregations.roll_sum return self._apply(window_func, name='count', numeric_only=numeric_only) def apply(self, func: Callable[..., Any], raw: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None, args: tuple[Any, ...] | None=None, kwargs: dict[str, Any] | None=None): if args is None: args = () if kwargs is None: kwargs = {} if not is_bool(raw): raise ValueError('raw parameter must be `True` or `False`') numba_args: tuple[Any, ...]
= () if maybe_use_numba(engine): if raw is False: raise ValueError('raw must be `True` when using the numba engine') numba_args = args if self.method == 'single': apply_func = generate_numba_apply_func(func, **get_jit_arguments(engine_kwargs, kwargs)) else: apply_func = generate_numba_table_func(func, **get_jit_arguments(engine_kwargs, kwargs)) elif engine in ('cython', None): if engine_kwargs is not None: raise ValueError('cython engine does not accept engine_kwargs') apply_func = self._generate_cython_apply_func(args, kwargs, raw, func) else: raise ValueError("engine must be either 'numba' or 'cython'") return self._apply(apply_func, name='apply', numba_args=numba_args) def _generate_cython_apply_func(self, args: tuple[Any, ...], kwargs: dict[str, Any], raw: bool | np.bool_, function: Callable[..., Any]) -> Callable[[np.ndarray, np.ndarray, np.ndarray, int], np.ndarray]: from pandas import Series window_func = partial(window_aggregations.roll_apply, args=args, kwargs=kwargs, raw=raw, function=function) def apply_func(values, begin, end, min_periods, raw=raw): if not raw: values = Series(values, index=self._on, copy=False) return window_func(values, begin, end, min_periods) return apply_func def sum(self, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): if maybe_use_numba(engine): if self.method == 'table': func = generate_manual_numpy_nan_agg_with_axis(np.nansum) return self.apply(func, raw=True, engine=engine, engine_kwargs=engine_kwargs) else: from pandas.core._numba.kernels import sliding_sum return self._numba_apply(sliding_sum, engine_kwargs) window_func = window_aggregations.roll_sum return self._apply(window_func, name='sum', numeric_only=numeric_only) def max(self, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): if maybe_use_numba(engine): if self.method == 'table': func = generate_manual_numpy_nan_agg_with_axis(np.nanmax) return self.apply(func, raw=True, engine=engine, engine_kwargs=engine_kwargs) else: from pandas.core._numba.kernels import sliding_min_max return self._numba_apply(sliding_min_max, engine_kwargs, is_max=True) window_func = window_aggregations.roll_max return self._apply(window_func, name='max', numeric_only=numeric_only) def min(self, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): if maybe_use_numba(engine): if self.method == 'table': func = generate_manual_numpy_nan_agg_with_axis(np.nanmin) return self.apply(func, raw=True, engine=engine, engine_kwargs=engine_kwargs) else: from pandas.core._numba.kernels import sliding_min_max return self._numba_apply(sliding_min_max, engine_kwargs, is_max=False) window_func = window_aggregations.roll_min return self._apply(window_func, name='min', numeric_only=numeric_only) def mean(self, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): if maybe_use_numba(engine): if self.method == 'table': func = generate_manual_numpy_nan_agg_with_axis(np.nanmean) return self.apply(func, raw=True, engine=engine, engine_kwargs=engine_kwargs) else: from pandas.core._numba.kernels import sliding_mean return self._numba_apply(sliding_mean, engine_kwargs) window_func = window_aggregations.roll_mean return self._apply(window_func, name='mean', numeric_only=numeric_only) def median(self, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, 
engine_kwargs: dict[str, bool] | None=None): if maybe_use_numba(engine): if self.method == 'table': func = generate_manual_numpy_nan_agg_with_axis(np.nanmedian) else: func = np.nanmedian return self.apply(func, raw=True, engine=engine, engine_kwargs=engine_kwargs) window_func = window_aggregations.roll_median_c return self._apply(window_func, name='median', numeric_only=numeric_only) def std(self, ddof: int=1, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): if maybe_use_numba(engine): if self.method == 'table': raise NotImplementedError("std not supported with method='table'") from pandas.core._numba.kernels import sliding_var return zsqrt(self._numba_apply(sliding_var, engine_kwargs, ddof=ddof)) window_func = window_aggregations.roll_var def zsqrt_func(values, begin, end, min_periods): return zsqrt(window_func(values, begin, end, min_periods, ddof=ddof)) return self._apply(zsqrt_func, name='std', numeric_only=numeric_only) def var(self, ddof: int=1, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): if maybe_use_numba(engine): if self.method == 'table': raise NotImplementedError("var not supported with method='table'") from pandas.core._numba.kernels import sliding_var return self._numba_apply(sliding_var, engine_kwargs, ddof=ddof) window_func = partial(window_aggregations.roll_var, ddof=ddof) return self._apply(window_func, name='var', numeric_only=numeric_only) def skew(self, numeric_only: bool=False): window_func = window_aggregations.roll_skew return self._apply(window_func, name='skew', numeric_only=numeric_only) def sem(self, ddof: int=1, numeric_only: bool=False): self._validate_numeric_only('sem', numeric_only) return self.std(numeric_only=numeric_only) / (self.count(numeric_only=numeric_only) - ddof).pow(0.5) def kurt(self, numeric_only: bool=False): window_func = window_aggregations.roll_kurt return self._apply(window_func, name='kurt', numeric_only=numeric_only) def quantile(self, q: float, interpolation: QuantileInterpolation='linear', numeric_only: bool=False): if q == 1.0: window_func = window_aggregations.roll_max elif q == 0.0: window_func = window_aggregations.roll_min else: window_func = partial(window_aggregations.roll_quantile, quantile=q, interpolation=interpolation) return self._apply(window_func, name='quantile', numeric_only=numeric_only) def rank(self, method: WindowingRankType='average', ascending: bool=True, pct: bool=False, numeric_only: bool=False): window_func = partial(window_aggregations.roll_rank, method=method, ascending=ascending, percentile=pct) return self._apply(window_func, name='rank', numeric_only=numeric_only) def cov(self, other: DataFrame | Series | None=None, pairwise: bool | None=None, ddof: int=1, numeric_only: bool=False): if self.step is not None: raise NotImplementedError('step not implemented for cov') self._validate_numeric_only('cov', numeric_only) from pandas import Series def cov_func(x, y): x_array = self._prep_values(x) y_array = self._prep_values(y) window_indexer = self._get_window_indexer() min_periods = self.min_periods if self.min_periods is not None else window_indexer.window_size (start, end) = window_indexer.get_window_bounds(num_values=len(x_array), min_periods=min_periods, center=self.center, closed=self.closed, step=self.step) self._check_window_bounds(start, end, len(x_array)) with np.errstate(all='ignore'): mean_x_y = window_aggregations.roll_mean(x_array * y_array, start, end, 
min_periods) mean_x = window_aggregations.roll_mean(x_array, start, end, min_periods) mean_y = window_aggregations.roll_mean(y_array, start, end, min_periods) count_x_y = window_aggregations.roll_sum(notna(x_array + y_array).astype(np.float64), start, end, 0) result = (mean_x_y - mean_x * mean_y) * (count_x_y / (count_x_y - ddof)) return Series(result, index=x.index, name=x.name, copy=False) return self._apply_pairwise(self._selected_obj, other, pairwise, cov_func, numeric_only) def corr(self, other: DataFrame | Series | None=None, pairwise: bool | None=None, ddof: int=1, numeric_only: bool=False): if self.step is not None: raise NotImplementedError('step not implemented for corr') self._validate_numeric_only('corr', numeric_only) from pandas import Series def corr_func(x, y): x_array = self._prep_values(x) y_array = self._prep_values(y) window_indexer = self._get_window_indexer() min_periods = self.min_periods if self.min_periods is not None else window_indexer.window_size (start, end) = window_indexer.get_window_bounds(num_values=len(x_array), min_periods=min_periods, center=self.center, closed=self.closed, step=self.step) self._check_window_bounds(start, end, len(x_array)) with np.errstate(all='ignore'): mean_x_y = window_aggregations.roll_mean(x_array * y_array, start, end, min_periods) mean_x = window_aggregations.roll_mean(x_array, start, end, min_periods) mean_y = window_aggregations.roll_mean(y_array, start, end, min_periods) count_x_y = window_aggregations.roll_sum(notna(x_array + y_array).astype(np.float64), start, end, 0) x_var = window_aggregations.roll_var(x_array, start, end, min_periods, ddof) y_var = window_aggregations.roll_var(y_array, start, end, min_periods, ddof) numerator = (mean_x_y - mean_x * mean_y) * (count_x_y / (count_x_y - ddof)) denominator = (x_var * y_var) ** 0.5 result = numerator / denominator return Series(result, index=x.index, name=x.name, copy=False) return self._apply_pairwise(self._selected_obj, other, pairwise, corr_func, numeric_only) class Rolling(RollingAndExpandingMixin): _attributes: list[str] = ['window', 'min_periods', 'center', 'win_type', 'on', 'closed', 'step', 'method'] def _validate(self) -> None: super()._validate() if (self.obj.empty or isinstance(self._on, (DatetimeIndex, TimedeltaIndex, PeriodIndex)) or (isinstance(self._on.dtype, ArrowDtype) and self._on.dtype.kind in 'mM')) and isinstance(self.window, (str, BaseOffset, timedelta)): self._validate_datetimelike_monotonic() try: freq = to_offset(self.window) except (TypeError, ValueError) as err: raise ValueError(f'passed window {self.window} is not compatible with a datetimelike index') from err if isinstance(self._on, PeriodIndex): self._win_freq_i8 = freq.nanos / (self._on.freq.nanos / self._on.freq.n) else: try: unit = dtype_to_unit(self._on.dtype) except TypeError: unit = 'ns' self._win_freq_i8 = Timedelta(freq.nanos).as_unit(unit)._value if self.min_periods is None: self.min_periods = 1 if self.step is not None: raise NotImplementedError('step is not supported with frequency windows') elif isinstance(self.window, BaseIndexer): pass elif not is_integer(self.window) or self.window < 0: raise ValueError('window must be an integer 0 or greater') def _validate_datetimelike_monotonic(self) -> None: if self._on.hasnans: self._raise_monotonic_error('values must not have NaT') if not (self._on.is_monotonic_increasing or self._on.is_monotonic_decreasing): self._raise_monotonic_error('values must be monotonic') def _raise_monotonic_error(self, msg: str): on = self.on if on is None: on = 
'index' raise ValueError(f'{on} {msg}') @doc(_shared_docs['aggregate'], see_also=dedent('\n See Also\n --------\n Series.rolling : Calling object with Series data.\n DataFrame.rolling : Calling object with DataFrame data.\n '), examples=dedent('\n Examples\n --------\n >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]})\n >>> df\n A B C\n 0 1 4 7\n 1 2 5 8\n 2 3 6 9\n\n >>> df.rolling(2).sum()\n A B C\n 0 NaN NaN NaN\n 1 3.0 9.0 15.0\n 2 5.0 11.0 17.0\n\n >>> df.rolling(2).agg({"A": "sum", "B": "min"})\n A B\n 0 NaN NaN\n 1 3.0 4.0\n 2 5.0 5.0\n '), klass='Series/Dataframe', axis='') def aggregate(self, func, *args, **kwargs): return super().aggregate(func, *args, **kwargs) agg = aggregate @doc(template_header, create_section_header('Parameters'), kwargs_numeric_only, create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Examples'), dedent('\n >>> s = pd.Series([2, 3, np.nan, 10])\n >>> s.rolling(2).count()\n 0 NaN\n 1 2.0\n 2 1.0\n 3 1.0\n dtype: float64\n >>> s.rolling(3).count()\n 0 NaN\n 1 NaN\n 2 2.0\n 3 2.0\n dtype: float64\n >>> s.rolling(4).count()\n 0 NaN\n 1 NaN\n 2 NaN\n 3 3.0\n dtype: float64\n ').replace('\n', '', 1), window_method='rolling', aggregation_description='count of non NaN observations', agg_method='count') def count(self, numeric_only: bool=False): return super().count(numeric_only) @doc(template_header, create_section_header('Parameters'), window_apply_parameters, create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Examples'), dedent(' >>> ser = pd.Series([1, 6, 5, 4])\n >>> ser.rolling(2).apply(lambda s: s.sum() - s.min())\n 0 NaN\n 1 6.0\n 2 6.0\n 3 5.0\n dtype: float64\n '), window_method='rolling', aggregation_description='custom aggregation function', agg_method='apply') def apply(self, func: Callable[..., Any], raw: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None, args: tuple[Any, ...] 
| None=None, kwargs: dict[str, Any] | None=None): return super().apply(func, raw=raw, engine=engine, engine_kwargs=engine_kwargs, args=args, kwargs=kwargs) @doc(template_header, create_section_header('Parameters'), kwargs_numeric_only, window_agg_numba_parameters(), create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Notes'), numba_notes, create_section_header('Examples'), dedent('\n >>> s = pd.Series([1, 2, 3, 4, 5])\n >>> s\n 0 1\n 1 2\n 2 3\n 3 4\n 4 5\n dtype: int64\n\n >>> s.rolling(3).sum()\n 0 NaN\n 1 NaN\n 2 6.0\n 3 9.0\n 4 12.0\n dtype: float64\n\n >>> s.rolling(3, center=True).sum()\n 0 NaN\n 1 6.0\n 2 9.0\n 3 12.0\n 4 NaN\n dtype: float64\n\n For DataFrame, each sum is computed column-wise.\n\n >>> df = pd.DataFrame({{"A": s, "B": s ** 2}})\n >>> df\n A B\n 0 1 1\n 1 2 4\n 2 3 9\n 3 4 16\n 4 5 25\n\n >>> df.rolling(3).sum()\n A B\n 0 NaN NaN\n 1 NaN NaN\n 2 6.0 14.0\n 3 9.0 29.0\n 4 12.0 50.0\n ').replace('\n', '', 1), window_method='rolling', aggregation_description='sum', agg_method='sum') def sum(self, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): return super().sum(numeric_only=numeric_only, engine=engine, engine_kwargs=engine_kwargs) @doc(template_header, create_section_header('Parameters'), kwargs_numeric_only, dedent('\n *args : iterable, optional\n Positional arguments passed into ``func``.\n\n ').replace('\n', '', 1), window_agg_numba_parameters(), dedent('\n **kwargs : mapping, optional\n A dictionary of keyword arguments passed into ``func``.\n\n ').replace('\n', '', 1), create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Notes'), numba_notes, create_section_header('Examples'), dedent(' >>> ser = pd.Series([1, 2, 3, 4])\n >>> ser.rolling(2).max()\n 0 NaN\n 1 2.0\n 2 3.0\n 3 4.0\n dtype: float64\n '), window_method='rolling', aggregation_description='maximum', agg_method='max') def max(self, numeric_only: bool=False, *args, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None, **kwargs): return super().max(numeric_only=numeric_only, engine=engine, engine_kwargs=engine_kwargs) @doc(template_header, create_section_header('Parameters'), kwargs_numeric_only, window_agg_numba_parameters(), create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Notes'), numba_notes, create_section_header('Examples'), dedent('\n Performing a rolling minimum with a window size of 3.\n\n >>> s = pd.Series([4, 3, 5, 2, 6])\n >>> s.rolling(3).min()\n 0 NaN\n 1 NaN\n 2 3.0\n 3 2.0\n 4 2.0\n dtype: float64\n ').replace('\n', '', 1), window_method='rolling', aggregation_description='minimum', agg_method='min') def min(self, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): return super().min(numeric_only=numeric_only, engine=engine, engine_kwargs=engine_kwargs) @doc(template_header, create_section_header('Parameters'), kwargs_numeric_only, window_agg_numba_parameters(), create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Notes'), numba_notes, create_section_header('Examples'), dedent('\n The below examples will show rolling mean calculations with window sizes of\n two and three, respectively.\n\n >>> s = pd.Series([1, 2, 3, 
4])\n >>> s.rolling(2).mean()\n 0 NaN\n 1 1.5\n 2 2.5\n 3 3.5\n dtype: float64\n\n >>> s.rolling(3).mean()\n 0 NaN\n 1 NaN\n 2 2.0\n 3 3.0\n dtype: float64\n ').replace('\n', '', 1), window_method='rolling', aggregation_description='mean', agg_method='mean') def mean(self, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): return super().mean(numeric_only=numeric_only, engine=engine, engine_kwargs=engine_kwargs) @doc(template_header, create_section_header('Parameters'), kwargs_numeric_only, window_agg_numba_parameters(), create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Notes'), numba_notes, create_section_header('Examples'), dedent('\n Compute the rolling median of a series with a window size of 3.\n\n >>> s = pd.Series([0, 1, 2, 3, 4])\n >>> s.rolling(3).median()\n 0 NaN\n 1 NaN\n 2 1.0\n 3 2.0\n 4 3.0\n dtype: float64\n ').replace('\n', '', 1), window_method='rolling', aggregation_description='median', agg_method='median') def median(self, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): return super().median(numeric_only=numeric_only, engine=engine, engine_kwargs=engine_kwargs) @doc(template_header, create_section_header('Parameters'), dedent('\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n ').replace('\n', '', 1), kwargs_numeric_only, window_agg_numba_parameters('1.4'), create_section_header('Returns'), template_returns, create_section_header('See Also'), 'numpy.std : Equivalent method for NumPy array.\n', template_see_also, create_section_header('Notes'), dedent('\n The default ``ddof`` of 1 used in :meth:`Series.std` is different\n than the default ``ddof`` of 0 in :func:`numpy.std`.\n\n A minimum of one period is required for the rolling calculation.\n\n ').replace('\n', '', 1), create_section_header('Examples'), dedent('\n >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])\n >>> s.rolling(3).std()\n 0 NaN\n 1 NaN\n 2 0.577350\n 3 1.000000\n 4 1.000000\n 5 1.154701\n 6 0.000000\n dtype: float64\n ').replace('\n', '', 1), window_method='rolling', aggregation_description='standard deviation', agg_method='std') def std(self, ddof: int=1, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): return super().std(ddof=ddof, numeric_only=numeric_only, engine=engine, engine_kwargs=engine_kwargs) @doc(template_header, create_section_header('Parameters'), dedent('\n ddof : int, default 1\n Delta Degrees of Freedom. 
The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n ').replace('\n', '', 1), kwargs_numeric_only, window_agg_numba_parameters('1.4'), create_section_header('Returns'), template_returns, create_section_header('See Also'), 'numpy.var : Equivalent method for NumPy array.\n', template_see_also, create_section_header('Notes'), dedent('\n The default ``ddof`` of 1 used in :meth:`Series.var` is different\n than the default ``ddof`` of 0 in :func:`numpy.var`.\n\n A minimum of one period is required for the rolling calculation.\n\n ').replace('\n', '', 1), create_section_header('Examples'), dedent('\n >>> s = pd.Series([5, 5, 6, 7, 5, 5, 5])\n >>> s.rolling(3).var()\n 0 NaN\n 1 NaN\n 2 0.333333\n 3 1.000000\n 4 1.000000\n 5 1.333333\n 6 0.000000\n dtype: float64\n ').replace('\n', '', 1), window_method='rolling', aggregation_description='variance', agg_method='var') def var(self, ddof: int=1, numeric_only: bool=False, engine: Literal['cython', 'numba'] | None=None, engine_kwargs: dict[str, bool] | None=None): return super().var(ddof=ddof, numeric_only=numeric_only, engine=engine, engine_kwargs=engine_kwargs) @doc(template_header, create_section_header('Parameters'), kwargs_numeric_only, create_section_header('Returns'), template_returns, create_section_header('See Also'), 'scipy.stats.skew : Third moment of a probability density.\n', template_see_also, create_section_header('Notes'), dedent('\n A minimum of three periods is required for the rolling calculation.\n\n '), create_section_header('Examples'), dedent(' >>> ser = pd.Series([1, 5, 2, 7, 15, 6])\n >>> ser.rolling(3).skew().round(6)\n 0 NaN\n 1 NaN\n 2 1.293343\n 3 -0.585583\n 4 0.670284\n 5 1.652317\n dtype: float64\n '), window_method='rolling', aggregation_description='unbiased skewness', agg_method='skew') def skew(self, numeric_only: bool=False): return super().skew(numeric_only=numeric_only) @doc(template_header, create_section_header('Parameters'), dedent('\n ddof : int, default 1\n Delta Degrees of Freedom. 
The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n ').replace('\n', '', 1), kwargs_numeric_only, create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Notes'), 'A minimum of one period is required for the calculation.\n\n', create_section_header('Examples'), dedent('\n >>> s = pd.Series([0, 1, 2, 3])\n >>> s.rolling(2, min_periods=1).sem()\n 0 NaN\n 1 0.707107\n 2 0.707107\n 3 0.707107\n dtype: float64\n ').replace('\n', '', 1), window_method='rolling', aggregation_description='standard error of mean', agg_method='sem') def sem(self, ddof: int=1, numeric_only: bool=False): self._validate_numeric_only('sem', numeric_only) return self.std(numeric_only=numeric_only) / (self.count(numeric_only) - ddof).pow(0.5) @doc(template_header, create_section_header('Parameters'), kwargs_numeric_only, create_section_header('Returns'), template_returns, create_section_header('See Also'), 'scipy.stats.kurtosis : Reference SciPy method.\n', template_see_also, create_section_header('Notes'), 'A minimum of four periods is required for the calculation.\n\n', create_section_header('Examples'), dedent('\n The example below will show a rolling calculation with a window size of\n four matching the equivalent function call using `scipy.stats`.\n\n >>> arr = [1, 2, 3, 4, 999]\n >>> import scipy.stats\n >>> print(f"{{scipy.stats.kurtosis(arr[:-1], bias=False):.6f}}")\n -1.200000\n >>> print(f"{{scipy.stats.kurtosis(arr[1:], bias=False):.6f}}")\n 3.999946\n >>> s = pd.Series(arr)\n >>> s.rolling(4).kurt()\n 0 NaN\n 1 NaN\n 2 NaN\n 3 -1.200000\n 4 3.999946\n dtype: float64\n ').replace('\n', '', 1), window_method='rolling', aggregation_description="Fisher's definition of kurtosis without bias", agg_method='kurt') def kurt(self, numeric_only: bool=False): return super().kurt(numeric_only=numeric_only) @doc(template_header, create_section_header('Parameters'), dedent("\n q : float\n Quantile to compute. 0 <= quantile <= 1.\n\n .. deprecated:: 2.1.0\n This was renamed from 'quantile' to 'q' in version 2.1.0.\n interpolation : {{'linear', 'lower', 'higher', 'midpoint', 'nearest'}}\n This optional parameter specifies the interpolation method to use,\n when the desired quantile lies between two data points `i` and `j`:\n\n * linear: `i + (j - i) * fraction`, where `fraction` is the\n fractional part of the index surrounded by `i` and `j`.\n * lower: `i`.\n * higher: `j`.\n * nearest: `i` or `j` whichever is nearest.\n * midpoint: (`i` + `j`) / 2.\n ").replace('\n', '', 1), kwargs_numeric_only, create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Examples'), dedent("\n >>> s = pd.Series([1, 2, 3, 4])\n >>> s.rolling(2).quantile(.4, interpolation='lower')\n 0 NaN\n 1 1.0\n 2 2.0\n 3 3.0\n dtype: float64\n\n >>> s.rolling(2).quantile(.4, interpolation='midpoint')\n 0 NaN\n 1 1.5\n 2 2.5\n 3 3.5\n dtype: float64\n ").replace('\n', '', 1), window_method='rolling', aggregation_description='quantile', agg_method='quantile') def quantile(self, q: float, interpolation: QuantileInterpolation='linear', numeric_only: bool=False): return super().quantile(q=q, interpolation=interpolation, numeric_only=numeric_only) @doc(template_header, '.. versionadded:: 1.4.0 \n\n', create_section_header('Parameters'), dedent("\n method : {{'average', 'min', 'max'}}, default 'average'\n How to rank the group of records that have the same value (i.e. 
ties):\n\n * average: average rank of the group\n * min: lowest rank in the group\n * max: highest rank in the group\n\n ascending : bool, default True\n Whether or not the elements should be ranked in ascending order.\n pct : bool, default False\n Whether or not to display the returned rankings in percentile\n form.\n ").replace('\n', '', 1), kwargs_numeric_only, create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Examples'), dedent('\n >>> s = pd.Series([1, 4, 2, 3, 5, 3])\n >>> s.rolling(3).rank()\n 0 NaN\n 1 NaN\n 2 2.0\n 3 2.0\n 4 3.0\n 5 1.5\n dtype: float64\n\n >>> s.rolling(3).rank(method="max")\n 0 NaN\n 1 NaN\n 2 2.0\n 3 2.0\n 4 3.0\n 5 2.0\n dtype: float64\n\n >>> s.rolling(3).rank(method="min")\n 0 NaN\n 1 NaN\n 2 2.0\n 3 2.0\n 4 3.0\n 5 1.0\n dtype: float64\n ').replace('\n', '', 1), window_method='rolling', aggregation_description='rank', agg_method='rank') def rank(self, method: WindowingRankType='average', ascending: bool=True, pct: bool=False, numeric_only: bool=False): return super().rank(method=method, ascending=ascending, pct=pct, numeric_only=numeric_only) @doc(template_header, create_section_header('Parameters'), dedent('\n other : Series or DataFrame, optional\n If not supplied then will default to self and produce pairwise\n output.\n pairwise : bool, default None\n If False then only matching columns between self and other will be\n used and the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the\n output will be a MultiIndexed DataFrame in the case of DataFrame\n inputs. In the case of missing elements, only complete pairwise\n observations will be used.\n ddof : int, default 1\n Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n ').replace('\n', '', 1), kwargs_numeric_only, create_section_header('Returns'), template_returns, create_section_header('See Also'), template_see_also, create_section_header('Examples'), dedent(' >>> ser1 = pd.Series([1, 2, 3, 4])\n >>> ser2 = pd.Series([1, 4, 5, 8])\n >>> ser1.rolling(2).cov(ser2)\n 0 NaN\n 1 1.5\n 2 0.5\n 3 1.5\n dtype: float64\n '), window_method='rolling', aggregation_description='sample covariance', agg_method='cov') def cov(self, other: DataFrame | Series | None=None, pairwise: bool | None=None, ddof: int=1, numeric_only: bool=False): return super().cov(other=other, pairwise=pairwise, ddof=ddof, numeric_only=numeric_only) @doc(template_header, create_section_header('Parameters'), dedent('\n other : Series or DataFrame, optional\n If not supplied then will default to self and produce pairwise\n output.\n pairwise : bool, default None\n If False then only matching columns between self and other will be\n used and the output will be a DataFrame.\n If True then all pairwise combinations will be calculated and the\n output will be a MultiIndexed DataFrame in the case of DataFrame\n inputs. In the case of missing elements, only complete pairwise\n observations will be used.\n ddof : int, default 1\n Delta Degrees of Freedom. 
The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of elements.\n ').replace('\n', '', 1), kwargs_numeric_only, create_section_header('Returns'), template_returns, create_section_header('See Also'), dedent("\n cov : Similar method to calculate covariance.\n numpy.corrcoef : NumPy Pearson's correlation calculation.\n ").replace('\n', '', 1), template_see_also, create_section_header('Notes'), dedent("\n This function uses Pearson's definition of correlation\n (https://en.wikipedia.org/wiki/Pearson_correlation_coefficient).\n\n When `other` is not specified, the output will be self correlation (e.g.\n all 1's), except for :class:`~pandas.DataFrame` inputs with `pairwise`\n set to `True`.\n\n Function will return ``NaN`` for correlations of equal valued sequences;\n this is the result of a 0/0 division error.\n\n When `pairwise` is set to `False`, only matching columns between `self` and\n `other` will be used.\n\n When `pairwise` is set to `True`, the output will be a MultiIndex DataFrame\n with the original index on the first level, and the `other` DataFrame\n columns on the second level.\n\n In the case of missing elements, only complete pairwise observations\n will be used.\n\n ").replace('\n', '', 1), create_section_header('Examples'), dedent("\n The below example shows a rolling calculation with a window size of\n four matching the equivalent function call using :meth:`numpy.corrcoef`.\n\n >>> v1 = [3, 3, 3, 5, 8]\n >>> v2 = [3, 4, 4, 4, 8]\n >>> np.corrcoef(v1[:-1], v2[:-1])\n array([[1. , 0.33333333],\n [0.33333333, 1. ]])\n >>> np.corrcoef(v1[1:], v2[1:])\n array([[1. , 0.9169493],\n [0.9169493, 1. ]])\n >>> s1 = pd.Series(v1)\n >>> s2 = pd.Series(v2)\n >>> s1.rolling(4).corr(s2)\n 0 NaN\n 1 NaN\n 2 NaN\n 3 0.333333\n 4 0.916949\n dtype: float64\n\n The below example shows a similar rolling calculation on a\n DataFrame using the pairwise option.\n\n >>> matrix = np.array([[51., 35.],\n ... [49., 30.],\n ... [47., 32.],\n ... [46., 31.],\n ... [50., 36.]])\n >>> np.corrcoef(matrix[:-1, 0], matrix[:-1, 1])\n array([[1. , 0.6263001],\n [0.6263001, 1. ]])\n >>> np.corrcoef(matrix[1:, 0], matrix[1:, 1])\n array([[1. , 0.55536811],\n [0.55536811, 1. 
]])\n >>> df = pd.DataFrame(matrix, columns=['X', 'Y'])\n >>> df\n X Y\n 0 51.0 35.0\n 1 49.0 30.0\n 2 47.0 32.0\n 3 46.0 31.0\n 4 50.0 36.0\n >>> df.rolling(4).corr(pairwise=True)\n X Y\n 0 X NaN NaN\n Y NaN NaN\n 1 X NaN NaN\n Y NaN NaN\n 2 X NaN NaN\n Y NaN NaN\n 3 X 1.000000 0.626300\n Y 0.626300 1.000000\n 4 X 1.000000 0.555368\n Y 0.555368 1.000000\n ").replace('\n', '', 1), window_method='rolling', aggregation_description='correlation', agg_method='corr') def corr(self, other: DataFrame | Series | None=None, pairwise: bool | None=None, ddof: int=1, numeric_only: bool=False): return super().corr(other=other, pairwise=pairwise, ddof=ddof, numeric_only=numeric_only) Rolling.__doc__ = Window.__doc__ class RollingGroupby(BaseWindowGroupby, Rolling): _attributes = Rolling._attributes + BaseWindowGroupby._attributes def _get_window_indexer(self) -> GroupbyIndexer: rolling_indexer: type[BaseIndexer] indexer_kwargs: dict[str, Any] | None = None index_array = self._index_array if isinstance(self.window, BaseIndexer): rolling_indexer = type(self.window) indexer_kwargs = self.window.__dict__.copy() assert isinstance(indexer_kwargs, dict) indexer_kwargs.pop('index_array', None) window = self.window elif self._win_freq_i8 is not None: rolling_indexer = VariableWindowIndexer window = self._win_freq_i8 else: rolling_indexer = FixedWindowIndexer window = self.window window_indexer = GroupbyIndexer(index_array=index_array, window_size=window, groupby_indices=self._grouper.indices, window_indexer=rolling_indexer, indexer_kwargs=indexer_kwargs) return window_indexer def _validate_datetimelike_monotonic(self) -> None: if self._on.hasnans: self._raise_monotonic_error('values must not have NaT') for group_indices in self._grouper.indices.values(): group_on = self._on.take(group_indices) if not (group_on.is_monotonic_increasing or group_on.is_monotonic_decreasing): on = 'index' if self.on is None else self.on raise ValueError(f'Each group within {on} must be monotonic. 
Sort the values in {on} first.') # File: pandas-main/pandas/errors/__init__.py """""" from __future__ import annotations import ctypes from pandas._config.config import OptionError from pandas._libs.tslibs import OutOfBoundsDatetime, OutOfBoundsTimedelta from pandas.util.version import InvalidVersion class IntCastingNaNError(ValueError): class NullFrequencyError(ValueError): class PerformanceWarning(Warning): class UnsupportedFunctionCall(ValueError): class UnsortedIndexError(KeyError): class ParserError(ValueError): class DtypeWarning(Warning): class EmptyDataError(ValueError): class ParserWarning(Warning): class MergeError(ValueError): class AbstractMethodError(NotImplementedError): def __init__(self, class_instance, methodtype: str='method') -> None: types = {'method', 'classmethod', 'staticmethod', 'property'} if methodtype not in types: raise ValueError(f'methodtype must be one of {types}, got {methodtype} instead.') self.methodtype = methodtype self.class_instance = class_instance def __str__(self) -> str: if self.methodtype == 'classmethod': name = self.class_instance.__name__ else: name = type(self.class_instance).__name__ return f'This {self.methodtype} must be defined in the concrete class {name}' class NumbaUtilError(Exception): class DuplicateLabelError(ValueError): class InvalidIndexError(Exception): class DataError(Exception): class SpecificationError(Exception): class ChainedAssignmentError(Warning): class NumExprClobberingError(NameError): class UndefinedVariableError(NameError): def __init__(self, name: str, is_local: bool | None=None) -> None: base_msg = f'{name!r} is not defined' if is_local: msg = f'local variable {base_msg}' else: msg = f'name {base_msg}' super().__init__(msg) class IndexingError(Exception): class PyperclipException(RuntimeError): class PyperclipWindowsException(PyperclipException): def __init__(self, message: str) -> None: message += f' ({ctypes.WinError()})' super().__init__(message) class CSSWarning(UserWarning): class PossibleDataLossError(Exception): class ClosedFileError(Exception): class IncompatibilityWarning(Warning): class AttributeConflictWarning(Warning): class DatabaseError(OSError): class PossiblePrecisionLoss(Warning): class ValueLabelTypeMismatch(Warning): class InvalidColumnName(Warning): class CategoricalConversionWarning(Warning): class LossySetitemError(Exception): class NoBufferPresent(Exception): class InvalidComparison(Exception): __all__ = ['AbstractMethodError', 'AttributeConflictWarning', 'CategoricalConversionWarning', 'ChainedAssignmentError', 'ClosedFileError', 'CSSWarning', 'DatabaseError', 'DataError', 'DtypeWarning', 'DuplicateLabelError', 'EmptyDataError', 'IncompatibilityWarning', 'IntCastingNaNError', 'InvalidColumnName', 'InvalidComparison', 'InvalidIndexError', 'InvalidVersion', 'IndexingError', 'LossySetitemError', 'MergeError', 'NoBufferPresent', 'NullFrequencyError', 'NumbaUtilError', 'NumExprClobberingError', 'OptionError', 'OutOfBoundsDatetime', 'OutOfBoundsTimedelta', 'ParserError', 'ParserWarning', 'PerformanceWarning', 'PossibleDataLossError', 'PossiblePrecisionLoss', 'PyperclipException', 'PyperclipWindowsException', 'SpecificationError', 'UndefinedVariableError', 'UnsortedIndexError', 'UnsupportedFunctionCall', 'ValueLabelTypeMismatch'] # File: pandas-main/pandas/errors/cow.py _chained_assignment_msg = "A value is trying to be set on a copy of a DataFrame or Series through chained assignment.\nWhen using the Copy-on-Write mode, such chained assignment never works to update the original DataFrame or Series, 
because the intermediate object on which we are setting values always behaves as a copy.\n\nTry using '.loc[row_indexer, col_indexer] = value' instead, to perform the assignment in a single step.\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/copy_on_write.html" _chained_assignment_method_msg = "A value is trying to be set on a copy of a DataFrame or Series through chained assignment using an inplace method.\nWhen using the Copy-on-Write mode, such inplace method never works to update the original DataFrame or Series, because the intermediate object on which we are setting values always behaves as a copy.\n\nFor example, when doing 'df[col].method(value, inplace=True)', try using 'df.method({col: value}, inplace=True)' instead, to perform the operation inplace on the original object.\n\n" # File: pandas-main/pandas/io/_util.py from __future__ import annotations from typing import TYPE_CHECKING import numpy as np from pandas.compat._optional import import_optional_dependency import pandas as pd if TYPE_CHECKING: from collections.abc import Callable def _arrow_dtype_mapping() -> dict: pa = import_optional_dependency('pyarrow') return {pa.int8(): pd.Int8Dtype(), pa.int16(): pd.Int16Dtype(), pa.int32(): pd.Int32Dtype(), pa.int64(): pd.Int64Dtype(), pa.uint8(): pd.UInt8Dtype(), pa.uint16(): pd.UInt16Dtype(), pa.uint32(): pd.UInt32Dtype(), pa.uint64(): pd.UInt64Dtype(), pa.bool_(): pd.BooleanDtype(), pa.string(): pd.StringDtype(), pa.float32(): pd.Float32Dtype(), pa.float64(): pd.Float64Dtype(), pa.string(): pd.StringDtype(), pa.large_string(): pd.StringDtype()} def arrow_string_types_mapper() -> Callable: pa = import_optional_dependency('pyarrow') return {pa.string(): pd.StringDtype(na_value=np.nan), pa.large_string(): pd.StringDtype(na_value=np.nan)}.get # File: pandas-main/pandas/io/api.py """""" from pandas.io.clipboards import read_clipboard from pandas.io.excel import ExcelFile, ExcelWriter, read_excel from pandas.io.feather_format import read_feather from pandas.io.html import read_html from pandas.io.json import read_json from pandas.io.orc import read_orc from pandas.io.parquet import read_parquet from pandas.io.parsers import read_csv, read_fwf, read_table from pandas.io.pickle import read_pickle, to_pickle from pandas.io.pytables import HDFStore, read_hdf from pandas.io.sas import read_sas from pandas.io.spss import read_spss from pandas.io.sql import read_sql, read_sql_query, read_sql_table from pandas.io.stata import read_stata from pandas.io.xml import read_xml __all__ = ['ExcelFile', 'ExcelWriter', 'HDFStore', 'read_clipboard', 'read_csv', 'read_excel', 'read_feather', 'read_fwf', 'read_hdf', 'read_html', 'read_json', 'read_orc', 'read_parquet', 'read_pickle', 'read_sas', 'read_spss', 'read_sql', 'read_sql_query', 'read_sql_table', 'read_stata', 'read_table', 'read_xml', 'to_pickle'] # File: pandas-main/pandas/io/clipboard/__init__.py """""" __version__ = '1.8.2' import contextlib import ctypes from ctypes import c_size_t, c_wchar, c_wchar_p, get_errno, sizeof import os import platform from shutil import which as _executable_exists import subprocess import time import warnings from pandas.errors import PyperclipException, PyperclipWindowsException from pandas.util._exceptions import find_stack_level HAS_DISPLAY = os.getenv('DISPLAY') EXCEPT_MSG = '\n Pyperclip could not find a copy/paste mechanism for your system.\n For more information, please visit\n https://pyperclip.readthedocs.io/en/latest/index.html#not-implemented-error\n ' 
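# NOTE (editorial sketch, not part of the upstream pandas source): the module-level
# `copy` and `paste` callables defined near the end of this file start out as the lazy
# stubs `lazy_load_stub_copy` / `lazy_load_stub_paste`. The first call runs
# `determine_clipboard()`, which probes the platform (Cygwin's /dev/clipboard, the
# Windows ctypes API, WSL via clip.exe, macOS pbcopy or PyObjC, then wl-copy, xsel,
# xclip, klipper, or a Qt fallback under X11/Wayland) and rebinds `copy`/`paste` to the
# selected backend; `set_clipboard('xclip')`, for example, forces a specific backend.
# A minimal usage sketch, assuming some backend is available on the system:
#
#   >>> from pandas.io.clipboard import copy, paste
#   >>> copy("hello")   # backend is resolved on this first call
#   >>> paste()
#   'hello'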
ENCODING = 'utf-8' class PyperclipTimeoutException(PyperclipException): pass def _stringifyText(text) -> str: acceptedTypes = (str, int, float, bool) if not isinstance(text, acceptedTypes): raise PyperclipException(f'only str, int, float, and bool values can be copied to the clipboard, not {type(text).__name__}') return str(text) def init_osx_pbcopy_clipboard(): def copy_osx_pbcopy(text): text = _stringifyText(text) with subprocess.Popen(['pbcopy', 'w'], stdin=subprocess.PIPE, close_fds=True) as p: p.communicate(input=text.encode(ENCODING)) def paste_osx_pbcopy(): with subprocess.Popen(['pbpaste', 'r'], stdout=subprocess.PIPE, close_fds=True) as p: stdout = p.communicate()[0] return stdout.decode(ENCODING) return (copy_osx_pbcopy, paste_osx_pbcopy) def init_osx_pyobjc_clipboard(): def copy_osx_pyobjc(text): text = _stringifyText(text) newStr = Foundation.NSString.stringWithString_(text).nsstring() newData = newStr.dataUsingEncoding_(Foundation.NSUTF8StringEncoding) board = AppKit.NSPasteboard.generalPasteboard() board.declareTypes_owner_([AppKit.NSStringPboardType], None) board.setData_forType_(newData, AppKit.NSStringPboardType) def paste_osx_pyobjc(): board = AppKit.NSPasteboard.generalPasteboard() content = board.stringForType_(AppKit.NSStringPboardType) return content return (copy_osx_pyobjc, paste_osx_pyobjc) def init_qt_clipboard(): global QApplication try: from qtpy.QtWidgets import QApplication except ImportError: try: from PyQt5.QtWidgets import QApplication except ImportError: from PyQt4.QtGui import QApplication app = QApplication.instance() if app is None: app = QApplication([]) def copy_qt(text): text = _stringifyText(text) cb = app.clipboard() cb.setText(text) def paste_qt() -> str: cb = app.clipboard() return str(cb.text()) return (copy_qt, paste_qt) def init_xclip_clipboard(): DEFAULT_SELECTION = 'c' PRIMARY_SELECTION = 'p' def copy_xclip(text, primary=False): text = _stringifyText(text) selection = DEFAULT_SELECTION if primary: selection = PRIMARY_SELECTION with subprocess.Popen(['xclip', '-selection', selection], stdin=subprocess.PIPE, close_fds=True) as p: p.communicate(input=text.encode(ENCODING)) def paste_xclip(primary=False): selection = DEFAULT_SELECTION if primary: selection = PRIMARY_SELECTION with subprocess.Popen(['xclip', '-selection', selection, '-o'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) as p: stdout = p.communicate()[0] return stdout.decode(ENCODING) return (copy_xclip, paste_xclip) def init_xsel_clipboard(): DEFAULT_SELECTION = '-b' PRIMARY_SELECTION = '-p' def copy_xsel(text, primary=False): text = _stringifyText(text) selection_flag = DEFAULT_SELECTION if primary: selection_flag = PRIMARY_SELECTION with subprocess.Popen(['xsel', selection_flag, '-i'], stdin=subprocess.PIPE, close_fds=True) as p: p.communicate(input=text.encode(ENCODING)) def paste_xsel(primary=False): selection_flag = DEFAULT_SELECTION if primary: selection_flag = PRIMARY_SELECTION with subprocess.Popen(['xsel', selection_flag, '-o'], stdout=subprocess.PIPE, close_fds=True) as p: stdout = p.communicate()[0] return stdout.decode(ENCODING) return (copy_xsel, paste_xsel) def init_wl_clipboard(): PRIMARY_SELECTION = '-p' def copy_wl(text, primary=False): text = _stringifyText(text) args = ['wl-copy'] if primary: args.append(PRIMARY_SELECTION) if not text: args.append('--clear') subprocess.check_call(args, close_fds=True) else: p = subprocess.Popen(args, stdin=subprocess.PIPE, close_fds=True) p.communicate(input=text.encode(ENCODING)) def paste_wl(primary=False): 
args = ['wl-paste', '-n'] if primary: args.append(PRIMARY_SELECTION) p = subprocess.Popen(args, stdout=subprocess.PIPE, close_fds=True) (stdout, _stderr) = p.communicate() return stdout.decode(ENCODING) return (copy_wl, paste_wl) def init_klipper_clipboard(): def copy_klipper(text): text = _stringifyText(text) with subprocess.Popen(['qdbus', 'org.kde.klipper', '/klipper', 'setClipboardContents', text.encode(ENCODING)], stdin=subprocess.PIPE, close_fds=True) as p: p.communicate(input=None) def paste_klipper(): with subprocess.Popen(['qdbus', 'org.kde.klipper', '/klipper', 'getClipboardContents'], stdout=subprocess.PIPE, close_fds=True) as p: stdout = p.communicate()[0] clipboardContents = stdout.decode(ENCODING) assert len(clipboardContents) > 0 assert clipboardContents.endswith('\n') if clipboardContents.endswith('\n'): clipboardContents = clipboardContents[:-1] return clipboardContents return (copy_klipper, paste_klipper) def init_dev_clipboard_clipboard(): def copy_dev_clipboard(text): text = _stringifyText(text) if text == '': warnings.warn('Pyperclip cannot copy a blank string to the clipboard on Cygwin. This is effectively a no-op.', stacklevel=find_stack_level()) if '\r' in text: warnings.warn('Pyperclip cannot handle \\r characters on Cygwin.', stacklevel=find_stack_level()) with open('/dev/clipboard', 'w', encoding='utf-8') as fd: fd.write(text) def paste_dev_clipboard() -> str: with open('/dev/clipboard', encoding='utf-8') as fd: content = fd.read() return content return (copy_dev_clipboard, paste_dev_clipboard) def init_no_clipboard(): class ClipboardUnavailable: def __call__(self, *args, **kwargs): raise PyperclipException(EXCEPT_MSG) def __bool__(self) -> bool: return False return (ClipboardUnavailable(), ClipboardUnavailable()) class CheckedCall: def __init__(self, f) -> None: super().__setattr__('f', f) def __call__(self, *args): ret = self.f(*args) if not ret and get_errno(): raise PyperclipWindowsException('Error calling ' + self.f.__name__) return ret def __setattr__(self, key, value): setattr(self.f, key, value) def init_windows_clipboard(): global HGLOBAL, LPVOID, DWORD, LPCSTR, INT global HWND, HINSTANCE, HMENU, BOOL, UINT, HANDLE from ctypes.wintypes import BOOL, DWORD, HANDLE, HGLOBAL, HINSTANCE, HMENU, HWND, INT, LPCSTR, LPVOID, UINT windll = ctypes.windll msvcrt = ctypes.CDLL('msvcrt') safeCreateWindowExA = CheckedCall(windll.user32.CreateWindowExA) safeCreateWindowExA.argtypes = [DWORD, LPCSTR, LPCSTR, DWORD, INT, INT, INT, INT, HWND, HMENU, HINSTANCE, LPVOID] safeCreateWindowExA.restype = HWND safeDestroyWindow = CheckedCall(windll.user32.DestroyWindow) safeDestroyWindow.argtypes = [HWND] safeDestroyWindow.restype = BOOL OpenClipboard = windll.user32.OpenClipboard OpenClipboard.argtypes = [HWND] OpenClipboard.restype = BOOL safeCloseClipboard = CheckedCall(windll.user32.CloseClipboard) safeCloseClipboard.argtypes = [] safeCloseClipboard.restype = BOOL safeEmptyClipboard = CheckedCall(windll.user32.EmptyClipboard) safeEmptyClipboard.argtypes = [] safeEmptyClipboard.restype = BOOL safeGetClipboardData = CheckedCall(windll.user32.GetClipboardData) safeGetClipboardData.argtypes = [UINT] safeGetClipboardData.restype = HANDLE safeSetClipboardData = CheckedCall(windll.user32.SetClipboardData) safeSetClipboardData.argtypes = [UINT, HANDLE] safeSetClipboardData.restype = HANDLE safeGlobalAlloc = CheckedCall(windll.kernel32.GlobalAlloc) safeGlobalAlloc.argtypes = [UINT, c_size_t] safeGlobalAlloc.restype = HGLOBAL safeGlobalLock = CheckedCall(windll.kernel32.GlobalLock) 
safeGlobalLock.argtypes = [HGLOBAL] safeGlobalLock.restype = LPVOID safeGlobalUnlock = CheckedCall(windll.kernel32.GlobalUnlock) safeGlobalUnlock.argtypes = [HGLOBAL] safeGlobalUnlock.restype = BOOL wcslen = CheckedCall(msvcrt.wcslen) wcslen.argtypes = [c_wchar_p] wcslen.restype = UINT GMEM_MOVEABLE = 2 CF_UNICODETEXT = 13 @contextlib.contextmanager def window(): hwnd = safeCreateWindowExA(0, b'STATIC', None, 0, 0, 0, 0, 0, None, None, None, None) try: yield hwnd finally: safeDestroyWindow(hwnd) @contextlib.contextmanager def clipboard(hwnd): t = time.time() + 0.5 success = False while time.time() < t: success = OpenClipboard(hwnd) if success: break time.sleep(0.01) if not success: raise PyperclipWindowsException('Error calling OpenClipboard') try: yield finally: safeCloseClipboard() def copy_windows(text): text = _stringifyText(text) with window() as hwnd: with clipboard(hwnd): safeEmptyClipboard() if text: count = wcslen(text) + 1 handle = safeGlobalAlloc(GMEM_MOVEABLE, count * sizeof(c_wchar)) locked_handle = safeGlobalLock(handle) ctypes.memmove(c_wchar_p(locked_handle), c_wchar_p(text), count * sizeof(c_wchar)) safeGlobalUnlock(handle) safeSetClipboardData(CF_UNICODETEXT, handle) def paste_windows(): with clipboard(None): handle = safeGetClipboardData(CF_UNICODETEXT) if not handle: return '' return c_wchar_p(handle).value return (copy_windows, paste_windows) def init_wsl_clipboard(): def copy_wsl(text): text = _stringifyText(text) with subprocess.Popen(['clip.exe'], stdin=subprocess.PIPE, close_fds=True) as p: p.communicate(input=text.encode(ENCODING)) def paste_wsl(): with subprocess.Popen(['powershell.exe', '-command', 'Get-Clipboard'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True) as p: stdout = p.communicate()[0] return stdout[:-2].decode(ENCODING) return (copy_wsl, paste_wsl) def determine_clipboard(): global Foundation, AppKit, qtpy, PyQt4, PyQt5 if 'cygwin' in platform.system().lower(): if os.path.exists('/dev/clipboard'): warnings.warn("Pyperclip's support for Cygwin is not perfect, see https://github.com/asweigart/pyperclip/issues/55", stacklevel=find_stack_level()) return init_dev_clipboard_clipboard() elif os.name == 'nt' or platform.system() == 'Windows': return init_windows_clipboard() if platform.system() == 'Linux': if _executable_exists('wslconfig.exe'): return init_wsl_clipboard() if os.name == 'mac' or platform.system() == 'Darwin': try: import AppKit import Foundation except ImportError: return init_osx_pbcopy_clipboard() else: return init_osx_pyobjc_clipboard() if HAS_DISPLAY: if os.environ.get('WAYLAND_DISPLAY') and _executable_exists('wl-copy'): return init_wl_clipboard() if _executable_exists('xsel'): return init_xsel_clipboard() if _executable_exists('xclip'): return init_xclip_clipboard() if _executable_exists('klipper') and _executable_exists('qdbus'): return init_klipper_clipboard() try: import qtpy except ImportError: try: import PyQt5 except ImportError: try: import PyQt4 except ImportError: pass else: return init_qt_clipboard() else: return init_qt_clipboard() else: return init_qt_clipboard() return init_no_clipboard() def set_clipboard(clipboard): global copy, paste clipboard_types = {'pbcopy': init_osx_pbcopy_clipboard, 'pyobjc': init_osx_pyobjc_clipboard, 'qt': init_qt_clipboard, 'xclip': init_xclip_clipboard, 'xsel': init_xsel_clipboard, 'wl-clipboard': init_wl_clipboard, 'klipper': init_klipper_clipboard, 'windows': init_windows_clipboard, 'no': init_no_clipboard} if clipboard not in clipboard_types: allowed_clipboard_types = 
[repr(_) for _ in clipboard_types] raise ValueError(f"Argument must be one of {', '.join(allowed_clipboard_types)}") (copy, paste) = clipboard_types[clipboard]() def lazy_load_stub_copy(text): global copy, paste (copy, paste) = determine_clipboard() return copy(text) def lazy_load_stub_paste(): global copy, paste (copy, paste) = determine_clipboard() return paste() def is_available() -> bool: return copy != lazy_load_stub_copy and paste != lazy_load_stub_paste (copy, paste) = (lazy_load_stub_copy, lazy_load_stub_paste) def waitForPaste(timeout=None): startTime = time.time() while True: clipboardText = paste() if clipboardText != '': return clipboardText time.sleep(0.01) if timeout is not None and time.time() > startTime + timeout: raise PyperclipTimeoutException('waitForPaste() timed out after ' + str(timeout) + ' seconds.') def waitForNewPaste(timeout=None): startTime = time.time() originalText = paste() while True: currentText = paste() if currentText != originalText: return currentText time.sleep(0.01) if timeout is not None and time.time() > startTime + timeout: raise PyperclipTimeoutException('waitForNewPaste() timed out after ' + str(timeout) + ' seconds.') __all__ = ['copy', 'paste', 'waitForPaste', 'waitForNewPaste', 'set_clipboard', 'determine_clipboard'] clipboard_get = paste clipboard_set = copy # File: pandas-main/pandas/io/clipboards.py """""" from __future__ import annotations from io import StringIO from typing import TYPE_CHECKING import warnings from pandas._libs import lib from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.generic import ABCDataFrame from pandas import get_option, option_context if TYPE_CHECKING: from pandas._typing import DtypeBackend def read_clipboard(sep: str='\\s+', dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default, **kwargs): encoding = kwargs.pop('encoding', 'utf-8') if encoding is not None and encoding.lower().replace('-', '') != 'utf8': raise NotImplementedError('reading from clipboard only supports utf-8 encoding') check_dtype_backend(dtype_backend) from pandas.io.clipboard import clipboard_get from pandas.io.parsers import read_csv text = clipboard_get() try: text = text.decode(kwargs.get('encoding') or get_option('display.encoding')) except AttributeError: pass lines = text[:10000].split('\n')[:-1][:10] counts = {x.lstrip(' ').count('\t') for x in lines} if len(lines) > 1 and len(counts) == 1 and (counts.pop() != 0): sep = '\t' index_length = len(lines[0]) - len(lines[0].lstrip(' \t')) if index_length != 0: kwargs.setdefault('index_col', list(range(index_length))) elif not isinstance(sep, str): raise ValueError(f'sep={sep!r} must be a string') if len(sep) > 1 and kwargs.get('engine') is None: kwargs['engine'] = 'python' elif len(sep) > 1 and kwargs.get('engine') == 'c': warnings.warn('read_clipboard with regex separator does not work properly with c engine.', stacklevel=find_stack_level()) return read_csv(StringIO(text), sep=sep, dtype_backend=dtype_backend, **kwargs) def to_clipboard(obj, excel: bool | None=True, sep: str | None=None, **kwargs) -> None: encoding = kwargs.pop('encoding', 'utf-8') if encoding is not None and encoding.lower().replace('-', '') != 'utf8': raise ValueError('clipboard only supports utf-8 encoding') from pandas.io.clipboard import clipboard_set if excel is None: excel = True if excel: try: if sep is None: sep = '\t' buf = StringIO() obj.to_csv(buf, sep=sep, encoding='utf-8', **kwargs) text = buf.getvalue() clipboard_set(text) 
return except TypeError: warnings.warn('to_clipboard in excel mode requires a single character separator.', stacklevel=find_stack_level()) elif sep is not None: warnings.warn('to_clipboard with excel=False ignores the sep argument.', stacklevel=find_stack_level()) if isinstance(obj, ABCDataFrame): with option_context('display.max_colwidth', None): objstr = obj.to_string(**kwargs) else: objstr = str(obj) clipboard_set(objstr) # File: pandas-main/pandas/io/common.py """""" from __future__ import annotations from abc import ABC, abstractmethod import codecs from collections import defaultdict from collections.abc import Hashable, Mapping, Sequence import dataclasses import functools import gzip from io import BufferedIOBase, BytesIO, RawIOBase, StringIO, TextIOBase, TextIOWrapper import mmap import os from pathlib import Path import re import tarfile from typing import IO, TYPE_CHECKING, Any, AnyStr, DefaultDict, Generic, Literal, TypeVar, cast, overload from urllib.parse import urljoin, urlparse as parse_url, uses_netloc, uses_params, uses_relative import warnings import zipfile from pandas._typing import BaseBuffer, ReadCsvBuffer from pandas.compat._optional import import_optional_dependency from pandas.util._decorators import doc from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_bool, is_file_like, is_integer, is_list_like from pandas.core.dtypes.generic import ABCMultiIndex from pandas.core.shared_docs import _shared_docs _VALID_URLS = set(uses_relative + uses_netloc + uses_params) _VALID_URLS.discard('') _RFC_3986_PATTERN = re.compile('^[A-Za-z][A-Za-z0-9+\\-+.]*://') BaseBufferT = TypeVar('BaseBufferT', bound=BaseBuffer) if TYPE_CHECKING: from types import TracebackType from pandas._typing import CompressionDict, CompressionOptions, FilePath, ReadBuffer, StorageOptions, WriteBuffer from pandas import MultiIndex @dataclasses.dataclass class IOArgs: filepath_or_buffer: str | BaseBuffer encoding: str mode: str compression: CompressionDict should_close: bool = False @dataclasses.dataclass class IOHandles(Generic[AnyStr]): handle: IO[AnyStr] compression: CompressionDict created_handles: list[IO[bytes] | IO[str]] = dataclasses.field(default_factory=list) is_wrapped: bool = False def close(self) -> None: if self.is_wrapped: assert isinstance(self.handle, TextIOWrapper) self.handle.flush() self.handle.detach() self.created_handles.remove(self.handle) for handle in self.created_handles: handle.close() self.created_handles = [] self.is_wrapped = False def __enter__(self) -> IOHandles[AnyStr]: return self def __exit__(self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None) -> None: self.close() def is_url(url: object) -> bool: if not isinstance(url, str): return False return parse_url(url).scheme in _VALID_URLS @overload def _expand_user(filepath_or_buffer: str) -> str: ... @overload def _expand_user(filepath_or_buffer: BaseBufferT) -> BaseBufferT: ... def _expand_user(filepath_or_buffer: str | BaseBufferT) -> str | BaseBufferT: if isinstance(filepath_or_buffer, str): return os.path.expanduser(filepath_or_buffer) return filepath_or_buffer def validate_header_arg(header: object) -> None: if header is None: return if is_integer(header): header = cast(int, header) if header < 0: raise ValueError('Passing negative integer to header is invalid. 
For no header, use header=None instead') return if is_list_like(header, allow_sets=False): header = cast(Sequence, header) if not all(map(is_integer, header)): raise ValueError('header must be integer or list of integers') if any((i < 0 for i in header)): raise ValueError('cannot specify multi-index header with negative integers') return if is_bool(header): raise TypeError('Passing a bool to header is invalid. Use header=None for no header or header=int or list-like of ints to specify the row(s) making up the column names') raise ValueError('header must be integer or list of integers') @overload def stringify_path(filepath_or_buffer: FilePath, convert_file_like: bool=...) -> str: ... @overload def stringify_path(filepath_or_buffer: BaseBufferT, convert_file_like: bool=...) -> BaseBufferT: ... def stringify_path(filepath_or_buffer: FilePath | BaseBufferT, convert_file_like: bool=False) -> str | BaseBufferT: if not convert_file_like and is_file_like(filepath_or_buffer): return cast(BaseBufferT, filepath_or_buffer) if isinstance(filepath_or_buffer, os.PathLike): filepath_or_buffer = filepath_or_buffer.__fspath__() return _expand_user(filepath_or_buffer) def urlopen(*args: Any, **kwargs: Any) -> Any: import urllib.request return urllib.request.urlopen(*args, **kwargs) def is_fsspec_url(url: FilePath | BaseBuffer) -> bool: return isinstance(url, str) and bool(_RFC_3986_PATTERN.match(url)) and (not url.startswith(('http://', 'https://'))) @doc(storage_options=_shared_docs['storage_options'], compression_options=_shared_docs['compression_options'] % 'filepath_or_buffer') def _get_filepath_or_buffer(filepath_or_buffer: FilePath | BaseBuffer, encoding: str='utf-8', compression: CompressionOptions | None=None, mode: str='r', storage_options: StorageOptions | None=None) -> IOArgs: filepath_or_buffer = stringify_path(filepath_or_buffer) (compression_method, compression) = get_compression_method(compression) compression_method = infer_compression(filepath_or_buffer, compression_method) if compression_method and hasattr(filepath_or_buffer, 'write') and ('b' not in mode): warnings.warn('compression has no effect when passing a non-binary object as input.', RuntimeWarning, stacklevel=find_stack_level()) compression_method = None compression = dict(compression, method=compression_method) if 'w' in mode and compression_method in ['bz2', 'xz'] and (encoding in ['utf-16', 'utf-32']): warnings.warn(f'{compression} will not write the byte order mark for {encoding}', UnicodeWarning, stacklevel=find_stack_level()) if 'a' in mode and compression_method in ['zip', 'tar']: warnings.warn("zip and tar do not support mode 'a' properly. 
This combination will result in multiple files with same name being added to the archive.", RuntimeWarning, stacklevel=find_stack_level()) fsspec_mode = mode if 't' not in fsspec_mode and 'b' not in fsspec_mode: fsspec_mode += 'b' if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer): storage_options = storage_options or {} import urllib.request req_info = urllib.request.Request(filepath_or_buffer, headers=storage_options) with urlopen(req_info) as req: content_encoding = req.headers.get('Content-Encoding', None) if content_encoding == 'gzip': compression = {'method': 'gzip'} reader = BytesIO(req.read()) return IOArgs(filepath_or_buffer=reader, encoding=encoding, compression=compression, should_close=True, mode=fsspec_mode) if is_fsspec_url(filepath_or_buffer): assert isinstance(filepath_or_buffer, str) if filepath_or_buffer.startswith('s3a://'): filepath_or_buffer = filepath_or_buffer.replace('s3a://', 's3://') if filepath_or_buffer.startswith('s3n://'): filepath_or_buffer = filepath_or_buffer.replace('s3n://', 's3://') fsspec = import_optional_dependency('fsspec') err_types_to_retry_with_anon: list[Any] = [] try: import_optional_dependency('botocore') from botocore.exceptions import ClientError, NoCredentialsError err_types_to_retry_with_anon = [ClientError, NoCredentialsError, PermissionError] except ImportError: pass try: file_obj = fsspec.open(filepath_or_buffer, mode=fsspec_mode, **storage_options or {}).open() except tuple(err_types_to_retry_with_anon): if storage_options is None: storage_options = {'anon': True} else: storage_options = dict(storage_options) storage_options['anon'] = True file_obj = fsspec.open(filepath_or_buffer, mode=fsspec_mode, **storage_options or {}).open() return IOArgs(filepath_or_buffer=file_obj, encoding=encoding, compression=compression, should_close=True, mode=fsspec_mode) elif storage_options: raise ValueError('storage_options passed with file object or non-fsspec file path') if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)): return IOArgs(filepath_or_buffer=_expand_user(filepath_or_buffer), encoding=encoding, compression=compression, should_close=False, mode=mode) if not (hasattr(filepath_or_buffer, 'read') or hasattr(filepath_or_buffer, 'write')): msg = f'Invalid file path or buffer object type: {type(filepath_or_buffer)}' raise ValueError(msg) return IOArgs(filepath_or_buffer=filepath_or_buffer, encoding=encoding, compression=compression, should_close=False, mode=mode) def file_path_to_url(path: str) -> str: from urllib.request import pathname2url return urljoin('file:', pathname2url(path)) extension_to_compression = {'.tar': 'tar', '.tar.gz': 'tar', '.tar.bz2': 'tar', '.tar.xz': 'tar', '.gz': 'gzip', '.bz2': 'bz2', '.zip': 'zip', '.xz': 'xz', '.zst': 'zstd'} _supported_compressions = set(extension_to_compression.values()) def get_compression_method(compression: CompressionOptions) -> tuple[str | None, CompressionDict]: compression_method: str | None if isinstance(compression, Mapping): compression_args = dict(compression) try: compression_method = compression_args.pop('method') except KeyError as err: raise ValueError("If mapping, compression must have key 'method'") from err else: compression_args = {} compression_method = compression return (compression_method, compression_args) @doc(compression_options=_shared_docs['compression_options'] % 'filepath_or_buffer') def infer_compression(filepath_or_buffer: FilePath | BaseBuffer, compression: str | None) -> str | None: if compression is None: return None if compression == 
'infer': filepath_or_buffer = stringify_path(filepath_or_buffer, convert_file_like=True) if not isinstance(filepath_or_buffer, str): return None for (extension, compression) in extension_to_compression.items(): if filepath_or_buffer.lower().endswith(extension): return compression return None if compression in _supported_compressions: return compression valid = ['infer', None] + sorted(_supported_compressions) msg = f'Unrecognized compression type: {compression}\nValid compression types are {valid}' raise ValueError(msg) def check_parent_directory(path: Path | str) -> None: parent = Path(path).parent if not parent.is_dir(): raise OSError(f"Cannot save file into a non-existent directory: '{parent}'") @overload def get_handle(path_or_buf: FilePath | BaseBuffer, mode: str, *, encoding: str | None=..., compression: CompressionOptions=..., memory_map: bool=..., is_text: Literal[False], errors: str | None=..., storage_options: StorageOptions=...) -> IOHandles[bytes]: ... @overload def get_handle(path_or_buf: FilePath | BaseBuffer, mode: str, *, encoding: str | None=..., compression: CompressionOptions=..., memory_map: bool=..., is_text: Literal[True]=..., errors: str | None=..., storage_options: StorageOptions=...) -> IOHandles[str]: ... @overload def get_handle(path_or_buf: FilePath | BaseBuffer, mode: str, *, encoding: str | None=..., compression: CompressionOptions=..., memory_map: bool=..., is_text: bool=..., errors: str | None=..., storage_options: StorageOptions=...) -> IOHandles[str] | IOHandles[bytes]: ... @doc(compression_options=_shared_docs['compression_options'] % 'path_or_buf') def get_handle(path_or_buf: FilePath | BaseBuffer, mode: str, *, encoding: str | None=None, compression: CompressionOptions | None=None, memory_map: bool=False, is_text: bool=True, errors: str | None=None, storage_options: StorageOptions | None=None) -> IOHandles[str] | IOHandles[bytes]: encoding = encoding or 'utf-8' errors = errors or 'strict' if _is_binary_mode(path_or_buf, mode) and 'b' not in mode: mode += 'b' codecs.lookup(encoding) if isinstance(errors, str): codecs.lookup_error(errors) ioargs = _get_filepath_or_buffer(path_or_buf, encoding=encoding, compression=compression, mode=mode, storage_options=storage_options) handle = ioargs.filepath_or_buffer handles: list[BaseBuffer] (handle, memory_map, handles) = _maybe_memory_map(handle, memory_map) is_path = isinstance(handle, str) compression_args = dict(ioargs.compression) compression = compression_args.pop('method') if 'r' not in mode and is_path: check_parent_directory(str(handle)) if compression: if compression != 'zstd': ioargs.mode = ioargs.mode.replace('t', '') elif compression == 'zstd' and 'b' not in ioargs.mode: ioargs.mode += 'b' if compression == 'gzip': if isinstance(handle, str): handle = gzip.GzipFile(filename=handle, mode=ioargs.mode, **compression_args) else: handle = gzip.GzipFile(fileobj=handle, mode=ioargs.mode, **compression_args) elif compression == 'bz2': import bz2 handle = bz2.BZ2File(handle, mode=ioargs.mode, **compression_args) elif compression == 'zip': handle = _BytesZipFile(handle, ioargs.mode, **compression_args) if handle.buffer.mode == 'r': handles.append(handle) zip_names = handle.buffer.namelist() if len(zip_names) == 1: handle = handle.buffer.open(zip_names.pop()) elif not zip_names: raise ValueError(f'Zero files found in ZIP file {path_or_buf}') else: raise ValueError(f'Multiple files found in ZIP file. 
Only one file per ZIP: {zip_names}') elif compression == 'tar': compression_args.setdefault('mode', ioargs.mode) if isinstance(handle, str): handle = _BytesTarFile(name=handle, **compression_args) else: handle = _BytesTarFile(fileobj=handle, **compression_args) assert isinstance(handle, _BytesTarFile) if 'r' in handle.buffer.mode: handles.append(handle) files = handle.buffer.getnames() if len(files) == 1: file = handle.buffer.extractfile(files[0]) assert file is not None handle = file elif not files: raise ValueError(f'Zero files found in TAR archive {path_or_buf}') else: raise ValueError(f'Multiple files found in TAR archive. Only one file per TAR archive: {files}') elif compression == 'xz': import lzma handle = lzma.LZMAFile(handle, ioargs.mode, **compression_args) elif compression == 'zstd': zstd = import_optional_dependency('zstandard') if 'r' in ioargs.mode: open_args = {'dctx': zstd.ZstdDecompressor(**compression_args)} else: open_args = {'cctx': zstd.ZstdCompressor(**compression_args)} handle = zstd.open(handle, mode=ioargs.mode, **open_args) else: msg = f'Unrecognized compression type: {compression}' raise ValueError(msg) assert not isinstance(handle, str) handles.append(handle) elif isinstance(handle, str): if ioargs.encoding and 'b' not in ioargs.mode: handle = open(handle, ioargs.mode, encoding=ioargs.encoding, errors=errors, newline='') else: handle = open(handle, ioargs.mode) handles.append(handle) is_wrapped = False if not is_text and ioargs.mode == 'rb' and isinstance(handle, TextIOBase): handle = _BytesIOWrapper(handle, encoding=ioargs.encoding) elif is_text and (compression or memory_map or _is_binary_mode(handle, ioargs.mode)): if not hasattr(handle, 'readable') or not hasattr(handle, 'writable') or (not hasattr(handle, 'seekable')): handle = _IOWrapper(handle) handle = TextIOWrapper(handle, encoding=ioargs.encoding, errors=errors, newline='') handles.append(handle) is_wrapped = not (isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close) if 'r' in ioargs.mode and (not hasattr(handle, 'read')): raise TypeError(f'Expected file path name or file-like object, got {type(ioargs.filepath_or_buffer)} type') handles.reverse() if ioargs.should_close: assert not isinstance(ioargs.filepath_or_buffer, str) handles.append(ioargs.filepath_or_buffer) return IOHandles(handle=handle, created_handles=handles, is_wrapped=is_wrapped, compression=ioargs.compression) class _BufferedWriter(BytesIO, ABC): buffer = BytesIO() @abstractmethod def write_to_buffer(self) -> None: ... 
def close(self) -> None: if self.closed: return if self.getbuffer().nbytes: self.seek(0) with self.buffer: self.write_to_buffer() else: self.buffer.close() super().close() class _BytesTarFile(_BufferedWriter): def __init__(self, name: str | None=None, mode: Literal['r', 'a', 'w', 'x']='r', fileobj: ReadBuffer[bytes] | WriteBuffer[bytes] | None=None, archive_name: str | None=None, **kwargs: Any) -> None: super().__init__() self.archive_name = archive_name self.name = name self.buffer: tarfile.TarFile = tarfile.TarFile.open(name=name, mode=self.extend_mode(mode), fileobj=fileobj, **kwargs) def extend_mode(self, mode: str) -> str: mode = mode.replace('b', '') if mode != 'w': return mode if self.name is not None: suffix = Path(self.name).suffix if suffix in ('.gz', '.xz', '.bz2'): mode = f'{mode}:{suffix[1:]}' return mode def infer_filename(self) -> str | None: if self.name is None: return None filename = Path(self.name) if filename.suffix == '.tar': return filename.with_suffix('').name elif filename.suffix in ('.tar.gz', '.tar.bz2', '.tar.xz'): return filename.with_suffix('').with_suffix('').name return filename.name def write_to_buffer(self) -> None: archive_name = self.archive_name or self.infer_filename() or 'tar' tarinfo = tarfile.TarInfo(name=archive_name) tarinfo.size = len(self.getvalue()) self.buffer.addfile(tarinfo, self) class _BytesZipFile(_BufferedWriter): def __init__(self, file: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], mode: str, archive_name: str | None=None, **kwargs: Any) -> None: super().__init__() mode = mode.replace('b', '') self.archive_name = archive_name kwargs.setdefault('compression', zipfile.ZIP_DEFLATED) self.buffer: zipfile.ZipFile = zipfile.ZipFile(file, mode, **kwargs) def infer_filename(self) -> str | None: if isinstance(self.buffer.filename, (os.PathLike, str)): filename = Path(self.buffer.filename) if filename.suffix == '.zip': return filename.with_suffix('').name return filename.name return None def write_to_buffer(self) -> None: archive_name = self.archive_name or self.infer_filename() or 'zip' self.buffer.writestr(archive_name, self.getvalue()) class _IOWrapper: def __init__(self, buffer: BaseBuffer) -> None: self.buffer = buffer def __getattr__(self, name: str) -> Any: return getattr(self.buffer, name) def readable(self) -> bool: if hasattr(self.buffer, 'readable'): return self.buffer.readable() return True def seekable(self) -> bool: if hasattr(self.buffer, 'seekable'): return self.buffer.seekable() return True def writable(self) -> bool: if hasattr(self.buffer, 'writable'): return self.buffer.writable() return True class _BytesIOWrapper: def __init__(self, buffer: StringIO | TextIOBase, encoding: str='utf-8') -> None: self.buffer = buffer self.encoding = encoding self.overflow = b'' def __getattr__(self, attr: str) -> Any: return getattr(self.buffer, attr) def read(self, n: int | None=-1) -> bytes: assert self.buffer is not None bytestring = self.buffer.read(n).encode(self.encoding) combined_bytestring = self.overflow + bytestring if n is None or n < 0 or n >= len(combined_bytestring): self.overflow = b'' return combined_bytestring else: to_return = combined_bytestring[:n] self.overflow = combined_bytestring[n:] return to_return def _maybe_memory_map(handle: str | BaseBuffer, memory_map: bool) -> tuple[str | BaseBuffer, bool, list[BaseBuffer]]: handles: list[BaseBuffer] = [] memory_map &= hasattr(handle, 'fileno') or isinstance(handle, str) if not memory_map: return (handle, memory_map, handles) handle = cast(ReadCsvBuffer, handle) if 
isinstance(handle, str): handle = open(handle, 'rb') handles.append(handle) try: wrapped = _IOWrapper(mmap.mmap(handle.fileno(), 0, access=mmap.ACCESS_READ)) finally: for handle in reversed(handles): handle.close() return (wrapped, memory_map, [wrapped]) def file_exists(filepath_or_buffer: FilePath | BaseBuffer) -> bool: exists = False filepath_or_buffer = stringify_path(filepath_or_buffer) if not isinstance(filepath_or_buffer, str): return exists try: exists = os.path.exists(filepath_or_buffer) except (TypeError, ValueError): pass return exists def _is_binary_mode(handle: FilePath | BaseBuffer, mode: str) -> bool: if 't' in mode or 'b' in mode: return 'b' in mode text_classes = (codecs.StreamWriter, codecs.StreamReader, codecs.StreamReaderWriter) if issubclass(type(handle), text_classes): return False return isinstance(handle, _get_binary_io_classes()) or 'b' in getattr(handle, 'mode', mode) @functools.lru_cache def _get_binary_io_classes() -> tuple[type, ...]: binary_classes: tuple[type, ...] = (BufferedIOBase, RawIOBase) zstd = import_optional_dependency('zstandard', errors='ignore') if zstd is not None: with zstd.ZstdDecompressor().stream_reader(b'') as reader: binary_classes += (type(reader),) return binary_classes def is_potential_multi_index(columns: Sequence[Hashable] | MultiIndex, index_col: bool | Sequence[int] | None=None) -> bool: if index_col is None or isinstance(index_col, bool): index_columns = set() else: index_columns = set(index_col) return bool(len(columns) and (not isinstance(columns, ABCMultiIndex)) and all((isinstance(c, tuple) for c in columns if c not in index_columns))) def dedup_names(names: Sequence[Hashable], is_potential_multiindex: bool) -> Sequence[Hashable]: names = list(names) counts: DefaultDict[Hashable, int] = defaultdict(int) for (i, col) in enumerate(names): cur_count = counts[col] while cur_count > 0: counts[col] = cur_count + 1 if is_potential_multiindex: assert isinstance(col, tuple) col = col[:-1] + (f'{col[-1]}.{cur_count}',) else: col = f'{col}.{cur_count}' cur_count = counts[col] names[i] = col counts[col] = cur_count + 1 return names # File: pandas-main/pandas/io/excel/__init__.py from pandas.io.excel._base import ExcelFile, ExcelWriter, read_excel from pandas.io.excel._odswriter import ODSWriter as _ODSWriter from pandas.io.excel._openpyxl import OpenpyxlWriter as _OpenpyxlWriter from pandas.io.excel._util import register_writer from pandas.io.excel._xlsxwriter import XlsxWriter as _XlsxWriter __all__ = ['read_excel', 'ExcelWriter', 'ExcelFile'] register_writer(_OpenpyxlWriter) register_writer(_XlsxWriter) register_writer(_ODSWriter) # File: pandas-main/pandas/io/excel/_base.py from __future__ import annotations from collections.abc import Callable, Hashable, Iterable, Mapping, Sequence import datetime from functools import partial import os from textwrap import fill from typing import IO, TYPE_CHECKING, Any, Generic, Literal, TypeVar, Union, cast, overload import warnings import zipfile from pandas._config import config from pandas._libs import lib from pandas._libs.parsers import STR_NA_VALUES from pandas.compat._optional import get_version, import_optional_dependency from pandas.errors import EmptyDataError from pandas.util._decorators import Appender, doc from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import is_bool, is_file_like, is_float, is_integer, is_list_like from pandas.core.frame import DataFrame from pandas.core.shared_docs import 
_shared_docs from pandas.util.version import Version from pandas.io.common import IOHandles, get_handle, stringify_path, validate_header_arg from pandas.io.excel._util import fill_mi_header, get_default_engine, get_writer, maybe_convert_usecols, pop_header_name from pandas.io.parsers import TextParser from pandas.io.parsers.readers import validate_integer if TYPE_CHECKING: from types import TracebackType from pandas._typing import DtypeArg, DtypeBackend, ExcelWriterIfSheetExists, FilePath, HashableT, IntStrT, ReadBuffer, Self, SequenceNotStr, StorageOptions, WriteExcelBuffer _read_excel_doc = '\nRead an Excel file into a ``pandas`` ``DataFrame``.\n\nSupports `xls`, `xlsx`, `xlsm`, `xlsb`, `odf`, `ods` and `odt` file extensions\nread from a local filesystem or URL. Supports an option to read\na single sheet or a list of sheets.\n\nParameters\n----------\nio : str, ExcelFile, xlrd.Book, path object, or file-like object\n Any valid string path is acceptable. The string could be a URL. Valid\n URL schemes include http, ftp, s3, and file. For file URLs, a host is\n expected. A local file could be: ``file://localhost/path/to/table.xlsx``.\n\n If you want to pass in a path object, pandas accepts any ``os.PathLike``.\n\n By file-like object, we refer to objects with a ``read()`` method,\n such as a file handle (e.g. via builtin ``open`` function)\n or ``StringIO``.\n\n .. deprecated:: 2.1.0\n Passing byte strings is deprecated. To read from a\n byte string, wrap it in a ``BytesIO`` object.\nsheet_name : str, int, list, or None, default 0\n Strings are used for sheet names. Integers are used in zero-indexed\n sheet positions (chart sheets do not count as a sheet position).\n Lists of strings/integers are used to request multiple sheets.\n Specify ``None`` to get all worksheets.\n\n Available cases:\n\n * Defaults to ``0``: 1st sheet as a `DataFrame`\n * ``1``: 2nd sheet as a `DataFrame`\n * ``"Sheet1"``: Load sheet with name "Sheet1"\n * ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5"\n as a dict of `DataFrame`\n * ``None``: All worksheets.\n\nheader : int, list of int, default 0\n Row (0-indexed) to use for the column labels of the parsed\n DataFrame. If a list of integers is passed those row positions will\n be combined into a ``MultiIndex``. Use None if there is no header.\nnames : array-like, default None\n List of column names to use. If file contains no header row,\n then you should explicitly pass header=None.\nindex_col : int, str, list of int, default None\n Column (0-indexed) to use as the row labels of the DataFrame.\n Pass None if there is no such column. If a list is passed,\n those columns will be combined into a ``MultiIndex``. If a\n subset of data is selected with ``usecols``, index_col\n is based on the subset.\n\n Missing values will be forward filled to allow roundtripping with\n ``to_excel`` for ``merged_cells=True``. To avoid forward filling the\n missing values use ``set_index`` after reading the data instead of\n ``index_col``.\nusecols : str, list-like, or callable, default None\n * If None, then parse all columns.\n * If str, then indicates comma separated list of Excel column letters\n and column ranges (e.g. "A:E" or "A,C,E:F"). 
Ranges are inclusive of\n both sides.\n * If list of int, then indicates list of column numbers to be parsed\n (0-indexed).\n * If list of string, then indicates list of column names to be parsed.\n * If callable, then evaluate each column name against it and parse the\n column if the callable returns ``True``.\n\n Returns a subset of the columns according to behavior above.\ndtype : Type name or dict of column -> type, default None\n Data type for data or columns. E.g. {{\'a\': np.float64, \'b\': np.int32}}\n Use ``object`` to preserve data as stored in Excel and not interpret dtype,\n which will necessarily result in ``object`` dtype.\n If converters are specified, they will be applied INSTEAD\n of dtype conversion.\n If you use ``None``, it will infer the dtype of each column based on the data.\nengine : {{\'openpyxl\', \'calamine\', \'odf\', \'pyxlsb\', \'xlrd\'}}, default None\n If io is not a buffer or path, this must be set to identify io.\n Engine compatibility :\n\n - ``openpyxl`` supports newer Excel file formats.\n - ``calamine`` supports Excel (.xls, .xlsx, .xlsm, .xlsb)\n and OpenDocument (.ods) file formats.\n - ``odf`` supports OpenDocument file formats (.odf, .ods, .odt).\n - ``pyxlsb`` supports Binary Excel files.\n - ``xlrd`` supports old-style Excel files (.xls).\n\n When ``engine=None``, the following logic will be used to determine the engine:\n\n - If ``path_or_buffer`` is an OpenDocument format (.odf, .ods, .odt),\n then `odf `_ will be used.\n - Otherwise if ``path_or_buffer`` is an xls format, ``xlrd`` will be used.\n - Otherwise if ``path_or_buffer`` is in xlsb format, ``pyxlsb`` will be used.\n - Otherwise ``openpyxl`` will be used.\nconverters : dict, default None\n Dict of functions for converting values in certain columns. Keys can\n either be integers or column labels, values are functions that take one\n input argument, the Excel cell content, and return the transformed\n content.\ntrue_values : list, default None\n Values to consider as True.\nfalse_values : list, default None\n Values to consider as False.\nskiprows : list-like, int, or callable, optional\n Line numbers to skip (0-indexed) or number of lines to skip (int) at the\n start of the file. If callable, the callable function will be evaluated\n against the row indices, returning True if the row should be skipped and\n False otherwise. An example of a valid callable argument would be ``lambda\n x: x in [0, 2]``.\nnrows : int, default None\n Number of rows to parse.\nna_values : scalar, str, list-like, or dict, default None\n Additional strings to recognize as NA/NaN. If dict passed, specific\n per-column NA values. 
By default the following values are interpreted\n as NaN: \'' + fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=' ') + '\'.\nkeep_default_na : bool, default True\n Whether or not to include the default NaN values when parsing the data.\n Depending on whether ``na_values`` is passed in, the behavior is as follows:\n\n * If ``keep_default_na`` is True, and ``na_values`` are specified,\n ``na_values`` is appended to the default NaN values used for parsing.\n * If ``keep_default_na`` is True, and ``na_values`` are not specified, only\n the default NaN values are used for parsing.\n * If ``keep_default_na`` is False, and ``na_values`` are specified, only\n the NaN values specified ``na_values`` are used for parsing.\n * If ``keep_default_na`` is False, and ``na_values`` are not specified, no\n strings will be parsed as NaN.\n\n Note that if `na_filter` is passed in as False, the ``keep_default_na`` and\n ``na_values`` parameters will be ignored.\nna_filter : bool, default True\n Detect missing value markers (empty strings and the value of na_values). In\n data without any NAs, passing ``na_filter=False`` can improve the\n performance of reading a large file.\nverbose : bool, default False\n Indicate number of NA values placed in non-numeric columns.\nparse_dates : bool, list-like, or dict, default False\n The behavior is as follows:\n\n * ``bool``. If True -> try parsing the index.\n * ``list`` of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3\n each as a separate date column.\n * ``list`` of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as\n a single date column.\n * ``dict``, e.g. {{\'foo\' : [1, 3]}} -> parse columns 1, 3 as date and call\n result \'foo\'\n\n If a column or index contains an unparsable date, the entire column or\n index will be returned unaltered as an object data type. If you don`t want to\n parse some cells as date just change their type in Excel to "Text".\n For non-standard datetime parsing, use ``pd.to_datetime`` after ``pd.read_excel``.\n\n Note: A fast-path exists for iso8601-formatted dates.\ndate_format : str or dict of column -> format, default ``None``\n If used in conjunction with ``parse_dates``, will parse dates according to this\n format. For anything more complex,\n please read in as ``object`` and then apply :func:`to_datetime` as-needed.\n\n .. versionadded:: 2.0.0\nthousands : str, default None\n Thousands separator for parsing string columns to numeric. Note that\n this parameter is only necessary for columns stored as TEXT in Excel,\n any numeric columns will automatically be parsed, regardless of display\n format.\ndecimal : str, default \'.\'\n Character to recognize as decimal point for parsing string columns to numeric.\n Note that this parameter is only necessary for columns stored as TEXT in Excel,\n any numeric columns will automatically be parsed, regardless of display\n format.(e.g. use \',\' for European data).\n\n .. versionadded:: 1.4.0\n\ncomment : str, default None\n Comments out remainder of line. Pass a character or characters to this\n argument to indicate comments in the input file. Any data between the\n comment string and the end of the current line is ignored.\nskipfooter : int, default 0\n Rows at the end to skip (0-indexed).\n{storage_options}\n\ndtype_backend : {{\'numpy_nullable\', \'pyarrow\'}}\n Back-end data type applied to the resultant :class:`DataFrame`\n (still experimental). If not specified, the default behavior\n is to not use nullable data types. 
If specified, the behavior\n is as follows:\n\n * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`\n * ``"pyarrow"``: returns pyarrow-backed nullable\n :class:`ArrowDtype` :class:`DataFrame`\n\n .. versionadded:: 2.0\n\nengine_kwargs : dict, optional\n Arbitrary keyword arguments passed to excel engine.\n\nReturns\n-------\nDataFrame or dict of DataFrames\n DataFrame from the passed in Excel file. See notes in sheet_name\n argument for more information on when a dict of DataFrames is returned.\n\nSee Also\n--------\nDataFrame.to_excel : Write DataFrame to an Excel file.\nDataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.\nread_csv : Read a comma-separated values (csv) file into DataFrame.\nread_fwf : Read a table of fixed-width formatted lines into DataFrame.\n\nNotes\n-----\nFor specific information on the methods used for each Excel engine, refer to the pandas\n:ref:`user guide `\n\nExamples\n--------\nThe file can be read using the file name as string or an open file object:\n\n>>> pd.read_excel(\'tmp.xlsx\', index_col=0) # doctest: +SKIP\n Name Value\n0 string1 1\n1 string2 2\n2 #Comment 3\n\n>>> pd.read_excel(open(\'tmp.xlsx\', \'rb\'),\n... sheet_name=\'Sheet3\') # doctest: +SKIP\n Unnamed: 0 Name Value\n0 0 string1 1\n1 1 string2 2\n2 2 #Comment 3\n\nIndex and header can be specified via the `index_col` and `header` arguments\n\n>>> pd.read_excel(\'tmp.xlsx\', index_col=None, header=None) # doctest: +SKIP\n 0 1 2\n0 NaN Name Value\n1 0.0 string1 1\n2 1.0 string2 2\n3 2.0 #Comment 3\n\nColumn types are inferred but can be explicitly specified\n\n>>> pd.read_excel(\'tmp.xlsx\', index_col=0,\n... dtype={{\'Name\': str, \'Value\': float}}) # doctest: +SKIP\n Name Value\n0 string1 1.0\n1 string2 2.0\n2 #Comment 3.0\n\nTrue, False, and NA values, and thousands separators have defaults,\nbut can be explicitly specified, too. Supply the values you would like\nas strings or lists of strings!\n\n>>> pd.read_excel(\'tmp.xlsx\', index_col=0,\n... na_values=[\'string1\', \'string2\']) # doctest: +SKIP\n Name Value\n0 NaN 1\n1 NaN 2\n2 #Comment 3\n\nComment lines in the excel input file can be skipped using the\n``comment`` kwarg.\n\n>>> pd.read_excel(\'tmp.xlsx\', index_col=0, comment=\'#\') # doctest: +SKIP\n Name Value\n0 string1 1.0\n1 string2 2.0\n2 None NaN\n' @overload def read_excel(io, sheet_name: str | int=..., *, header: int | Sequence[int] | None=..., names: SequenceNotStr[Hashable] | range | None=..., index_col: int | str | Sequence[int] | None=..., usecols: int | str | Sequence[int] | Sequence[str] | Callable[[HashableT], bool] | None=..., dtype: DtypeArg | None=..., engine: Literal['xlrd', 'openpyxl', 'odf', 'pyxlsb', 'calamine'] | None=..., converters: dict[str, Callable] | dict[int, Callable] | None=..., true_values: Iterable[Hashable] | None=..., false_values: Iterable[Hashable] | None=..., skiprows: Sequence[int] | int | Callable[[int], object] | None=..., nrows: int | None=..., na_values=..., keep_default_na: bool=..., na_filter: bool=..., verbose: bool=..., parse_dates: list | dict | bool=..., date_format: dict[Hashable, str] | str | None=..., thousands: str | None=..., decimal: str=..., comment: str | None=..., skipfooter: int=..., storage_options: StorageOptions=..., dtype_backend: DtypeBackend | lib.NoDefault=...) -> DataFrame: ... 
@overload def read_excel(io, sheet_name: list[IntStrT] | None, *, header: int | Sequence[int] | None=..., names: SequenceNotStr[Hashable] | range | None=..., index_col: int | str | Sequence[int] | None=..., usecols: int | str | Sequence[int] | Sequence[str] | Callable[[HashableT], bool] | None=..., dtype: DtypeArg | None=..., engine: Literal['xlrd', 'openpyxl', 'odf', 'pyxlsb', 'calamine'] | None=..., converters: dict[str, Callable] | dict[int, Callable] | None=..., true_values: Iterable[Hashable] | None=..., false_values: Iterable[Hashable] | None=..., skiprows: Sequence[int] | int | Callable[[int], object] | None=..., nrows: int | None=..., na_values=..., keep_default_na: bool=..., na_filter: bool=..., verbose: bool=..., parse_dates: list | dict | bool=..., date_format: dict[Hashable, str] | str | None=..., thousands: str | None=..., decimal: str=..., comment: str | None=..., skipfooter: int=..., storage_options: StorageOptions=..., dtype_backend: DtypeBackend | lib.NoDefault=...) -> dict[IntStrT, DataFrame]: ... @doc(storage_options=_shared_docs['storage_options']) @Appender(_read_excel_doc) def read_excel(io, sheet_name: str | int | list[IntStrT] | None=0, *, header: int | Sequence[int] | None=0, names: SequenceNotStr[Hashable] | range | None=None, index_col: int | str | Sequence[int] | None=None, usecols: int | str | Sequence[int] | Sequence[str] | Callable[[HashableT], bool] | None=None, dtype: DtypeArg | None=None, engine: Literal['xlrd', 'openpyxl', 'odf', 'pyxlsb', 'calamine'] | None=None, converters: dict[str, Callable] | dict[int, Callable] | None=None, true_values: Iterable[Hashable] | None=None, false_values: Iterable[Hashable] | None=None, skiprows: Sequence[int] | int | Callable[[int], object] | None=None, nrows: int | None=None, na_values=None, keep_default_na: bool=True, na_filter: bool=True, verbose: bool=False, parse_dates: list | dict | bool=False, date_format: dict[Hashable, str] | str | None=None, thousands: str | None=None, decimal: str='.', comment: str | None=None, skipfooter: int=0, storage_options: StorageOptions | None=None, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default, engine_kwargs: dict | None=None) -> DataFrame | dict[IntStrT, DataFrame]: check_dtype_backend(dtype_backend) should_close = False if engine_kwargs is None: engine_kwargs = {} if not isinstance(io, ExcelFile): should_close = True io = ExcelFile(io, storage_options=storage_options, engine=engine, engine_kwargs=engine_kwargs) elif engine and engine != io.engine: raise ValueError('Engine should not be specified when passing an ExcelFile - ExcelFile already has the engine set') try: data = io.parse(sheet_name=sheet_name, header=header, names=names, index_col=index_col, usecols=usecols, dtype=dtype, converters=converters, true_values=true_values, false_values=false_values, skiprows=skiprows, nrows=nrows, na_values=na_values, keep_default_na=keep_default_na, na_filter=na_filter, verbose=verbose, parse_dates=parse_dates, date_format=date_format, thousands=thousands, decimal=decimal, comment=comment, skipfooter=skipfooter, dtype_backend=dtype_backend) finally: if should_close: io.close() return data _WorkbookT = TypeVar('_WorkbookT') class BaseExcelReader(Generic[_WorkbookT]): book: _WorkbookT def __init__(self, filepath_or_buffer, storage_options: StorageOptions | None=None, engine_kwargs: dict | None=None) -> None: if engine_kwargs is None: engine_kwargs = {} self.handles = IOHandles(handle=filepath_or_buffer, compression={'method': None}) if not isinstance(filepath_or_buffer, (ExcelFile, 
self._workbook_class)): self.handles = get_handle(filepath_or_buffer, 'rb', storage_options=storage_options, is_text=False) if isinstance(self.handles.handle, self._workbook_class): self.book = self.handles.handle elif hasattr(self.handles.handle, 'read'): self.handles.handle.seek(0) try: self.book = self.load_workbook(self.handles.handle, engine_kwargs) except Exception: self.close() raise else: raise ValueError('Must explicitly set engine if not passing in buffer or path for io.') @property def _workbook_class(self) -> type[_WorkbookT]: raise NotImplementedError def load_workbook(self, filepath_or_buffer, engine_kwargs) -> _WorkbookT: raise NotImplementedError def close(self) -> None: if hasattr(self, 'book'): if hasattr(self.book, 'close'): self.book.close() elif hasattr(self.book, 'release_resources'): self.book.release_resources() self.handles.close() @property def sheet_names(self) -> list[str]: raise NotImplementedError def get_sheet_by_name(self, name: str): raise NotImplementedError def get_sheet_by_index(self, index: int): raise NotImplementedError def get_sheet_data(self, sheet, rows: int | None=None): raise NotImplementedError def raise_if_bad_sheet_by_index(self, index: int) -> None: n_sheets = len(self.sheet_names) if index >= n_sheets: raise ValueError(f'Worksheet index {index} is invalid, {n_sheets} worksheets found') def raise_if_bad_sheet_by_name(self, name: str) -> None: if name not in self.sheet_names: raise ValueError(f"Worksheet named '{name}' not found") def _check_skiprows_func(self, skiprows: Callable, rows_to_use: int) -> int: i = 0 rows_used_so_far = 0 while rows_used_so_far < rows_to_use: if not skiprows(i): rows_used_so_far += 1 i += 1 return i def _calc_rows(self, header: int | Sequence[int] | None, index_col: int | Sequence[int] | None, skiprows: Sequence[int] | int | Callable[[int], object] | None, nrows: int | None) -> int | None: if nrows is None: return None if header is None: header_rows = 1 elif is_integer(header): header = cast(int, header) header_rows = 1 + header else: header = cast(Sequence, header) header_rows = 1 + header[-1] if is_list_like(header) and index_col is not None: header = cast(Sequence, header) if len(header) > 1: header_rows += 1 if skiprows is None: return header_rows + nrows if is_integer(skiprows): skiprows = cast(int, skiprows) return header_rows + nrows + skiprows if is_list_like(skiprows): def f(skiprows: Sequence, x: int) -> bool: return x in skiprows skiprows = cast(Sequence, skiprows) return self._check_skiprows_func(partial(f, skiprows), header_rows + nrows) if callable(skiprows): return self._check_skiprows_func(skiprows, header_rows + nrows) return None def parse(self, sheet_name: str | int | list[int] | list[str] | None=0, header: int | Sequence[int] | None=0, names: SequenceNotStr[Hashable] | range | None=None, index_col: int | Sequence[int] | None=None, usecols=None, dtype: DtypeArg | None=None, true_values: Iterable[Hashable] | None=None, false_values: Iterable[Hashable] | None=None, skiprows: Sequence[int] | int | Callable[[int], object] | None=None, nrows: int | None=None, na_values=None, verbose: bool=False, parse_dates: list | dict | bool=False, date_format: dict[Hashable, str] | str | None=None, thousands: str | None=None, decimal: str='.', comment: str | None=None, skipfooter: int=0, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default, **kwds): validate_header_arg(header) validate_integer('nrows', nrows) ret_dict = False sheets: list[int] | list[str] if isinstance(sheet_name, list): sheets = sheet_name 
ret_dict = True elif sheet_name is None: sheets = self.sheet_names ret_dict = True elif isinstance(sheet_name, str): sheets = [sheet_name] else: sheets = [sheet_name] sheets = cast(Union[list[int], list[str]], list(dict.fromkeys(sheets).keys())) output = {} last_sheetname = None for asheetname in sheets: last_sheetname = asheetname if verbose: print(f'Reading sheet {asheetname}') if isinstance(asheetname, str): sheet = self.get_sheet_by_name(asheetname) else: sheet = self.get_sheet_by_index(asheetname) file_rows_needed = self._calc_rows(header, index_col, skiprows, nrows) data = self.get_sheet_data(sheet, file_rows_needed) if hasattr(sheet, 'close'): sheet.close() usecols = maybe_convert_usecols(usecols) if not data: output[asheetname] = DataFrame() continue output = self._parse_sheet(data=data, output=output, asheetname=asheetname, header=header, names=names, index_col=index_col, usecols=usecols, dtype=dtype, skiprows=skiprows, nrows=nrows, true_values=true_values, false_values=false_values, na_values=na_values, parse_dates=parse_dates, date_format=date_format, thousands=thousands, decimal=decimal, comment=comment, skipfooter=skipfooter, dtype_backend=dtype_backend, **kwds) if last_sheetname is None: raise ValueError('Sheet name is an empty list') if ret_dict: return output else: return output[last_sheetname] def _parse_sheet(self, data: list, output: dict, asheetname: str | int | None=None, header: int | Sequence[int] | None=0, names: SequenceNotStr[Hashable] | range | None=None, index_col: int | Sequence[int] | None=None, usecols=None, dtype: DtypeArg | None=None, skiprows: Sequence[int] | int | Callable[[int], object] | None=None, nrows: int | None=None, true_values: Iterable[Hashable] | None=None, false_values: Iterable[Hashable] | None=None, na_values=None, parse_dates: list | dict | bool=False, date_format: dict[Hashable, str] | str | None=None, thousands: str | None=None, decimal: str='.', comment: str | None=None, skipfooter: int=0, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default, **kwds): is_list_header = False is_len_one_list_header = False if is_list_like(header): assert isinstance(header, Sequence) is_list_header = True if len(header) == 1: is_len_one_list_header = True if is_len_one_list_header: header = cast(Sequence[int], header)[0] header_names = None if header is not None and is_list_like(header): assert isinstance(header, Sequence) header_names = [] control_row = [True] * len(data[0]) for row in header: if is_integer(skiprows): assert isinstance(skiprows, int) row += skiprows if row > len(data) - 1: raise ValueError(f'header index {row} exceeds maximum index {len(data) - 1} of data.') (data[row], control_row) = fill_mi_header(data[row], control_row) if index_col is not None: (header_name, _) = pop_header_name(data[row], index_col) header_names.append(header_name) has_index_names = False if is_list_header and (not is_len_one_list_header) and (index_col is not None): index_col_set: set[int] if isinstance(index_col, int): index_col_set = {index_col} else: assert isinstance(index_col, Sequence) index_col_set = set(index_col) assert isinstance(header, Sequence) if len(header) < len(data): potential_index_names = data[len(header)] has_index_names = all((x == '' or x is None for (i, x) in enumerate(potential_index_names) if not control_row[i] and i not in index_col_set)) if is_list_like(index_col): if header is None: offset = 0 elif isinstance(header, int): offset = 1 + header else: offset = 1 + max(header) if has_index_names: offset += 1 if offset < len(data): 
assert isinstance(index_col, Sequence) for col in index_col: last = data[offset][col] for row in range(offset + 1, len(data)): if data[row][col] == '' or data[row][col] is None: data[row][col] = last else: last = data[row][col] try: parser = TextParser(data, names=names, header=header, index_col=index_col, has_index_names=has_index_names, dtype=dtype, true_values=true_values, false_values=false_values, skiprows=skiprows, nrows=nrows, na_values=na_values, skip_blank_lines=False, parse_dates=parse_dates, date_format=date_format, thousands=thousands, decimal=decimal, comment=comment, skipfooter=skipfooter, usecols=usecols, dtype_backend=dtype_backend, **kwds) output[asheetname] = parser.read(nrows=nrows) if header_names: output[asheetname].columns = output[asheetname].columns.set_names(header_names) except EmptyDataError: output[asheetname] = DataFrame() except Exception as err: err.args = (f'{err.args[0]} (sheet: {asheetname})', *err.args[1:]) raise err return output @doc(storage_options=_shared_docs['storage_options']) class ExcelWriter(Generic[_WorkbookT]): _engine: str _supported_extensions: tuple[str, ...] def __new__(cls, path: FilePath | WriteExcelBuffer | ExcelWriter, engine: str | None=None, date_format: str | None=None, datetime_format: str | None=None, mode: str='w', storage_options: StorageOptions | None=None, if_sheet_exists: ExcelWriterIfSheetExists | None=None, engine_kwargs: dict | None=None) -> Self: if cls is ExcelWriter: if engine is None or (isinstance(engine, str) and engine == 'auto'): if isinstance(path, str): ext = os.path.splitext(path)[-1][1:] else: ext = 'xlsx' try: engine = config.get_option(f'io.excel.{ext}.writer') if engine == 'auto': engine = get_default_engine(ext, mode='writer') except KeyError as err: raise ValueError(f"No engine for filetype: '{ext}'") from err assert engine is not None cls = get_writer(engine) return object.__new__(cls) _path = None @property def supported_extensions(self) -> tuple[str, ...]: return self._supported_extensions @property def engine(self) -> str: return self._engine @property def sheets(self) -> dict[str, Any]: raise NotImplementedError @property def book(self) -> _WorkbookT: raise NotImplementedError def _write_cells(self, cells, sheet_name: str | None=None, startrow: int=0, startcol: int=0, freeze_panes: tuple[int, int] | None=None) -> None: raise NotImplementedError def _save(self) -> None: raise NotImplementedError def __init__(self, path: FilePath | WriteExcelBuffer | ExcelWriter, engine: str | None=None, date_format: str | None=None, datetime_format: str | None=None, mode: str='w', storage_options: StorageOptions | None=None, if_sheet_exists: ExcelWriterIfSheetExists | None=None, engine_kwargs: dict[str, Any] | None=None) -> None: if isinstance(path, str): ext = os.path.splitext(path)[-1] self.check_extension(ext) if 'b' not in mode: mode += 'b' mode = mode.replace('a', 'r+') if if_sheet_exists not in (None, 'error', 'new', 'replace', 'overlay'): raise ValueError(f"'{if_sheet_exists}' is not valid for if_sheet_exists. 
Valid options are 'error', 'new', 'replace' and 'overlay'.") if if_sheet_exists and 'r+' not in mode: raise ValueError("if_sheet_exists is only valid in append mode (mode='a')") if if_sheet_exists is None: if_sheet_exists = 'error' self._if_sheet_exists = if_sheet_exists self._handles = IOHandles(cast(IO[bytes], path), compression={'compression': None}) if not isinstance(path, ExcelWriter): self._handles = get_handle(path, mode, storage_options=storage_options, is_text=False) self._cur_sheet = None if date_format is None: self._date_format = 'YYYY-MM-DD' else: self._date_format = date_format if datetime_format is None: self._datetime_format = 'YYYY-MM-DD HH:MM:SS' else: self._datetime_format = datetime_format self._mode = mode @property def date_format(self) -> str: return self._date_format @property def datetime_format(self) -> str: return self._datetime_format @property def if_sheet_exists(self) -> str: return self._if_sheet_exists def __fspath__(self) -> str: return getattr(self._handles.handle, 'name', '') def _get_sheet_name(self, sheet_name: str | None) -> str: if sheet_name is None: sheet_name = self._cur_sheet if sheet_name is None: raise ValueError('Must pass explicit sheet_name or set _cur_sheet property') return sheet_name def _value_with_fmt(self, val) -> tuple[int | float | bool | str | datetime.datetime | datetime.date, str | None]: fmt = None if is_integer(val): val = int(val) elif is_float(val): val = float(val) elif is_bool(val): val = bool(val) elif isinstance(val, datetime.datetime): fmt = self._datetime_format elif isinstance(val, datetime.date): fmt = self._date_format elif isinstance(val, datetime.timedelta): val = val.total_seconds() / 86400 fmt = '0' else: val = str(val) if len(val) > 32767: warnings.warn(f'Cell contents too long ({len(val)}), truncated to 32767 characters', UserWarning, stacklevel=find_stack_level()) return (val, fmt) @classmethod def check_extension(cls, ext: str) -> Literal[True]: if ext.startswith('.'): ext = ext[1:] if not any((ext in extension for extension in cls._supported_extensions)): raise ValueError(f"Invalid extension for engine '{cls.engine}': '{ext}'") return True def __enter__(self) -> Self: return self def __exit__(self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None) -> None: self.close() def close(self) -> None: self._save() self._handles.close() XLS_SIGNATURES = (b'\t\x00\x04\x00\x07\x00\x10\x00', b'\t\x02\x06\x00\x00\x00\x10\x00', b'\t\x04\x06\x00\x00\x00\x10\x00', b'\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1') ZIP_SIGNATURE = b'PK\x03\x04' PEEK_SIZE = max(map(len, XLS_SIGNATURES + (ZIP_SIGNATURE,))) @doc(storage_options=_shared_docs['storage_options']) def inspect_excel_format(content_or_path: FilePath | ReadBuffer[bytes], storage_options: StorageOptions | None=None) -> str | None: with get_handle(content_or_path, 'rb', storage_options=storage_options, is_text=False) as handle: stream = handle.handle stream.seek(0) buf = stream.read(PEEK_SIZE) if buf is None: raise ValueError('stream is empty') assert isinstance(buf, bytes) peek = buf stream.seek(0) if any((peek.startswith(sig) for sig in XLS_SIGNATURES)): return 'xls' elif not peek.startswith(ZIP_SIGNATURE): return None with zipfile.ZipFile(stream) as zf: component_names = {name.replace('\\', '/').lower() for name in zf.namelist()} if 'xl/workbook.xml' in component_names: return 'xlsx' if 'xl/workbook.bin' in component_names: return 'xlsb' if 'content.xml' in component_names: return 'ods' return 'zip' 
@doc(storage_options=_shared_docs['storage_options']) class ExcelFile: from pandas.io.excel._calamine import CalamineReader from pandas.io.excel._odfreader import ODFReader from pandas.io.excel._openpyxl import OpenpyxlReader from pandas.io.excel._pyxlsb import PyxlsbReader from pandas.io.excel._xlrd import XlrdReader _engines: Mapping[str, Any] = {'xlrd': XlrdReader, 'openpyxl': OpenpyxlReader, 'odf': ODFReader, 'pyxlsb': PyxlsbReader, 'calamine': CalamineReader} def __init__(self, path_or_buffer, engine: str | None=None, storage_options: StorageOptions | None=None, engine_kwargs: dict | None=None) -> None: if engine_kwargs is None: engine_kwargs = {} if engine is not None and engine not in self._engines: raise ValueError(f'Unknown engine: {engine}') self._io = stringify_path(path_or_buffer) if engine is None: ext: str | None = None if not isinstance(path_or_buffer, (str, os.PathLike, ExcelFile)) and (not is_file_like(path_or_buffer)): if import_optional_dependency('xlrd', errors='ignore') is None: xlrd_version = None else: import xlrd xlrd_version = Version(get_version(xlrd)) if xlrd_version is not None and isinstance(path_or_buffer, xlrd.Book): ext = 'xls' if ext is None: ext = inspect_excel_format(content_or_path=path_or_buffer, storage_options=storage_options) if ext is None: raise ValueError('Excel file format cannot be determined, you must specify an engine manually.') engine = config.get_option(f'io.excel.{ext}.reader') if engine == 'auto': engine = get_default_engine(ext, mode='reader') assert engine is not None self.engine = engine self.storage_options = storage_options self._reader = self._engines[engine](self._io, storage_options=storage_options, engine_kwargs=engine_kwargs) def __fspath__(self): return self._io def parse(self, sheet_name: str | int | list[int] | list[str] | None=0, header: int | Sequence[int] | None=0, names: SequenceNotStr[Hashable] | range | None=None, index_col: int | Sequence[int] | None=None, usecols=None, converters=None, true_values: Iterable[Hashable] | None=None, false_values: Iterable[Hashable] | None=None, skiprows: Sequence[int] | int | Callable[[int], object] | None=None, nrows: int | None=None, na_values=None, parse_dates: list | dict | bool=False, date_format: str | dict[Hashable, str] | None=None, thousands: str | None=None, comment: str | None=None, skipfooter: int=0, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default, **kwds) -> DataFrame | dict[str, DataFrame] | dict[int, DataFrame]: return self._reader.parse(sheet_name=sheet_name, header=header, names=names, index_col=index_col, usecols=usecols, converters=converters, true_values=true_values, false_values=false_values, skiprows=skiprows, nrows=nrows, na_values=na_values, parse_dates=parse_dates, date_format=date_format, thousands=thousands, comment=comment, skipfooter=skipfooter, dtype_backend=dtype_backend, **kwds) @property def book(self): return self._reader.book @property def sheet_names(self): return self._reader.sheet_names def close(self) -> None: self._reader.close() def __enter__(self) -> Self: return self def __exit__(self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None) -> None: self.close() # File: pandas-main/pandas/io/excel/_calamine.py from __future__ import annotations from datetime import date, datetime, time, timedelta from typing import TYPE_CHECKING, Any, Union from pandas.compat._optional import import_optional_dependency from pandas.util._decorators import doc import pandas as pd from 
pandas.core.shared_docs import _shared_docs from pandas.io.excel._base import BaseExcelReader if TYPE_CHECKING: from python_calamine import CalamineSheet, CalamineWorkbook from pandas._typing import FilePath, NaTType, ReadBuffer, Scalar, StorageOptions _CellValue = Union[int, float, str, bool, time, date, datetime, timedelta] class CalamineReader(BaseExcelReader['CalamineWorkbook']): @doc(storage_options=_shared_docs['storage_options']) def __init__(self, filepath_or_buffer: FilePath | ReadBuffer[bytes], storage_options: StorageOptions | None=None, engine_kwargs: dict | None=None) -> None: import_optional_dependency('python_calamine') super().__init__(filepath_or_buffer, storage_options=storage_options, engine_kwargs=engine_kwargs) @property def _workbook_class(self) -> type[CalamineWorkbook]: from python_calamine import CalamineWorkbook return CalamineWorkbook def load_workbook(self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs: Any) -> CalamineWorkbook: from python_calamine import load_workbook return load_workbook(filepath_or_buffer, **engine_kwargs) @property def sheet_names(self) -> list[str]: from python_calamine import SheetTypeEnum return [sheet.name for sheet in self.book.sheets_metadata if sheet.typ == SheetTypeEnum.WorkSheet] def get_sheet_by_name(self, name: str) -> CalamineSheet: self.raise_if_bad_sheet_by_name(name) return self.book.get_sheet_by_name(name) def get_sheet_by_index(self, index: int) -> CalamineSheet: self.raise_if_bad_sheet_by_index(index) return self.book.get_sheet_by_index(index) def get_sheet_data(self, sheet: CalamineSheet, file_rows_needed: int | None=None) -> list[list[Scalar | NaTType | time]]: def _convert_cell(value: _CellValue) -> Scalar | NaTType | time: if isinstance(value, float): val = int(value) if val == value: return val else: return value elif isinstance(value, date): return pd.Timestamp(value) elif isinstance(value, timedelta): return pd.Timedelta(value) elif isinstance(value, time): return value return value rows: list[list[_CellValue]] = sheet.to_python(skip_empty_area=False, nrows=file_rows_needed) data = [[_convert_cell(cell) for cell in row] for row in rows] return data # File: pandas-main/pandas/io/excel/_odfreader.py from __future__ import annotations from typing import TYPE_CHECKING, cast import numpy as np from pandas._typing import FilePath, ReadBuffer, Scalar, StorageOptions from pandas.compat._optional import import_optional_dependency from pandas.util._decorators import doc import pandas as pd from pandas.core.shared_docs import _shared_docs from pandas.io.excel._base import BaseExcelReader if TYPE_CHECKING: from odf.opendocument import OpenDocument from pandas._libs.tslibs.nattype import NaTType @doc(storage_options=_shared_docs['storage_options']) class ODFReader(BaseExcelReader['OpenDocument']): def __init__(self, filepath_or_buffer: FilePath | ReadBuffer[bytes], storage_options: StorageOptions | None=None, engine_kwargs: dict | None=None) -> None: import_optional_dependency('odf') super().__init__(filepath_or_buffer, storage_options=storage_options, engine_kwargs=engine_kwargs) @property def _workbook_class(self) -> type[OpenDocument]: from odf.opendocument import OpenDocument return OpenDocument def load_workbook(self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs) -> OpenDocument: from odf.opendocument import load return load(filepath_or_buffer, **engine_kwargs) @property def empty_value(self) -> str: return '' @property def sheet_names(self) -> list[str]: from odf.table import Table 
tables = self.book.getElementsByType(Table) return [t.getAttribute('name') for t in tables] def get_sheet_by_index(self, index: int): from odf.table import Table self.raise_if_bad_sheet_by_index(index) tables = self.book.getElementsByType(Table) return tables[index] def get_sheet_by_name(self, name: str): from odf.table import Table self.raise_if_bad_sheet_by_name(name) tables = self.book.getElementsByType(Table) for table in tables: if table.getAttribute('name') == name: return table self.close() raise ValueError(f'sheet {name} not found') def get_sheet_data(self, sheet, file_rows_needed: int | None=None) -> list[list[Scalar | NaTType]]: from odf.table import CoveredTableCell, TableCell, TableRow covered_cell_name = CoveredTableCell().qname table_cell_name = TableCell().qname cell_names = {covered_cell_name, table_cell_name} sheet_rows = sheet.getElementsByType(TableRow) empty_rows = 0 max_row_len = 0 table: list[list[Scalar | NaTType]] = [] for sheet_row in sheet_rows: empty_cells = 0 table_row: list[Scalar | NaTType] = [] for sheet_cell in sheet_row.childNodes: if hasattr(sheet_cell, 'qname') and sheet_cell.qname in cell_names: if sheet_cell.qname == table_cell_name: value = self._get_cell_value(sheet_cell) else: value = self.empty_value column_repeat = self._get_column_repeat(sheet_cell) if value == self.empty_value: empty_cells += column_repeat else: table_row.extend([self.empty_value] * empty_cells) empty_cells = 0 table_row.extend([value] * column_repeat) if max_row_len < len(table_row): max_row_len = len(table_row) row_repeat = self._get_row_repeat(sheet_row) if len(table_row) == 0: empty_rows += row_repeat else: table.extend([[self.empty_value]] * empty_rows) empty_rows = 0 table.extend((table_row for _ in range(row_repeat))) if file_rows_needed is not None and len(table) >= file_rows_needed: break for row in table: if len(row) < max_row_len: row.extend([self.empty_value] * (max_row_len - len(row))) return table def _get_row_repeat(self, row) -> int: from odf.namespaces import TABLENS return int(row.attributes.get((TABLENS, 'number-rows-repeated'), 1)) def _get_column_repeat(self, cell) -> int: from odf.namespaces import TABLENS return int(cell.attributes.get((TABLENS, 'number-columns-repeated'), 1)) def _get_cell_value(self, cell) -> Scalar | NaTType: from odf.namespaces import OFFICENS if str(cell) == '#N/A': return np.nan cell_type = cell.attributes.get((OFFICENS, 'value-type')) if cell_type == 'boolean': if str(cell) == 'TRUE': return True return False if cell_type is None: return self.empty_value elif cell_type == 'float': cell_value = float(cell.attributes.get((OFFICENS, 'value'))) val = int(cell_value) if val == cell_value: return val return cell_value elif cell_type == 'percentage': cell_value = cell.attributes.get((OFFICENS, 'value')) return float(cell_value) elif cell_type == 'string': return self._get_cell_string_value(cell) elif cell_type == 'currency': cell_value = cell.attributes.get((OFFICENS, 'value')) return float(cell_value) elif cell_type == 'date': cell_value = cell.attributes.get((OFFICENS, 'date-value')) return pd.Timestamp(cell_value) elif cell_type == 'time': stamp = pd.Timestamp(str(cell)) return cast(Scalar, stamp.time()) else: self.close() raise ValueError(f'Unrecognized type {cell_type}') def _get_cell_string_value(self, cell) -> str: from odf.element import Element from odf.namespaces import TEXTNS from odf.office import Annotation from odf.text import S office_annotation = Annotation().qname text_s = S().qname value = [] for fragment in 
cell.childNodes: if isinstance(fragment, Element): if fragment.qname == text_s: spaces = int(fragment.attributes.get((TEXTNS, 'c'), 1)) value.append(' ' * spaces) elif fragment.qname == office_annotation: continue else: value.append(self._get_cell_string_value(fragment)) else: value.append(str(fragment).strip('\n')) return ''.join(value) # File: pandas-main/pandas/io/excel/_odswriter.py from __future__ import annotations from collections import defaultdict import datetime import json from typing import TYPE_CHECKING, Any, DefaultDict, cast, overload from pandas.io.excel._base import ExcelWriter from pandas.io.excel._util import combine_kwargs, validate_freeze_panes if TYPE_CHECKING: from odf.opendocument import OpenDocumentSpreadsheet from pandas._typing import ExcelWriterIfSheetExists, FilePath, StorageOptions, WriteExcelBuffer from pandas.io.formats.excel import ExcelCell class ODSWriter(ExcelWriter): _engine = 'odf' _supported_extensions = ('.ods',) def __init__(self, path: FilePath | WriteExcelBuffer | ExcelWriter, engine: str | None=None, date_format: str | None=None, datetime_format: str | None=None, mode: str='w', storage_options: StorageOptions | None=None, if_sheet_exists: ExcelWriterIfSheetExists | None=None, engine_kwargs: dict[str, Any] | None=None, **kwargs: Any) -> None: from odf.opendocument import OpenDocumentSpreadsheet if mode == 'a': raise ValueError('Append mode is not supported with odf!') engine_kwargs = combine_kwargs(engine_kwargs, kwargs) self._book = OpenDocumentSpreadsheet(**engine_kwargs) super().__init__(path, mode=mode, storage_options=storage_options, if_sheet_exists=if_sheet_exists, engine_kwargs=engine_kwargs) self._style_dict: dict[str, str] = {} @property def book(self) -> OpenDocumentSpreadsheet: return self._book @property def sheets(self) -> dict[str, Any]: from odf.table import Table result = {sheet.getAttribute('name'): sheet for sheet in self.book.getElementsByType(Table)} return result def _save(self) -> None: for sheet in self.sheets.values(): self.book.spreadsheet.addElement(sheet) self.book.save(self._handles.handle) def _write_cells(self, cells: list[ExcelCell], sheet_name: str | None=None, startrow: int=0, startcol: int=0, freeze_panes: tuple[int, int] | None=None) -> None: from odf.table import Table, TableCell, TableRow from odf.text import P sheet_name = self._get_sheet_name(sheet_name) assert sheet_name is not None if sheet_name in self.sheets: wks = self.sheets[sheet_name] else: wks = Table(name=sheet_name) self.book.spreadsheet.addElement(wks) if validate_freeze_panes(freeze_panes): freeze_panes = cast(tuple[int, int], freeze_panes) self._create_freeze_panes(sheet_name, freeze_panes) for _ in range(startrow): wks.addElement(TableRow()) rows: DefaultDict = defaultdict(TableRow) col_count: DefaultDict = defaultdict(int) for cell in sorted(cells, key=lambda cell: (cell.row, cell.col)): if not col_count[cell.row]: for _ in range(startcol): rows[cell.row].addElement(TableCell()) for _ in range(cell.col - col_count[cell.row]): rows[cell.row].addElement(TableCell()) col_count[cell.row] += 1 (pvalue, tc) = self._make_table_cell(cell) rows[cell.row].addElement(tc) col_count[cell.row] += 1 p = P(text=pvalue) tc.addElement(p) if len(rows) > 0: for row_nr in range(max(rows.keys()) + 1): wks.addElement(rows[row_nr]) def _make_table_cell_attributes(self, cell: ExcelCell) -> dict[str, int | str]: attributes: dict[str, int | str] = {} style_name = self._process_style(cell.style) if style_name is not None: attributes['stylename'] = style_name if 
cell.mergestart is not None and cell.mergeend is not None: attributes['numberrowsspanned'] = max(1, cell.mergestart) attributes['numbercolumnsspanned'] = cell.mergeend return attributes def _make_table_cell(self, cell: ExcelCell) -> tuple[object, Any]: from odf.table import TableCell attributes = self._make_table_cell_attributes(cell) (val, fmt) = self._value_with_fmt(cell.val) pvalue = value = val if isinstance(val, bool): value = str(val).lower() pvalue = str(val).upper() return (pvalue, TableCell(valuetype='boolean', booleanvalue=value, attributes=attributes)) elif isinstance(val, datetime.datetime): value = val.isoformat() pvalue = val.strftime('%c') return (pvalue, TableCell(valuetype='date', datevalue=value, attributes=attributes)) elif isinstance(val, datetime.date): value = f'{val.year}-{val.month:02d}-{val.day:02d}' pvalue = val.strftime('%x') return (pvalue, TableCell(valuetype='date', datevalue=value, attributes=attributes)) elif isinstance(val, str): return (pvalue, TableCell(valuetype='string', stringvalue=value, attributes=attributes)) else: return (pvalue, TableCell(valuetype='float', value=value, attributes=attributes)) @overload def _process_style(self, style: dict[str, Any]) -> str: ... @overload def _process_style(self, style: None) -> None: ... def _process_style(self, style: dict[str, Any] | None) -> str | None: from odf.style import ParagraphProperties, Style, TableCellProperties, TextProperties if style is None: return None style_key = json.dumps(style) if style_key in self._style_dict: return self._style_dict[style_key] name = f'pd{len(self._style_dict) + 1}' self._style_dict[style_key] = name odf_style = Style(name=name, family='table-cell') if 'font' in style: font = style['font'] if font.get('bold', False): odf_style.addElement(TextProperties(fontweight='bold')) if 'borders' in style: borders = style['borders'] for (side, thickness) in borders.items(): thickness_translation = {'thin': '0.75pt solid #000000'} odf_style.addElement(TableCellProperties(attributes={f'border{side}': thickness_translation[thickness]})) if 'alignment' in style: alignment = style['alignment'] horizontal = alignment.get('horizontal') if horizontal: odf_style.addElement(ParagraphProperties(textalign=horizontal)) vertical = alignment.get('vertical') if vertical: odf_style.addElement(TableCellProperties(verticalalign=vertical)) self.book.styles.addElement(odf_style) return name def _create_freeze_panes(self, sheet_name: str, freeze_panes: tuple[int, int]) -> None: from odf.config import ConfigItem, ConfigItemMapEntry, ConfigItemMapIndexed, ConfigItemMapNamed, ConfigItemSet config_item_set = ConfigItemSet(name='ooo:view-settings') self.book.settings.addElement(config_item_set) config_item_map_indexed = ConfigItemMapIndexed(name='Views') config_item_set.addElement(config_item_map_indexed) config_item_map_entry = ConfigItemMapEntry() config_item_map_indexed.addElement(config_item_map_entry) config_item_map_named = ConfigItemMapNamed(name='Tables') config_item_map_entry.addElement(config_item_map_named) config_item_map_entry = ConfigItemMapEntry(name=sheet_name) config_item_map_named.addElement(config_item_map_entry) config_item_map_entry.addElement(ConfigItem(name='HorizontalSplitMode', type='short', text='2')) config_item_map_entry.addElement(ConfigItem(name='VerticalSplitMode', type='short', text='2')) config_item_map_entry.addElement(ConfigItem(name='HorizontalSplitPosition', type='int', text=str(freeze_panes[0]))) config_item_map_entry.addElement(ConfigItem(name='VerticalSplitPosition', 
type='int', text=str(freeze_panes[1]))) config_item_map_entry.addElement(ConfigItem(name='PositionRight', type='int', text=str(freeze_panes[0]))) config_item_map_entry.addElement(ConfigItem(name='PositionBottom', type='int', text=str(freeze_panes[1]))) # File: pandas-main/pandas/io/excel/_openpyxl.py from __future__ import annotations import mmap from typing import TYPE_CHECKING, Any, cast import numpy as np from pandas.compat._optional import import_optional_dependency from pandas.util._decorators import doc from pandas.core.shared_docs import _shared_docs from pandas.io.excel._base import BaseExcelReader, ExcelWriter from pandas.io.excel._util import combine_kwargs, validate_freeze_panes if TYPE_CHECKING: from openpyxl import Workbook from openpyxl.descriptors.serialisable import Serialisable from openpyxl.styles import Fill from pandas._typing import ExcelWriterIfSheetExists, FilePath, ReadBuffer, Scalar, StorageOptions, WriteExcelBuffer class OpenpyxlWriter(ExcelWriter): _engine = 'openpyxl' _supported_extensions = ('.xlsx', '.xlsm') def __init__(self, path: FilePath | WriteExcelBuffer | ExcelWriter, engine: str | None=None, date_format: str | None=None, datetime_format: str | None=None, mode: str='w', storage_options: StorageOptions | None=None, if_sheet_exists: ExcelWriterIfSheetExists | None=None, engine_kwargs: dict[str, Any] | None=None, **kwargs) -> None: from openpyxl.workbook import Workbook engine_kwargs = combine_kwargs(engine_kwargs, kwargs) super().__init__(path, mode=mode, storage_options=storage_options, if_sheet_exists=if_sheet_exists, engine_kwargs=engine_kwargs) if 'r+' in self._mode: from openpyxl import load_workbook try: self._book = load_workbook(self._handles.handle, **engine_kwargs) except TypeError: self._handles.handle.close() raise self._handles.handle.seek(0) else: try: self._book = Workbook(**engine_kwargs) except TypeError: self._handles.handle.close() raise if self.book.worksheets: self.book.remove(self.book.worksheets[0]) @property def book(self) -> Workbook: return self._book @property def sheets(self) -> dict[str, Any]: result = {name: self.book[name] for name in self.book.sheetnames} return result def _save(self) -> None: self.book.save(self._handles.handle) if 'r+' in self._mode and (not isinstance(self._handles.handle, mmap.mmap)): self._handles.handle.truncate() @classmethod def _convert_to_style_kwargs(cls, style_dict: dict) -> dict[str, Serialisable]: _style_key_map = {'borders': 'border'} style_kwargs: dict[str, Serialisable] = {} for (k, v) in style_dict.items(): k = _style_key_map.get(k, k) _conv_to_x = getattr(cls, f'_convert_to_{k}', lambda x: None) new_v = _conv_to_x(v) if new_v: style_kwargs[k] = new_v return style_kwargs @classmethod def _convert_to_color(cls, color_spec): from openpyxl.styles import Color if isinstance(color_spec, str): return Color(color_spec) else: return Color(**color_spec) @classmethod def _convert_to_font(cls, font_dict): from openpyxl.styles import Font _font_key_map = {'sz': 'size', 'b': 'bold', 'i': 'italic', 'u': 'underline', 'strike': 'strikethrough', 'vertalign': 'vertAlign'} font_kwargs = {} for (k, v) in font_dict.items(): k = _font_key_map.get(k, k) if k == 'color': v = cls._convert_to_color(v) font_kwargs[k] = v return Font(**font_kwargs) @classmethod def _convert_to_stop(cls, stop_seq): return map(cls._convert_to_color, stop_seq) @classmethod def _convert_to_fill(cls, fill_dict: dict[str, Any]) -> Fill: from openpyxl.styles import GradientFill, PatternFill _pattern_fill_key_map = {'patternType': 
'fill_type', 'patterntype': 'fill_type', 'fgColor': 'start_color', 'fgcolor': 'start_color', 'bgColor': 'end_color', 'bgcolor': 'end_color'} _gradient_fill_key_map = {'fill_type': 'type'} pfill_kwargs = {} gfill_kwargs = {} for (k, v) in fill_dict.items(): pk = _pattern_fill_key_map.get(k) gk = _gradient_fill_key_map.get(k) if pk in ['start_color', 'end_color']: v = cls._convert_to_color(v) if gk == 'stop': v = cls._convert_to_stop(v) if pk: pfill_kwargs[pk] = v elif gk: gfill_kwargs[gk] = v else: pfill_kwargs[k] = v gfill_kwargs[k] = v try: return PatternFill(**pfill_kwargs) except TypeError: return GradientFill(**gfill_kwargs) @classmethod def _convert_to_side(cls, side_spec): from openpyxl.styles import Side _side_key_map = {'border_style': 'style'} if isinstance(side_spec, str): return Side(style=side_spec) side_kwargs = {} for (k, v) in side_spec.items(): k = _side_key_map.get(k, k) if k == 'color': v = cls._convert_to_color(v) side_kwargs[k] = v return Side(**side_kwargs) @classmethod def _convert_to_border(cls, border_dict): from openpyxl.styles import Border _border_key_map = {'diagonalup': 'diagonalUp', 'diagonaldown': 'diagonalDown'} border_kwargs = {} for (k, v) in border_dict.items(): k = _border_key_map.get(k, k) if k == 'color': v = cls._convert_to_color(v) if k in ['left', 'right', 'top', 'bottom', 'diagonal']: v = cls._convert_to_side(v) border_kwargs[k] = v return Border(**border_kwargs) @classmethod def _convert_to_alignment(cls, alignment_dict): from openpyxl.styles import Alignment return Alignment(**alignment_dict) @classmethod def _convert_to_number_format(cls, number_format_dict): return number_format_dict['format_code'] @classmethod def _convert_to_protection(cls, protection_dict): from openpyxl.styles import Protection return Protection(**protection_dict) def _write_cells(self, cells, sheet_name: str | None=None, startrow: int=0, startcol: int=0, freeze_panes: tuple[int, int] | None=None) -> None: sheet_name = self._get_sheet_name(sheet_name) _style_cache: dict[str, dict[str, Serialisable]] = {} if sheet_name in self.sheets and self._if_sheet_exists != 'new': if 'r+' in self._mode: if self._if_sheet_exists == 'replace': old_wks = self.sheets[sheet_name] target_index = self.book.index(old_wks) del self.book[sheet_name] wks = self.book.create_sheet(sheet_name, target_index) elif self._if_sheet_exists == 'error': raise ValueError(f"Sheet '{sheet_name}' already exists and if_sheet_exists is set to 'error'.") elif self._if_sheet_exists == 'overlay': wks = self.sheets[sheet_name] else: raise ValueError(f"'{self._if_sheet_exists}' is not valid for if_sheet_exists. 
Valid options are 'error', 'new', 'replace' and 'overlay'.") else: wks = self.sheets[sheet_name] else: wks = self.book.create_sheet() wks.title = sheet_name if validate_freeze_panes(freeze_panes): freeze_panes = cast(tuple[int, int], freeze_panes) wks.freeze_panes = wks.cell(row=freeze_panes[0] + 1, column=freeze_panes[1] + 1) for cell in cells: xcell = wks.cell(row=startrow + cell.row + 1, column=startcol + cell.col + 1) (xcell.value, fmt) = self._value_with_fmt(cell.val) if fmt: xcell.number_format = fmt style_kwargs: dict[str, Serialisable] | None = {} if cell.style: key = str(cell.style) style_kwargs = _style_cache.get(key) if style_kwargs is None: style_kwargs = self._convert_to_style_kwargs(cell.style) _style_cache[key] = style_kwargs if style_kwargs: for (k, v) in style_kwargs.items(): setattr(xcell, k, v) if cell.mergestart is not None and cell.mergeend is not None: wks.merge_cells(start_row=startrow + cell.row + 1, start_column=startcol + cell.col + 1, end_column=startcol + cell.mergeend + 1, end_row=startrow + cell.mergestart + 1) if style_kwargs: first_row = startrow + cell.row + 1 last_row = startrow + cell.mergestart + 1 first_col = startcol + cell.col + 1 last_col = startcol + cell.mergeend + 1 for row in range(first_row, last_row + 1): for col in range(first_col, last_col + 1): if row == first_row and col == first_col: continue xcell = wks.cell(column=col, row=row) for (k, v) in style_kwargs.items(): setattr(xcell, k, v) class OpenpyxlReader(BaseExcelReader['Workbook']): @doc(storage_options=_shared_docs['storage_options']) def __init__(self, filepath_or_buffer: FilePath | ReadBuffer[bytes], storage_options: StorageOptions | None=None, engine_kwargs: dict | None=None) -> None: import_optional_dependency('openpyxl') super().__init__(filepath_or_buffer, storage_options=storage_options, engine_kwargs=engine_kwargs) @property def _workbook_class(self) -> type[Workbook]: from openpyxl import Workbook return Workbook def load_workbook(self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs) -> Workbook: from openpyxl import load_workbook default_kwargs = {'read_only': True, 'data_only': True, 'keep_links': False} return load_workbook(filepath_or_buffer, **default_kwargs | engine_kwargs) @property def sheet_names(self) -> list[str]: return [sheet.title for sheet in self.book.worksheets] def get_sheet_by_name(self, name: str): self.raise_if_bad_sheet_by_name(name) return self.book[name] def get_sheet_by_index(self, index: int): self.raise_if_bad_sheet_by_index(index) return self.book.worksheets[index] def _convert_cell(self, cell) -> Scalar: from openpyxl.cell.cell import TYPE_ERROR, TYPE_NUMERIC if cell.value is None: return '' elif cell.data_type == TYPE_ERROR: return np.nan elif cell.data_type == TYPE_NUMERIC: val = int(cell.value) if val == cell.value: return val return float(cell.value) return cell.value def get_sheet_data(self, sheet, file_rows_needed: int | None=None) -> list[list[Scalar]]: if self.book.read_only: sheet.reset_dimensions() data: list[list[Scalar]] = [] last_row_with_data = -1 for (row_number, row) in enumerate(sheet.rows): converted_row = [self._convert_cell(cell) for cell in row] while converted_row and converted_row[-1] == '': converted_row.pop() if converted_row: last_row_with_data = row_number data.append(converted_row) if file_rows_needed is not None and len(data) >= file_rows_needed: break data = data[:last_row_with_data + 1] if len(data) > 0: max_width = max((len(data_row) for data_row in data)) if min((len(data_row) for data_row in data)) 
< max_width: empty_cell: list[Scalar] = [''] data = [data_row + (max_width - len(data_row)) * empty_cell for data_row in data] return data # File: pandas-main/pandas/io/excel/_pyxlsb.py from __future__ import annotations from typing import TYPE_CHECKING from pandas.compat._optional import import_optional_dependency from pandas.util._decorators import doc from pandas.core.shared_docs import _shared_docs from pandas.io.excel._base import BaseExcelReader if TYPE_CHECKING: from pyxlsb import Workbook from pandas._typing import FilePath, ReadBuffer, Scalar, StorageOptions class PyxlsbReader(BaseExcelReader['Workbook']): @doc(storage_options=_shared_docs['storage_options']) def __init__(self, filepath_or_buffer: FilePath | ReadBuffer[bytes], storage_options: StorageOptions | None=None, engine_kwargs: dict | None=None) -> None: import_optional_dependency('pyxlsb') super().__init__(filepath_or_buffer, storage_options=storage_options, engine_kwargs=engine_kwargs) @property def _workbook_class(self) -> type[Workbook]: from pyxlsb import Workbook return Workbook def load_workbook(self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs) -> Workbook: from pyxlsb import open_workbook return open_workbook(filepath_or_buffer, **engine_kwargs) @property def sheet_names(self) -> list[str]: return self.book.sheets def get_sheet_by_name(self, name: str): self.raise_if_bad_sheet_by_name(name) return self.book.get_sheet(name) def get_sheet_by_index(self, index: int): self.raise_if_bad_sheet_by_index(index) return self.book.get_sheet(index + 1) def _convert_cell(self, cell) -> Scalar: if cell.v is None: return '' if isinstance(cell.v, float): val = int(cell.v) if val == cell.v: return val else: return float(cell.v) return cell.v def get_sheet_data(self, sheet, file_rows_needed: int | None=None) -> list[list[Scalar]]: data: list[list[Scalar]] = [] previous_row_number = -1 for row in sheet.rows(sparse=True): row_number = row[0].r converted_row = [self._convert_cell(cell) for cell in row] while converted_row and converted_row[-1] == '': converted_row.pop() if converted_row: data.extend([[]] * (row_number - previous_row_number - 1)) data.append(converted_row) previous_row_number = row_number if file_rows_needed is not None and len(data) >= file_rows_needed: break if data: max_width = max((len(data_row) for data_row in data)) if min((len(data_row) for data_row in data)) < max_width: empty_cell: list[Scalar] = [''] data = [data_row + (max_width - len(data_row)) * empty_cell for data_row in data] return data # File: pandas-main/pandas/io/excel/_util.py from __future__ import annotations from collections.abc import Callable, Hashable, Iterable, MutableMapping, Sequence from typing import TYPE_CHECKING, Any, Literal, TypeVar, overload from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.common import is_integer, is_list_like if TYPE_CHECKING: from pandas.io.excel._base import ExcelWriter ExcelWriter_t = type[ExcelWriter] usecols_func = TypeVar('usecols_func', bound=Callable[[Hashable], object]) _writers: MutableMapping[str, ExcelWriter_t] = {} def register_writer(klass: ExcelWriter_t) -> None: if not callable(klass): raise ValueError('Can only register callables as engines') engine_name = klass._engine _writers[engine_name] = klass def get_default_engine(ext: str, mode: Literal['reader', 'writer']='reader') -> str: _default_readers = {'xlsx': 'openpyxl', 'xlsm': 'openpyxl', 'xlsb': 'pyxlsb', 'xls': 'xlrd', 'ods': 'odf'} _default_writers = {'xlsx': 'openpyxl', 'xlsm': 
'openpyxl', 'xlsb': 'pyxlsb', 'ods': 'odf'} assert mode in ['reader', 'writer'] if mode == 'writer': xlsxwriter = import_optional_dependency('xlsxwriter', errors='warn') if xlsxwriter: _default_writers['xlsx'] = 'xlsxwriter' return _default_writers[ext] else: return _default_readers[ext] def get_writer(engine_name: str) -> ExcelWriter_t: try: return _writers[engine_name] except KeyError as err: raise ValueError(f"No Excel writer '{engine_name}'") from err def _excel2num(x: str) -> int: index = 0 for c in x.upper().strip(): cp = ord(c) if cp < ord('A') or cp > ord('Z'): raise ValueError(f'Invalid column name: {x}') index = index * 26 + cp - ord('A') + 1 return index - 1 def _range2cols(areas: str) -> list[int]: cols: list[int] = [] for rng in areas.split(','): if ':' in rng: rngs = rng.split(':') cols.extend(range(_excel2num(rngs[0]), _excel2num(rngs[1]) + 1)) else: cols.append(_excel2num(rng)) return cols @overload def maybe_convert_usecols(usecols: str | list[int]) -> list[int]: ... @overload def maybe_convert_usecols(usecols: list[str]) -> list[str]: ... @overload def maybe_convert_usecols(usecols: usecols_func) -> usecols_func: ... @overload def maybe_convert_usecols(usecols: None) -> None: ... def maybe_convert_usecols(usecols: str | list[int] | list[str] | usecols_func | None) -> None | list[int] | list[str] | usecols_func: if usecols is None: return usecols if is_integer(usecols): raise ValueError('Passing an integer for `usecols` is no longer supported. Please pass in a list of int from 0 to `usecols` inclusive instead.') if isinstance(usecols, str): return _range2cols(usecols) return usecols @overload def validate_freeze_panes(freeze_panes: tuple[int, int]) -> Literal[True]: ... @overload def validate_freeze_panes(freeze_panes: None) -> Literal[False]: ... 
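# ---------------------------------------------------------------------------
# Illustrative usage sketch (assumption: these are private helpers of
# pandas.io.excel._util and may change without notice). This is how a string
# `usecols` such as "A:C,F" passed to read_excel is turned into 0-based
# column positions by maybe_convert_usecols/_range2cols above.
# ---------------------------------------------------------------------------
from pandas.io.excel._util import _excel2num, _range2cols

assert _excel2num("A") == 0
assert _excel2num("AB") == 27                # 1 * 26 + 2 - 1
assert _range2cols("A:C,F") == [0, 1, 2, 5]  # letter ranges are inclusive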
def validate_freeze_panes(freeze_panes: tuple[int, int] | None) -> bool: if freeze_panes is not None: if len(freeze_panes) == 2 and all((isinstance(item, int) for item in freeze_panes)): return True raise ValueError('freeze_panes must be of form (row, column) where row and column are integers') return False def fill_mi_header(row: list[Hashable], control_row: list[bool]) -> tuple[list[Hashable], list[bool]]: last = row[0] for i in range(1, len(row)): if not control_row[i]: last = row[i] if row[i] == '' or row[i] is None: row[i] = last else: control_row[i] = False last = row[i] return (row, control_row) def pop_header_name(row: list[Hashable], index_col: int | Sequence[int]) -> tuple[Hashable | None, list[Hashable]]: if is_list_like(index_col): assert isinstance(index_col, Iterable) i = max(index_col) else: assert not isinstance(index_col, Iterable) i = index_col header_name = row[i] header_name = None if header_name == '' else header_name return (header_name, row[:i] + [''] + row[i + 1:]) def combine_kwargs(engine_kwargs: dict[str, Any] | None, kwargs: dict) -> dict: if engine_kwargs is None: result = {} else: result = engine_kwargs.copy() result.update(kwargs) return result # File: pandas-main/pandas/io/excel/_xlrd.py from __future__ import annotations from datetime import time import math from typing import TYPE_CHECKING import numpy as np from pandas.compat._optional import import_optional_dependency from pandas.util._decorators import doc from pandas.core.shared_docs import _shared_docs from pandas.io.excel._base import BaseExcelReader if TYPE_CHECKING: from xlrd import Book from pandas._typing import Scalar, StorageOptions class XlrdReader(BaseExcelReader['Book']): @doc(storage_options=_shared_docs['storage_options']) def __init__(self, filepath_or_buffer, storage_options: StorageOptions | None=None, engine_kwargs: dict | None=None) -> None: err_msg = 'Install xlrd >= 2.0.1 for xls Excel support' import_optional_dependency('xlrd', extra=err_msg) super().__init__(filepath_or_buffer, storage_options=storage_options, engine_kwargs=engine_kwargs) @property def _workbook_class(self) -> type[Book]: from xlrd import Book return Book def load_workbook(self, filepath_or_buffer, engine_kwargs) -> Book: from xlrd import open_workbook if hasattr(filepath_or_buffer, 'read'): data = filepath_or_buffer.read() return open_workbook(file_contents=data, **engine_kwargs) else: return open_workbook(filepath_or_buffer, **engine_kwargs) @property def sheet_names(self): return self.book.sheet_names() def get_sheet_by_name(self, name): self.raise_if_bad_sheet_by_name(name) return self.book.sheet_by_name(name) def get_sheet_by_index(self, index): self.raise_if_bad_sheet_by_index(index) return self.book.sheet_by_index(index) def get_sheet_data(self, sheet, file_rows_needed: int | None=None) -> list[list[Scalar]]: from xlrd import XL_CELL_BOOLEAN, XL_CELL_DATE, XL_CELL_ERROR, XL_CELL_NUMBER, xldate epoch1904 = self.book.datemode def _parse_cell(cell_contents, cell_typ): if cell_typ == XL_CELL_DATE: try: cell_contents = xldate.xldate_as_datetime(cell_contents, epoch1904) except OverflowError: return cell_contents year = cell_contents.timetuple()[0:3] if not epoch1904 and year == (1899, 12, 31) or (epoch1904 and year == (1904, 1, 1)): cell_contents = time(cell_contents.hour, cell_contents.minute, cell_contents.second, cell_contents.microsecond) elif cell_typ == XL_CELL_ERROR: cell_contents = np.nan elif cell_typ == XL_CELL_BOOLEAN: cell_contents = bool(cell_contents) elif cell_typ == XL_CELL_NUMBER: if 
math.isfinite(cell_contents): val = int(cell_contents) if val == cell_contents: cell_contents = val return cell_contents nrows = sheet.nrows if file_rows_needed is not None: nrows = min(nrows, file_rows_needed) return [[_parse_cell(value, typ) for (value, typ) in zip(sheet.row_values(i), sheet.row_types(i))] for i in range(nrows)] # File: pandas-main/pandas/io/excel/_xlsxwriter.py from __future__ import annotations import json from typing import TYPE_CHECKING, Any from pandas.io.excel._base import ExcelWriter from pandas.io.excel._util import combine_kwargs, validate_freeze_panes if TYPE_CHECKING: from pandas._typing import ExcelWriterIfSheetExists, FilePath, StorageOptions, WriteExcelBuffer class _XlsxStyler: STYLE_MAPPING: dict[str, list[tuple[tuple[str, ...], str]]] = {'font': [(('name',), 'font_name'), (('sz',), 'font_size'), (('size',), 'font_size'), (('color', 'rgb'), 'font_color'), (('color',), 'font_color'), (('b',), 'bold'), (('bold',), 'bold'), (('i',), 'italic'), (('italic',), 'italic'), (('u',), 'underline'), (('underline',), 'underline'), (('strike',), 'font_strikeout'), (('vertAlign',), 'font_script'), (('vertalign',), 'font_script')], 'number_format': [(('format_code',), 'num_format'), ((), 'num_format')], 'protection': [(('locked',), 'locked'), (('hidden',), 'hidden')], 'alignment': [(('horizontal',), 'align'), (('vertical',), 'valign'), (('text_rotation',), 'rotation'), (('wrap_text',), 'text_wrap'), (('indent',), 'indent'), (('shrink_to_fit',), 'shrink')], 'fill': [(('patternType',), 'pattern'), (('patterntype',), 'pattern'), (('fill_type',), 'pattern'), (('start_color', 'rgb'), 'fg_color'), (('fgColor', 'rgb'), 'fg_color'), (('fgcolor', 'rgb'), 'fg_color'), (('start_color',), 'fg_color'), (('fgColor',), 'fg_color'), (('fgcolor',), 'fg_color'), (('end_color', 'rgb'), 'bg_color'), (('bgColor', 'rgb'), 'bg_color'), (('bgcolor', 'rgb'), 'bg_color'), (('end_color',), 'bg_color'), (('bgColor',), 'bg_color'), (('bgcolor',), 'bg_color')], 'border': [(('color', 'rgb'), 'border_color'), (('color',), 'border_color'), (('style',), 'border'), (('top', 'color', 'rgb'), 'top_color'), (('top', 'color'), 'top_color'), (('top', 'style'), 'top'), (('top',), 'top'), (('right', 'color', 'rgb'), 'right_color'), (('right', 'color'), 'right_color'), (('right', 'style'), 'right'), (('right',), 'right'), (('bottom', 'color', 'rgb'), 'bottom_color'), (('bottom', 'color'), 'bottom_color'), (('bottom', 'style'), 'bottom'), (('bottom',), 'bottom'), (('left', 'color', 'rgb'), 'left_color'), (('left', 'color'), 'left_color'), (('left', 'style'), 'left'), (('left',), 'left')]} @classmethod def convert(cls, style_dict, num_format_str=None) -> dict[str, Any]: props = {} if num_format_str is not None: props['num_format'] = num_format_str if style_dict is None: return props if 'borders' in style_dict: style_dict = style_dict.copy() style_dict['border'] = style_dict.pop('borders') for (style_group_key, style_group) in style_dict.items(): for (src, dst) in cls.STYLE_MAPPING.get(style_group_key, []): if dst in props: continue v = style_group for k in src: try: v = v[k] except (KeyError, TypeError): break else: props[dst] = v if isinstance(props.get('pattern'), str): props['pattern'] = 0 if props['pattern'] == 'none' else 1 for k in ['border', 'top', 'right', 'bottom', 'left']: if isinstance(props.get(k), str): try: props[k] = ['none', 'thin', 'medium', 'dashed', 'dotted', 'thick', 'double', 'hair', 'mediumDashed', 'dashDot', 'mediumDashDot', 'dashDotDot', 'mediumDashDotDot', 'slantDashDot'].index(props[k]) 
except ValueError: props[k] = 2 if isinstance(props.get('font_script'), str): props['font_script'] = ['baseline', 'superscript', 'subscript'].index(props['font_script']) if isinstance(props.get('underline'), str): props['underline'] = {'none': 0, 'single': 1, 'double': 2, 'singleAccounting': 33, 'doubleAccounting': 34}[props['underline']] if props.get('valign') == 'center': props['valign'] = 'vcenter' return props class XlsxWriter(ExcelWriter): _engine = 'xlsxwriter' _supported_extensions = ('.xlsx',) def __init__(self, path: FilePath | WriteExcelBuffer | ExcelWriter, engine: str | None=None, date_format: str | None=None, datetime_format: str | None=None, mode: str='w', storage_options: StorageOptions | None=None, if_sheet_exists: ExcelWriterIfSheetExists | None=None, engine_kwargs: dict[str, Any] | None=None, **kwargs) -> None: from xlsxwriter import Workbook engine_kwargs = combine_kwargs(engine_kwargs, kwargs) if mode == 'a': raise ValueError('Append mode is not supported with xlsxwriter!') super().__init__(path, engine=engine, date_format=date_format, datetime_format=datetime_format, mode=mode, storage_options=storage_options, if_sheet_exists=if_sheet_exists, engine_kwargs=engine_kwargs) try: self._book = Workbook(self._handles.handle, **engine_kwargs) except TypeError: self._handles.handle.close() raise @property def book(self): return self._book @property def sheets(self) -> dict[str, Any]: result = self.book.sheetnames return result def _save(self) -> None: self.book.close() def _write_cells(self, cells, sheet_name: str | None=None, startrow: int=0, startcol: int=0, freeze_panes: tuple[int, int] | None=None) -> None: sheet_name = self._get_sheet_name(sheet_name) wks = self.book.get_worksheet_by_name(sheet_name) if wks is None: wks = self.book.add_worksheet(sheet_name) style_dict = {'null': None} if validate_freeze_panes(freeze_panes): wks.freeze_panes(*freeze_panes) for cell in cells: (val, fmt) = self._value_with_fmt(cell.val) stylekey = json.dumps(cell.style) if fmt: stylekey += fmt if stylekey in style_dict: style = style_dict[stylekey] else: style = self.book.add_format(_XlsxStyler.convert(cell.style, fmt)) style_dict[stylekey] = style if cell.mergestart is not None and cell.mergeend is not None: wks.merge_range(startrow + cell.row, startcol + cell.col, startrow + cell.mergestart, startcol + cell.mergeend, val, style) else: wks.write(startrow + cell.row, startcol + cell.col, val, style) # File: pandas-main/pandas/io/feather_format.py """""" from __future__ import annotations from typing import TYPE_CHECKING, Any import warnings from pandas._config import using_string_dtype from pandas._libs import lib from pandas.compat._optional import import_optional_dependency from pandas.util._decorators import doc from pandas.util._validators import check_dtype_backend import pandas as pd from pandas.core.api import DataFrame from pandas.core.shared_docs import _shared_docs from pandas.io._util import arrow_string_types_mapper from pandas.io.common import get_handle if TYPE_CHECKING: from collections.abc import Hashable, Sequence from pandas._typing import DtypeBackend, FilePath, ReadBuffer, StorageOptions, WriteBuffer @doc(storage_options=_shared_docs['storage_options']) def to_feather(df: DataFrame, path: FilePath | WriteBuffer[bytes], storage_options: StorageOptions | None=None, **kwargs: Any) -> None: import_optional_dependency('pyarrow') from pyarrow import feather if not isinstance(df, DataFrame): raise ValueError('feather only support IO with DataFrames') with get_handle(path, 'wb', 
storage_options=storage_options, is_text=False) as handles: feather.write_feather(df, handles.handle, **kwargs) @doc(storage_options=_shared_docs['storage_options']) def read_feather(path: FilePath | ReadBuffer[bytes], columns: Sequence[Hashable] | None=None, use_threads: bool=True, storage_options: StorageOptions | None=None, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default) -> DataFrame: import_optional_dependency('pyarrow') from pyarrow import feather import pandas.core.arrays.arrow.extension_types check_dtype_backend(dtype_backend) with get_handle(path, 'rb', storage_options=storage_options, is_text=False) as handles: if dtype_backend is lib.no_default and (not using_string_dtype()): with warnings.catch_warnings(): warnings.filterwarnings('ignore', 'make_block is deprecated', DeprecationWarning) return feather.read_feather(handles.handle, columns=columns, use_threads=bool(use_threads)) pa_table = feather.read_table(handles.handle, columns=columns, use_threads=bool(use_threads)) if dtype_backend == 'numpy_nullable': from pandas.io._util import _arrow_dtype_mapping return pa_table.to_pandas(types_mapper=_arrow_dtype_mapping().get) elif dtype_backend == 'pyarrow': return pa_table.to_pandas(types_mapper=pd.ArrowDtype) elif using_string_dtype(): return pa_table.to_pandas(types_mapper=arrow_string_types_mapper()) else: raise NotImplementedError # File: pandas-main/pandas/io/formats/_color_data.py from __future__ import annotations CSS4_COLORS = {'aliceblue': 'F0F8FF', 'antiquewhite': 'FAEBD7', 'aqua': '00FFFF', 'aquamarine': '7FFFD4', 'azure': 'F0FFFF', 'beige': 'F5F5DC', 'bisque': 'FFE4C4', 'black': '000000', 'blanchedalmond': 'FFEBCD', 'blue': '0000FF', 'blueviolet': '8A2BE2', 'brown': 'A52A2A', 'burlywood': 'DEB887', 'cadetblue': '5F9EA0', 'chartreuse': '7FFF00', 'chocolate': 'D2691E', 'coral': 'FF7F50', 'cornflowerblue': '6495ED', 'cornsilk': 'FFF8DC', 'crimson': 'DC143C', 'cyan': '00FFFF', 'darkblue': '00008B', 'darkcyan': '008B8B', 'darkgoldenrod': 'B8860B', 'darkgray': 'A9A9A9', 'darkgreen': '006400', 'darkgrey': 'A9A9A9', 'darkkhaki': 'BDB76B', 'darkmagenta': '8B008B', 'darkolivegreen': '556B2F', 'darkorange': 'FF8C00', 'darkorchid': '9932CC', 'darkred': '8B0000', 'darksalmon': 'E9967A', 'darkseagreen': '8FBC8F', 'darkslateblue': '483D8B', 'darkslategray': '2F4F4F', 'darkslategrey': '2F4F4F', 'darkturquoise': '00CED1', 'darkviolet': '9400D3', 'deeppink': 'FF1493', 'deepskyblue': '00BFFF', 'dimgray': '696969', 'dimgrey': '696969', 'dodgerblue': '1E90FF', 'firebrick': 'B22222', 'floralwhite': 'FFFAF0', 'forestgreen': '228B22', 'fuchsia': 'FF00FF', 'gainsboro': 'DCDCDC', 'ghostwhite': 'F8F8FF', 'gold': 'FFD700', 'goldenrod': 'DAA520', 'gray': '808080', 'green': '008000', 'greenyellow': 'ADFF2F', 'grey': '808080', 'honeydew': 'F0FFF0', 'hotpink': 'FF69B4', 'indianred': 'CD5C5C', 'indigo': '4B0082', 'ivory': 'FFFFF0', 'khaki': 'F0E68C', 'lavender': 'E6E6FA', 'lavenderblush': 'FFF0F5', 'lawngreen': '7CFC00', 'lemonchiffon': 'FFFACD', 'lightblue': 'ADD8E6', 'lightcoral': 'F08080', 'lightcyan': 'E0FFFF', 'lightgoldenrodyellow': 'FAFAD2', 'lightgray': 'D3D3D3', 'lightgreen': '90EE90', 'lightgrey': 'D3D3D3', 'lightpink': 'FFB6C1', 'lightsalmon': 'FFA07A', 'lightseagreen': '20B2AA', 'lightskyblue': '87CEFA', 'lightslategray': '778899', 'lightslategrey': '778899', 'lightsteelblue': 'B0C4DE', 'lightyellow': 'FFFFE0', 'lime': '00FF00', 'limegreen': '32CD32', 'linen': 'FAF0E6', 'magenta': 'FF00FF', 'maroon': '800000', 'mediumaquamarine': '66CDAA', 'mediumblue': '0000CD', 'mediumorchid': 
'BA55D3', 'mediumpurple': '9370DB', 'mediumseagreen': '3CB371', 'mediumslateblue': '7B68EE', 'mediumspringgreen': '00FA9A', 'mediumturquoise': '48D1CC', 'mediumvioletred': 'C71585', 'midnightblue': '191970', 'mintcream': 'F5FFFA', 'mistyrose': 'FFE4E1', 'moccasin': 'FFE4B5', 'navajowhite': 'FFDEAD', 'navy': '000080', 'oldlace': 'FDF5E6', 'olive': '808000', 'olivedrab': '6B8E23', 'orange': 'FFA500', 'orangered': 'FF4500', 'orchid': 'DA70D6', 'palegoldenrod': 'EEE8AA', 'palegreen': '98FB98', 'paleturquoise': 'AFEEEE', 'palevioletred': 'DB7093', 'papayawhip': 'FFEFD5', 'peachpuff': 'FFDAB9', 'peru': 'CD853F', 'pink': 'FFC0CB', 'plum': 'DDA0DD', 'powderblue': 'B0E0E6', 'purple': '800080', 'rebeccapurple': '663399', 'red': 'FF0000', 'rosybrown': 'BC8F8F', 'royalblue': '4169E1', 'saddlebrown': '8B4513', 'salmon': 'FA8072', 'sandybrown': 'F4A460', 'seagreen': '2E8B57', 'seashell': 'FFF5EE', 'sienna': 'A0522D', 'silver': 'C0C0C0', 'skyblue': '87CEEB', 'slateblue': '6A5ACD', 'slategray': '708090', 'slategrey': '708090', 'snow': 'FFFAFA', 'springgreen': '00FF7F', 'steelblue': '4682B4', 'tan': 'D2B48C', 'teal': '008080', 'thistle': 'D8BFD8', 'tomato': 'FF6347', 'turquoise': '40E0D0', 'violet': 'EE82EE', 'wheat': 'F5DEB3', 'white': 'FFFFFF', 'whitesmoke': 'F5F5F5', 'yellow': 'FFFF00', 'yellowgreen': '9ACD32'} # File: pandas-main/pandas/io/formats/console.py """""" from __future__ import annotations from shutil import get_terminal_size def get_console_size() -> tuple[int | None, int | None]: from pandas import get_option display_width = get_option('display.width') display_height = get_option('display.max_rows') if in_interactive_session(): if in_ipython_frontend(): from pandas._config.config import get_default_val terminal_width = get_default_val('display.width') terminal_height = get_default_val('display.max_rows') else: (terminal_width, terminal_height) = get_terminal_size() else: (terminal_width, terminal_height) = (None, None) return (display_width or terminal_width, display_height or terminal_height) def in_interactive_session() -> bool: from pandas import get_option def check_main() -> bool: try: import __main__ as main except ModuleNotFoundError: return get_option('mode.sim_interactive') return not hasattr(main, '__file__') or get_option('mode.sim_interactive') try: return __IPYTHON__ or check_main() except NameError: return check_main() def in_ipython_frontend() -> bool: try: ip = get_ipython() return 'zmq' in str(type(ip)).lower() except NameError: pass return False # File: pandas-main/pandas/io/formats/css.py """""" from __future__ import annotations import re from typing import TYPE_CHECKING import warnings from pandas.errors import CSSWarning from pandas.util._exceptions import find_stack_level if TYPE_CHECKING: from collections.abc import Callable, Generator, Iterable, Iterator def _side_expander(prop_fmt: str) -> Callable: def expand(self: CSSResolver, prop: str, value: str) -> Generator[tuple[str, str], None, None]: tokens = value.split() try: mapping = self.SIDE_SHORTHANDS[len(tokens)] except KeyError: warnings.warn(f'Could not expand "{prop}: {value}"', CSSWarning, stacklevel=find_stack_level()) return for (key, idx) in zip(self.SIDES, mapping): yield (prop_fmt.format(key), tokens[idx]) return expand def _border_expander(side: str='') -> Callable: if side != '': side = f'-{side}' def expand(self: CSSResolver, prop: str, value: str) -> Generator[tuple[str, str], None, None]: tokens = value.split() if len(tokens) == 0 or len(tokens) > 3: warnings.warn(f'Too many tokens provided to 
"{prop}" (expected 1-3)', CSSWarning, stacklevel=find_stack_level()) border_declarations = {f'border{side}-color': 'black', f'border{side}-style': 'none', f'border{side}-width': 'medium'} for token in tokens: if token.lower() in self.BORDER_STYLES: border_declarations[f'border{side}-style'] = token elif any((ratio in token.lower() for ratio in self.BORDER_WIDTH_RATIOS)): border_declarations[f'border{side}-width'] = token else: border_declarations[f'border{side}-color'] = token yield from self.atomize(border_declarations.items()) return expand class CSSResolver: UNIT_RATIOS = {'pt': ('pt', 1), 'em': ('em', 1), 'rem': ('pt', 12), 'ex': ('em', 0.5), 'px': ('pt', 0.75), 'pc': ('pt', 12), 'in': ('pt', 72), 'cm': ('in', 1 / 2.54), 'mm': ('in', 1 / 25.4), 'q': ('mm', 0.25), '!!default': ('em', 0)} FONT_SIZE_RATIOS = UNIT_RATIOS.copy() FONT_SIZE_RATIOS.update({'%': ('em', 0.01), 'xx-small': ('rem', 0.5), 'x-small': ('rem', 0.625), 'small': ('rem', 0.8), 'medium': ('rem', 1), 'large': ('rem', 1.125), 'x-large': ('rem', 1.5), 'xx-large': ('rem', 2), 'smaller': ('em', 1 / 1.2), 'larger': ('em', 1.2), '!!default': ('em', 1)}) MARGIN_RATIOS = UNIT_RATIOS.copy() MARGIN_RATIOS.update({'none': ('pt', 0)}) BORDER_WIDTH_RATIOS = UNIT_RATIOS.copy() BORDER_WIDTH_RATIOS.update({'none': ('pt', 0), 'thick': ('px', 4), 'medium': ('px', 2), 'thin': ('px', 1)}) BORDER_STYLES = ['none', 'hidden', 'dotted', 'dashed', 'solid', 'double', 'groove', 'ridge', 'inset', 'outset', 'mediumdashdot', 'dashdotdot', 'hair', 'mediumdashdotdot', 'dashdot', 'slantdashdot', 'mediumdashed'] SIDE_SHORTHANDS = {1: [0, 0, 0, 0], 2: [0, 1, 0, 1], 3: [0, 1, 2, 1], 4: [0, 1, 2, 3]} SIDES = ('top', 'right', 'bottom', 'left') CSS_EXPANSIONS = {**{f'border-{prop}' if prop else 'border': _border_expander(prop) for prop in ['', 'top', 'right', 'bottom', 'left']}, **{f'border-{prop}': _side_expander(f'border-{{:s}}-{prop}') for prop in ['color', 'style', 'width']}, 'margin': _side_expander('margin-{:s}'), 'padding': _side_expander('padding-{:s}')} def __call__(self, declarations: str | Iterable[tuple[str, str]], inherited: dict[str, str] | None=None) -> dict[str, str]: if isinstance(declarations, str): declarations = self.parse(declarations) props = dict(self.atomize(declarations)) if inherited is None: inherited = {} props = self._update_initial(props, inherited) props = self._update_font_size(props, inherited) return self._update_other_units(props) def _update_initial(self, props: dict[str, str], inherited: dict[str, str]) -> dict[str, str]: for (prop, val) in inherited.items(): if prop not in props: props[prop] = val new_props = props.copy() for (prop, val) in props.items(): if val == 'inherit': val = inherited.get(prop, 'initial') if val in ('initial', None): del new_props[prop] else: new_props[prop] = val return new_props def _update_font_size(self, props: dict[str, str], inherited: dict[str, str]) -> dict[str, str]: if props.get('font-size'): props['font-size'] = self.size_to_pt(props['font-size'], self._get_font_size(inherited), conversions=self.FONT_SIZE_RATIOS) return props def _get_font_size(self, props: dict[str, str]) -> float | None: if props.get('font-size'): font_size_string = props['font-size'] return self._get_float_font_size_from_pt(font_size_string) return None def _get_float_font_size_from_pt(self, font_size_string: str) -> float: assert font_size_string.endswith('pt') return float(font_size_string.rstrip('pt')) def _update_other_units(self, props: dict[str, str]) -> dict[str, str]: font_size = self._get_font_size(props) for 
side in self.SIDES: prop = f'border-{side}-width' if prop in props: props[prop] = self.size_to_pt(props[prop], em_pt=font_size, conversions=self.BORDER_WIDTH_RATIOS) for prop in [f'margin-{side}', f'padding-{side}']: if prop in props: props[prop] = self.size_to_pt(props[prop], em_pt=font_size, conversions=self.MARGIN_RATIOS) return props def size_to_pt(self, in_val: str, em_pt: float | None=None, conversions: dict=UNIT_RATIOS) -> str: def _error() -> str: warnings.warn(f'Unhandled size: {in_val!r}', CSSWarning, stacklevel=find_stack_level()) return self.size_to_pt('1!!default', conversions=conversions) match = re.match('^(\\S*?)([a-zA-Z%!].*)', in_val) if match is None: return _error() (val, unit) = match.groups() if val == '': val = 1 else: try: val = float(val) except ValueError: return _error() while unit != 'pt': if unit == 'em': if em_pt is None: unit = 'rem' else: val *= em_pt unit = 'pt' continue try: (unit, mul) = conversions[unit] except KeyError: return _error() val *= mul val = round(val, 5) if int(val) == val: size_fmt = f'{int(val):d}pt' else: size_fmt = f'{val:f}pt' return size_fmt def atomize(self, declarations: Iterable) -> Generator[tuple[str, str], None, None]: for (prop, value) in declarations: prop = prop.lower() value = value.lower() if prop in self.CSS_EXPANSIONS: expand = self.CSS_EXPANSIONS[prop] yield from expand(self, prop, value) else: yield (prop, value) def parse(self, declarations_str: str) -> Iterator[tuple[str, str]]: for decl in declarations_str.split(';'): if not decl.strip(): continue (prop, sep, val) = decl.partition(':') prop = prop.strip().lower() val = val.strip().lower() if sep: yield (prop, val) else: warnings.warn(f'Ill-formatted attribute: expected a colon in {decl!r}', CSSWarning, stacklevel=find_stack_level()) # File: pandas-main/pandas/io/formats/csvs.py """""" from __future__ import annotations from collections.abc import Hashable, Iterable, Iterator, Sequence import csv as csvlib import os from typing import TYPE_CHECKING, Any, cast import numpy as np from pandas._libs import writers as libwriters from pandas._typing import SequenceNotStr from pandas.util._decorators import cache_readonly from pandas.core.dtypes.generic import ABCDatetimeIndex, ABCIndex, ABCMultiIndex, ABCPeriodIndex from pandas.core.dtypes.missing import notna from pandas.core.indexes.api import Index from pandas.io.common import get_handle if TYPE_CHECKING: from pandas._typing import CompressionOptions, FilePath, FloatFormatType, IndexLabel, StorageOptions, WriteBuffer, npt from pandas.io.formats.format import DataFrameFormatter _DEFAULT_CHUNKSIZE_CELLS = 100000 class CSVFormatter: cols: npt.NDArray[np.object_] def __init__(self, formatter: DataFrameFormatter, path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes]='', sep: str=',', cols: Sequence[Hashable] | None=None, index_label: IndexLabel | None=None, mode: str='w', encoding: str | None=None, errors: str='strict', compression: CompressionOptions='infer', quoting: int | None=None, lineterminator: str | None='\n', chunksize: int | None=None, quotechar: str | None='"', date_format: str | None=None, doublequote: bool=True, escapechar: str | None=None, storage_options: StorageOptions | None=None) -> None: self.fmt = formatter self.obj = self.fmt.frame self.filepath_or_buffer = path_or_buf self.encoding = encoding self.compression: CompressionOptions = compression self.mode = mode self.storage_options = storage_options self.sep = sep self.index_label = self._initialize_index_label(index_label) self.errors = errors 
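# ---------------------------------------------------------------------------
# Illustrative usage sketch (assumption: CSSResolver is an internal API and
# may change). It shows the shorthand expansion and unit normalisation that
# the resolver above performs before styles reach the Excel writers.
# ---------------------------------------------------------------------------
from pandas.io.formats.css import CSSResolver

resolver = CSSResolver()
props = resolver("border: 1px solid black; margin: 1em 2em",
                 inherited={"font-size": "12pt"})
# Roughly: each border-<side>-style resolves to 'solid', the 1px width is
# normalised to '0.75pt', and the em margins resolve against the inherited
# 12pt font size, so margin-top becomes '12pt' and margin-right '24pt'.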
self.quoting = quoting or csvlib.QUOTE_MINIMAL self.quotechar = self._initialize_quotechar(quotechar) self.doublequote = doublequote self.escapechar = escapechar self.lineterminator = lineterminator or os.linesep self.date_format = date_format self.cols = self._initialize_columns(cols) self.chunksize = self._initialize_chunksize(chunksize) @property def na_rep(self) -> str: return self.fmt.na_rep @property def float_format(self) -> FloatFormatType | None: return self.fmt.float_format @property def decimal(self) -> str: return self.fmt.decimal @property def header(self) -> bool | SequenceNotStr[str]: return self.fmt.header @property def index(self) -> bool: return self.fmt.index def _initialize_index_label(self, index_label: IndexLabel | None) -> IndexLabel: if index_label is not False: if index_label is None: return self._get_index_label_from_obj() elif not isinstance(index_label, (list, tuple, np.ndarray, ABCIndex)): return [index_label] return index_label def _get_index_label_from_obj(self) -> Sequence[Hashable]: if isinstance(self.obj.index, ABCMultiIndex): return self._get_index_label_multiindex() else: return self._get_index_label_flat() def _get_index_label_multiindex(self) -> Sequence[Hashable]: return [name or '' for name in self.obj.index.names] def _get_index_label_flat(self) -> Sequence[Hashable]: index_label = self.obj.index.name return [''] if index_label is None else [index_label] def _initialize_quotechar(self, quotechar: str | None) -> str | None: if self.quoting != csvlib.QUOTE_NONE: return quotechar return None @property def has_mi_columns(self) -> bool: return bool(isinstance(self.obj.columns, ABCMultiIndex)) def _initialize_columns(self, cols: Iterable[Hashable] | None) -> npt.NDArray[np.object_]: if self.has_mi_columns: if cols is not None: msg = 'cannot specify cols with a MultiIndex on the columns' raise TypeError(msg) if cols is not None: if isinstance(cols, ABCIndex): cols = cols._get_values_for_csv(**self._number_format) else: cols = list(cols) self.obj = self.obj.loc[:, cols] new_cols = self.obj.columns return new_cols._get_values_for_csv(**self._number_format) def _initialize_chunksize(self, chunksize: int | None) -> int: if chunksize is None: return _DEFAULT_CHUNKSIZE_CELLS // (len(self.cols) or 1) or 1 return int(chunksize) @property def _number_format(self) -> dict[str, Any]: return {'na_rep': self.na_rep, 'float_format': self.float_format, 'date_format': self.date_format, 'quoting': self.quoting, 'decimal': self.decimal} @cache_readonly def data_index(self) -> Index: data_index = self.obj.index if isinstance(data_index, (ABCDatetimeIndex, ABCPeriodIndex)) and self.date_format is not None: data_index = Index([x.strftime(self.date_format) if notna(x) else '' for x in data_index]) elif isinstance(data_index, ABCMultiIndex): data_index = data_index.remove_unused_levels() return data_index @property def nlevels(self) -> int: if self.index: return getattr(self.data_index, 'nlevels', 1) else: return 0 @property def _has_aliases(self) -> bool: return isinstance(self.header, (tuple, list, np.ndarray, ABCIndex)) @property def _need_to_save_header(self) -> bool: return bool(self._has_aliases or self.header) @property def write_cols(self) -> SequenceNotStr[Hashable]: if self._has_aliases: assert not isinstance(self.header, bool) if len(self.header) != len(self.cols): raise ValueError(f'Writing {len(self.cols)} cols but got {len(self.header)} aliases') return self.header else: return cast(SequenceNotStr[Hashable], self.cols) @property def encoded_labels(self) -> 
list[Hashable]: encoded_labels: list[Hashable] = [] if self.index and self.index_label: assert isinstance(self.index_label, Sequence) encoded_labels = list(self.index_label) if not self.has_mi_columns or self._has_aliases: encoded_labels += list(self.write_cols) return encoded_labels def save(self) -> None: with get_handle(self.filepath_or_buffer, self.mode, encoding=self.encoding, errors=self.errors, compression=self.compression, storage_options=self.storage_options) as handles: self.writer = csvlib.writer(handles.handle, lineterminator=self.lineterminator, delimiter=self.sep, quoting=self.quoting, doublequote=self.doublequote, escapechar=self.escapechar, quotechar=self.quotechar) self._save() def _save(self) -> None: if self._need_to_save_header: self._save_header() self._save_body() def _save_header(self) -> None: if not self.has_mi_columns or self._has_aliases: self.writer.writerow(self.encoded_labels) else: for row in self._generate_multiindex_header_rows(): self.writer.writerow(row) def _generate_multiindex_header_rows(self) -> Iterator[list[Hashable]]: columns = self.obj.columns for i in range(columns.nlevels): col_line = [] if self.index: col_line.append(columns.names[i]) if isinstance(self.index_label, list) and len(self.index_label) > 1: col_line.extend([''] * (len(self.index_label) - 1)) col_line.extend(columns._get_level_values(i)) yield col_line if self.encoded_labels and set(self.encoded_labels) != {''}: yield (self.encoded_labels + [''] * len(columns)) def _save_body(self) -> None: nrows = len(self.data_index) chunks = nrows // self.chunksize + 1 for i in range(chunks): start_i = i * self.chunksize end_i = min(start_i + self.chunksize, nrows) if start_i >= end_i: break self._save_chunk(start_i, end_i) def _save_chunk(self, start_i: int, end_i: int) -> None: slicer = slice(start_i, end_i) df = self.obj.iloc[slicer] res = df._get_values_for_csv(**self._number_format) data = list(res._iter_column_arrays()) ix = self.data_index[slicer]._get_values_for_csv(**self._number_format) if self.nlevels != 0 else np.empty(end_i - start_i) libwriters.write_csv_rows(data, ix, self.nlevels, self.cols, self.writer) # File: pandas-main/pandas/io/formats/excel.py """""" from __future__ import annotations from collections.abc import Callable, Hashable, Iterable, Mapping, Sequence import functools import itertools import re from typing import TYPE_CHECKING, Any, cast import warnings import numpy as np from pandas._libs.lib import is_list_like from pandas.util._decorators import doc from pandas.util._exceptions import find_stack_level from pandas.core.dtypes import missing from pandas.core.dtypes.common import is_float, is_scalar from pandas import DataFrame, Index, MultiIndex, PeriodIndex import pandas.core.common as com from pandas.core.shared_docs import _shared_docs from pandas.io.formats._color_data import CSS4_COLORS from pandas.io.formats.css import CSSResolver, CSSWarning from pandas.io.formats.format import get_level_lengths from pandas.io.formats.printing import pprint_thing if TYPE_CHECKING: from pandas._typing import ExcelWriterMergeCells, FilePath, IndexLabel, StorageOptions, WriteExcelBuffer from pandas import ExcelWriter class ExcelCell: __fields__ = ('row', 'col', 'val', 'style', 'mergestart', 'mergeend') __slots__ = __fields__ def __init__(self, row: int, col: int, val, style=None, mergestart: int | None=None, mergeend: int | None=None) -> None: self.row = row self.col = col self.val = val self.style = style self.mergestart = mergestart self.mergeend = mergeend class 
CssExcelCell(ExcelCell): def __init__(self, row: int, col: int, val, style: dict | None, css_styles: dict[tuple[int, int], list[tuple[str, Any]]] | None, css_row: int, css_col: int, css_converter: Callable | None, **kwargs) -> None: if css_styles and css_converter: declaration_dict = {prop.lower(): val for (prop, val) in css_styles[css_row, css_col]} unique_declarations = frozenset(declaration_dict.items()) style = css_converter(unique_declarations) super().__init__(row=row, col=col, val=val, style=style, **kwargs) class CSSToExcelConverter: NAMED_COLORS = CSS4_COLORS VERTICAL_MAP = {'top': 'top', 'text-top': 'top', 'middle': 'center', 'baseline': 'bottom', 'bottom': 'bottom', 'text-bottom': 'bottom'} BOLD_MAP = {'bold': True, 'bolder': True, '600': True, '700': True, '800': True, '900': True, 'normal': False, 'lighter': False, '100': False, '200': False, '300': False, '400': False, '500': False} ITALIC_MAP = {'normal': False, 'italic': True, 'oblique': True} FAMILY_MAP = {'serif': 1, 'sans-serif': 2, 'cursive': 4, 'fantasy': 5} BORDER_STYLE_MAP = {style.lower(): style for style in ['dashed', 'mediumDashDot', 'dashDotDot', 'hair', 'dotted', 'mediumDashDotDot', 'double', 'dashDot', 'slantDashDot', 'mediumDashed']} inherited: dict[str, str] | None def __init__(self, inherited: str | None=None) -> None: if inherited is not None: self.inherited = self.compute_css(inherited) else: self.inherited = None self._call_cached = functools.cache(self._call_uncached) compute_css = CSSResolver() def __call__(self, declarations: str | frozenset[tuple[str, str]]) -> dict[str, dict[str, str]]: return self._call_cached(declarations) def _call_uncached(self, declarations: str | frozenset[tuple[str, str]]) -> dict[str, dict[str, str]]: properties = self.compute_css(declarations, self.inherited) return self.build_xlstyle(properties) def build_xlstyle(self, props: Mapping[str, str]) -> dict[str, dict[str, str]]: out = {'alignment': self.build_alignment(props), 'border': self.build_border(props), 'fill': self.build_fill(props), 'font': self.build_font(props), 'number_format': self.build_number_format(props)} def remove_none(d: dict[str, str | None]) -> None: for (k, v) in list(d.items()): if v is None: del d[k] elif isinstance(v, dict): remove_none(v) if not v: del d[k] remove_none(out) return out def build_alignment(self, props: Mapping[str, str]) -> dict[str, bool | str | None]: return {'horizontal': props.get('text-align'), 'vertical': self._get_vertical_alignment(props), 'wrap_text': self._get_is_wrap_text(props)} def _get_vertical_alignment(self, props: Mapping[str, str]) -> str | None: vertical_align = props.get('vertical-align') if vertical_align: return self.VERTICAL_MAP.get(vertical_align) return None def _get_is_wrap_text(self, props: Mapping[str, str]) -> bool | None: if props.get('white-space') is None: return None return bool(props['white-space'] not in ('nowrap', 'pre', 'pre-line')) def build_border(self, props: Mapping[str, str]) -> dict[str, dict[str, str | None]]: return {side: {'style': self._border_style(props.get(f'border-{side}-style'), props.get(f'border-{side}-width'), self.color_to_excel(props.get(f'border-{side}-color'))), 'color': self.color_to_excel(props.get(f'border-{side}-color'))} for side in ['top', 'right', 'bottom', 'left']} def _border_style(self, style: str | None, width: str | None, color: str | None) -> str | None: if width is None and style is None and (color is None): return None if width is None and style is None: return 'none' if style in ('none', 'hidden'): return 'none' 
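# ---------------------------------------------------------------------------
# Illustrative usage sketch (assumption: CSSToExcelConverter is internal and
# its exact output shape may vary between versions). It shows the end-to-end
# mapping from CSS declarations to the nested Excel style dict built by
# build_xlstyle above; empty style groups are pruned by remove_none.
# ---------------------------------------------------------------------------
from pandas.io.formats.excel import CSSToExcelConverter

converter = CSSToExcelConverter()
style = converter("font-weight: bold; color: red; text-align: center")
# Roughly: {'font': {'bold': True, 'color': 'FF0000'},
#           'alignment': {'horizontal': 'center'}}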
width_name = self._get_width_name(width) if width_name is None: return 'none' if style in (None, 'groove', 'ridge', 'inset', 'outset', 'solid'): return width_name if style == 'double': return 'double' if style == 'dotted': if width_name in ('hair', 'thin'): return 'dotted' return 'mediumDashDotDot' if style == 'dashed': if width_name in ('hair', 'thin'): return 'dashed' return 'mediumDashed' elif style in self.BORDER_STYLE_MAP: return self.BORDER_STYLE_MAP[style] else: warnings.warn(f'Unhandled border style format: {style!r}', CSSWarning, stacklevel=find_stack_level()) return 'none' def _get_width_name(self, width_input: str | None) -> str | None: width = self._width_to_float(width_input) if width < 1e-05: return None elif width < 1.3: return 'thin' elif width < 2.8: return 'medium' return 'thick' def _width_to_float(self, width: str | None) -> float: if width is None: width = '2pt' return self._pt_to_float(width) def _pt_to_float(self, pt_string: str) -> float: assert pt_string.endswith('pt') return float(pt_string.rstrip('pt')) def build_fill(self, props: Mapping[str, str]): fill_color = props.get('background-color') if fill_color not in (None, 'transparent', 'none'): return {'fgColor': self.color_to_excel(fill_color), 'patternType': 'solid'} def build_number_format(self, props: Mapping[str, str]) -> dict[str, str | None]: fc = props.get('number-format') fc = fc.replace('§', ';') if isinstance(fc, str) else fc return {'format_code': fc} def build_font(self, props: Mapping[str, str]) -> dict[str, bool | float | str | None]: font_names = self._get_font_names(props) decoration = self._get_decoration(props) return {'name': font_names[0] if font_names else None, 'family': self._select_font_family(font_names), 'size': self._get_font_size(props), 'bold': self._get_is_bold(props), 'italic': self._get_is_italic(props), 'underline': 'single' if 'underline' in decoration else None, 'strike': 'line-through' in decoration or None, 'color': self.color_to_excel(props.get('color')), 'shadow': self._get_shadow(props)} def _get_is_bold(self, props: Mapping[str, str]) -> bool | None: weight = props.get('font-weight') if weight: return self.BOLD_MAP.get(weight) return None def _get_is_italic(self, props: Mapping[str, str]) -> bool | None: font_style = props.get('font-style') if font_style: return self.ITALIC_MAP.get(font_style) return None def _get_decoration(self, props: Mapping[str, str]) -> Sequence[str]: decoration = props.get('text-decoration') if decoration is not None: return decoration.split() else: return () def _get_underline(self, decoration: Sequence[str]) -> str | None: if 'underline' in decoration: return 'single' return None def _get_shadow(self, props: Mapping[str, str]) -> bool | None: if 'text-shadow' in props: return bool(re.search('^[^#(]*[1-9]', props['text-shadow'])) return None def _get_font_names(self, props: Mapping[str, str]) -> Sequence[str]: font_names_tmp = re.findall('(?x)\n (\n "(?:[^"]|\\\\")+"\n |\n \'(?:[^\']|\\\\\')+\'\n |\n [^\'",]+\n )(?=,|\\s*$)\n ', props.get('font-family', '')) font_names = [] for name in font_names_tmp: if name[:1] == '"': name = name[1:-1].replace('\\"', '"') elif name[:1] == "'": name = name[1:-1].replace("\\'", "'") else: name = name.strip() if name: font_names.append(name) return font_names def _get_font_size(self, props: Mapping[str, str]) -> float | None: size = props.get('font-size') if size is None: return size return self._pt_to_float(size) def _select_font_family(self, font_names: Sequence[str]) -> int | None: family = None for name in 
font_names: family = self.FAMILY_MAP.get(name) if family: break return family def color_to_excel(self, val: str | None) -> str | None: if val is None: return None if self._is_hex_color(val): return self._convert_hex_to_excel(val) try: return self.NAMED_COLORS[val] except KeyError: warnings.warn(f'Unhandled color format: {val!r}', CSSWarning, stacklevel=find_stack_level()) return None def _is_hex_color(self, color_string: str) -> bool: return bool(color_string.startswith('#')) def _convert_hex_to_excel(self, color_string: str) -> str: code = color_string.lstrip('#') if self._is_shorthand_color(color_string): return (code[0] * 2 + code[1] * 2 + code[2] * 2).upper() else: return code.upper() def _is_shorthand_color(self, color_string: str) -> bool: code = color_string.lstrip('#') if len(code) == 3: return True elif len(code) == 6: return False else: raise ValueError(f'Unexpected color {color_string}') class ExcelFormatter: max_rows = 2 ** 20 max_cols = 2 ** 14 def __init__(self, df, na_rep: str='', float_format: str | None=None, cols: Sequence[Hashable] | None=None, header: Sequence[Hashable] | bool=True, index: bool=True, index_label: IndexLabel | None=None, merge_cells: ExcelWriterMergeCells=False, inf_rep: str='inf', style_converter: Callable | None=None) -> None: self.rowcounter = 0 self.na_rep = na_rep if not isinstance(df, DataFrame): self.styler = df self.styler._compute() df = df.data if style_converter is None: style_converter = CSSToExcelConverter() self.style_converter: Callable | None = style_converter else: self.styler = None self.style_converter = None self.df = df if cols is not None: if not len(Index(cols).intersection(df.columns)): raise KeyError('passes columns are not ALL present dataframe') if len(Index(cols).intersection(df.columns)) != len(set(cols)): raise KeyError("Not all names specified in 'columns' are found") self.df = df.reindex(columns=cols) self.columns = self.df.columns self.float_format = float_format self.index = index self.index_label = index_label self.header = header if not isinstance(merge_cells, bool) and merge_cells != 'columns': raise ValueError(f'Unexpected value for merge_cells={merge_cells!r}.') self.merge_cells = merge_cells self.inf_rep = inf_rep def _format_value(self, val): if is_scalar(val) and missing.isna(val): val = self.na_rep elif is_float(val): if missing.isposinf_scalar(val): val = self.inf_rep elif missing.isneginf_scalar(val): val = f'-{self.inf_rep}' elif self.float_format is not None: val = float(self.float_format % val) if getattr(val, 'tzinfo', None) is not None: raise ValueError('Excel does not support datetimes with timezones. 
Please ensure that datetimes are timezone unaware before writing to Excel.') return val def _format_header_mi(self) -> Iterable[ExcelCell]: if self.columns.nlevels > 1: if not self.index: raise NotImplementedError("Writing to Excel with MultiIndex columns and no index ('index'=False) is not yet implemented.") if not (self._has_aliases or self.header): return columns = self.columns level_strs = columns._format_multi(sparsify=self.merge_cells in {True, 'columns'}, include_names=False) level_lengths = get_level_lengths(level_strs) coloffset = 0 lnum = 0 if self.index and isinstance(self.df.index, MultiIndex): coloffset = self.df.index.nlevels - 1 if self.merge_cells in {True, 'columns'}: for (lnum, name) in enumerate(columns.names): yield ExcelCell(row=lnum, col=coloffset, val=name, style=None) for (lnum, (spans, levels, level_codes)) in enumerate(zip(level_lengths, columns.levels, columns.codes)): values = levels.take(level_codes) for (i, span_val) in spans.items(): (mergestart, mergeend) = (None, None) if span_val > 1: (mergestart, mergeend) = (lnum, coloffset + i + span_val) yield CssExcelCell(row=lnum, col=coloffset + i + 1, val=values[i], style=None, css_styles=getattr(self.styler, 'ctx_columns', None), css_row=lnum, css_col=i, css_converter=self.style_converter, mergestart=mergestart, mergeend=mergeend) else: for (i, values) in enumerate(zip(*level_strs)): v = '.'.join(map(pprint_thing, values)) yield CssExcelCell(row=lnum, col=coloffset + i + 1, val=v, style=None, css_styles=getattr(self.styler, 'ctx_columns', None), css_row=lnum, css_col=i, css_converter=self.style_converter) self.rowcounter = lnum def _format_header_regular(self) -> Iterable[ExcelCell]: if self._has_aliases or self.header: coloffset = 0 if self.index: coloffset = 1 if isinstance(self.df.index, MultiIndex): coloffset = len(self.df.index.names) colnames = self.columns if self._has_aliases: self.header = cast(Sequence, self.header) if len(self.header) != len(self.columns): raise ValueError(f'Writing {len(self.columns)} cols but got {len(self.header)} aliases') colnames = self.header for (colindex, colname) in enumerate(colnames): yield CssExcelCell(row=self.rowcounter, col=colindex + coloffset, val=colname, style=None, css_styles=getattr(self.styler, 'ctx_columns', None), css_row=0, css_col=colindex, css_converter=self.style_converter) def _format_header(self) -> Iterable[ExcelCell]: gen: Iterable[ExcelCell] if isinstance(self.columns, MultiIndex): gen = self._format_header_mi() else: gen = self._format_header_regular() gen2: Iterable[ExcelCell] = () if self.df.index.names: row = [x if x is not None else '' for x in self.df.index.names] + [''] * len(self.columns) if functools.reduce(lambda x, y: x and y, (x != '' for x in row)): gen2 = (ExcelCell(self.rowcounter, colindex, val, None) for (colindex, val) in enumerate(row)) self.rowcounter += 1 return itertools.chain(gen, gen2) def _format_body(self) -> Iterable[ExcelCell]: if isinstance(self.df.index, MultiIndex): return self._format_hierarchical_rows() else: return self._format_regular_rows() def _format_regular_rows(self) -> Iterable[ExcelCell]: if self._has_aliases or self.header: self.rowcounter += 1 if self.index: if self.index_label and isinstance(self.index_label, (list, tuple, np.ndarray, Index)): index_label = self.index_label[0] elif self.index_label and isinstance(self.index_label, str): index_label = self.index_label else: index_label = self.df.index.names[0] if isinstance(self.columns, MultiIndex): self.rowcounter += 1 if index_label and self.header is not 
False: yield ExcelCell(self.rowcounter - 1, 0, index_label, None) index_values = self.df.index if isinstance(self.df.index, PeriodIndex): index_values = self.df.index.to_timestamp() for (idx, idxval) in enumerate(index_values): yield CssExcelCell(row=self.rowcounter + idx, col=0, val=idxval, style=None, css_styles=getattr(self.styler, 'ctx_index', None), css_row=idx, css_col=0, css_converter=self.style_converter) coloffset = 1 else: coloffset = 0 yield from self._generate_body(coloffset) def _format_hierarchical_rows(self) -> Iterable[ExcelCell]: if self._has_aliases or self.header: self.rowcounter += 1 gcolidx = 0 if self.index: index_labels = self.df.index.names if self.index_label and isinstance(self.index_label, (list, tuple, np.ndarray, Index)): index_labels = self.index_label if isinstance(self.columns, MultiIndex) and self.merge_cells in {True, 'columns'}: self.rowcounter += 1 if com.any_not_none(*index_labels) and self.header is not False: for (cidx, name) in enumerate(index_labels): yield ExcelCell(self.rowcounter - 1, cidx, name, None) if self.merge_cells and self.merge_cells != 'columns': level_strs = self.df.index._format_multi(sparsify=True, include_names=False) level_lengths = get_level_lengths(level_strs) for (spans, levels, level_codes) in zip(level_lengths, self.df.index.levels, self.df.index.codes): values = levels.take(level_codes, allow_fill=levels._can_hold_na, fill_value=levels._na_value) for (i, span_val) in spans.items(): (mergestart, mergeend) = (None, None) if span_val > 1: mergestart = self.rowcounter + i + span_val - 1 mergeend = gcolidx yield CssExcelCell(row=self.rowcounter + i, col=gcolidx, val=values[i], style=None, css_styles=getattr(self.styler, 'ctx_index', None), css_row=i, css_col=gcolidx, css_converter=self.style_converter, mergestart=mergestart, mergeend=mergeend) gcolidx += 1 else: for indexcolvals in zip(*self.df.index): for (idx, indexcolval) in enumerate(indexcolvals): yield CssExcelCell(row=self.rowcounter + idx, col=gcolidx, val=indexcolval, style=None, css_styles=getattr(self.styler, 'ctx_index', None), css_row=idx, css_col=gcolidx, css_converter=self.style_converter) gcolidx += 1 yield from self._generate_body(gcolidx) @property def _has_aliases(self) -> bool: return is_list_like(self.header) def _generate_body(self, coloffset: int) -> Iterable[ExcelCell]: for colidx in range(len(self.columns)): series = self.df.iloc[:, colidx] for (i, val) in enumerate(series): yield CssExcelCell(row=self.rowcounter + i, col=colidx + coloffset, val=val, style=None, css_styles=getattr(self.styler, 'ctx', None), css_row=i, css_col=colidx, css_converter=self.style_converter) def get_formatted_cells(self) -> Iterable[ExcelCell]: for cell in itertools.chain(self._format_header(), self._format_body()): cell.val = self._format_value(cell.val) yield cell @doc(storage_options=_shared_docs['storage_options']) def write(self, writer: FilePath | WriteExcelBuffer | ExcelWriter, sheet_name: str='Sheet1', startrow: int=0, startcol: int=0, freeze_panes: tuple[int, int] | None=None, engine: str | None=None, storage_options: StorageOptions | None=None, engine_kwargs: dict | None=None) -> None: from pandas.io.excel import ExcelWriter (num_rows, num_cols) = self.df.shape if num_rows > self.max_rows or num_cols > self.max_cols: raise ValueError(f'This sheet is too large! 
Your sheet size is: {num_rows}, {num_cols} Max sheet size is: {self.max_rows}, {self.max_cols}') if engine_kwargs is None: engine_kwargs = {} formatted_cells = self.get_formatted_cells() if isinstance(writer, ExcelWriter): need_save = False else: writer = ExcelWriter(writer, engine=engine, storage_options=storage_options, engine_kwargs=engine_kwargs) need_save = True try: writer._write_cells(formatted_cells, sheet_name, startrow=startrow, startcol=startcol, freeze_panes=freeze_panes) finally: if need_save: writer.close() # File: pandas-main/pandas/io/formats/format.py """""" from __future__ import annotations from collections.abc import Callable, Generator, Hashable, Mapping, Sequence from contextlib import contextmanager from csv import QUOTE_NONE from decimal import Decimal from functools import partial from io import StringIO import math import re from shutil import get_terminal_size from typing import TYPE_CHECKING, Any, Final, cast import numpy as np from pandas._config.config import get_option, set_option from pandas._libs import lib from pandas._libs.missing import NA from pandas._libs.tslibs import NaT, Timedelta, Timestamp from pandas._libs.tslibs.nattype import NaTType from pandas.core.dtypes.common import is_complex_dtype, is_float, is_integer, is_list_like, is_numeric_dtype, is_scalar from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype, ExtensionDtype from pandas.core.dtypes.missing import isna, notna from pandas.core.arrays import Categorical, DatetimeArray, ExtensionArray, TimedeltaArray from pandas.core.arrays.string_ import StringDtype from pandas.core.base import PandasObject import pandas.core.common as com from pandas.core.indexes.api import Index, MultiIndex, PeriodIndex, ensure_index from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.core.reshape.concat import concat from pandas.io.common import check_parent_directory, stringify_path from pandas.io.formats import printing if TYPE_CHECKING: from pandas._typing import ArrayLike, Axes, ColspaceArgType, ColspaceType, CompressionOptions, FilePath, FloatFormatType, FormattersType, IndexLabel, SequenceNotStr, StorageOptions, WriteBuffer from pandas import DataFrame, Series common_docstring: Final = "\n Parameters\n ----------\n buf : str, Path or StringIO-like, optional, default None\n Buffer to write to. If None, the output is returned as a string.\n columns : array-like, optional, default None\n The subset of columns to write. Writes all columns by default.\n col_space : %(col_space_type)s, optional\n %(col_space)s.\n header : %(header_type)s, optional\n %(header)s.\n index : bool, optional, default True\n Whether to print index (row) labels.\n na_rep : str, optional, default 'NaN'\n String representation of ``NaN`` to use.\n formatters : list, tuple or dict of one-param. functions, optional\n Formatter functions to apply to columns' elements by position or\n name.\n The result of each function must be a unicode string.\n List/tuple must be of length equal to the number of columns.\n float_format : one-parameter function, optional, default None\n Formatter function to apply to columns' elements if they are\n floats. 
This function must return a unicode string and will be\n applied only to the non-``NaN`` elements, with ``NaN`` being\n handled by ``na_rep``.\n sparsify : bool, optional, default True\n Set to False for a DataFrame with a hierarchical index to print\n every multiindex key at each row.\n index_names : bool, optional, default True\n Prints the names of the indexes.\n justify : str, default None\n How to justify the column labels. If None uses the option from\n the print configuration (controlled by set_option), 'right' out\n of the box. Valid values are\n\n * left\n * right\n * center\n * justify\n * justify-all\n * start\n * end\n * inherit\n * match-parent\n * initial\n * unset.\n max_rows : int, optional\n Maximum number of rows to display in the console.\n max_cols : int, optional\n Maximum number of columns to display in the console.\n show_dimensions : bool, default False\n Display DataFrame dimensions (number of rows by number of columns).\n decimal : str, default '.'\n Character recognized as decimal separator, e.g. ',' in Europe.\n " VALID_JUSTIFY_PARAMETERS = ('left', 'right', 'center', 'justify', 'justify-all', 'start', 'end', 'inherit', 'match-parent', 'initial', 'unset') return_docstring: Final = '\n Returns\n -------\n str or None\n If buf is None, returns the result as a string. Otherwise returns\n None.\n ' class SeriesFormatter: def __init__(self, series: Series, *, length: bool | str=True, header: bool=True, index: bool=True, na_rep: str='NaN', name: bool=False, float_format: str | None=None, dtype: bool=True, max_rows: int | None=None, min_rows: int | None=None) -> None: self.series = series self.buf = StringIO() self.name = name self.na_rep = na_rep self.header = header self.length = length self.index = index self.max_rows = max_rows self.min_rows = min_rows if float_format is None: float_format = get_option('display.float_format') self.float_format = float_format self.dtype = dtype self.adj = printing.get_adjustment() self._chk_truncate() def _chk_truncate(self) -> None: self.tr_row_num: int | None min_rows = self.min_rows max_rows = self.max_rows is_truncated_vertically = max_rows and len(self.series) > max_rows series = self.series if is_truncated_vertically: max_rows = cast(int, max_rows) if min_rows: max_rows = min(min_rows, max_rows) if max_rows == 1: row_num = max_rows series = series.iloc[:max_rows] else: row_num = max_rows // 2 series = concat((series.iloc[:row_num], series.iloc[-row_num:])) self.tr_row_num = row_num else: self.tr_row_num = None self.tr_series = series self.is_truncated_vertically = is_truncated_vertically def _get_footer(self) -> str: name = self.series.name footer = '' index = self.series.index if isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)) and index.freq is not None: footer += f'Freq: {index.freqstr}' if self.name is not False and name is not None: if footer: footer += ', ' series_name = printing.pprint_thing(name, escape_chars=('\t', '\r', '\n')) footer += f'Name: {series_name}' if self.length is True or (self.length == 'truncate' and self.is_truncated_vertically): if footer: footer += ', ' footer += f'Length: {len(self.series)}' if self.dtype is not False and self.dtype is not None: dtype_name = getattr(self.tr_series.dtype, 'name', None) if dtype_name: if footer: footer += ', ' footer += f'dtype: {printing.pprint_thing(dtype_name)}' if isinstance(self.tr_series.dtype, CategoricalDtype): level_info = self.tr_series._values._get_repr_footer() if footer: footer += '\n' footer += level_info return str(footer) def 
_get_formatted_values(self) -> list[str]: return format_array(self.tr_series._values, None, float_format=self.float_format, na_rep=self.na_rep, leading_space=self.index) def to_string(self) -> str: series = self.tr_series footer = self._get_footer() if len(series) == 0: return f'{type(self.series).__name__}([], {footer})' index = series.index have_header = _has_names(index) if isinstance(index, MultiIndex): fmt_index = index._format_multi(include_names=True, sparsify=None) adj = printing.get_adjustment() fmt_index = adj.adjoin(2, *fmt_index).split('\n') else: fmt_index = index._format_flat(include_name=True) fmt_values = self._get_formatted_values() if self.is_truncated_vertically: n_header_rows = 0 row_num = self.tr_row_num row_num = cast(int, row_num) width = self.adj.len(fmt_values[row_num - 1]) if width > 3: dot_str = '...' else: dot_str = '..' dot_str = self.adj.justify([dot_str], width, mode='center')[0] fmt_values.insert(row_num + n_header_rows, dot_str) fmt_index.insert(row_num + 1, '') if self.index: result = self.adj.adjoin(3, *[fmt_index[1:], fmt_values]) else: result = self.adj.adjoin(3, fmt_values) if self.header and have_header: result = fmt_index[0] + '\n' + result if footer: result += '\n' + footer return str(''.join(result)) def get_dataframe_repr_params() -> dict[str, Any]: from pandas.io.formats import console if get_option('display.expand_frame_repr'): (line_width, _) = console.get_console_size() else: line_width = None return {'max_rows': get_option('display.max_rows'), 'min_rows': get_option('display.min_rows'), 'max_cols': get_option('display.max_columns'), 'max_colwidth': get_option('display.max_colwidth'), 'show_dimensions': get_option('display.show_dimensions'), 'line_width': line_width} def get_series_repr_params() -> dict[str, Any]: (width, height) = get_terminal_size() max_rows_opt = get_option('display.max_rows') max_rows = height if max_rows_opt == 0 else max_rows_opt min_rows = height if max_rows_opt == 0 else get_option('display.min_rows') return {'name': True, 'dtype': True, 'min_rows': min_rows, 'max_rows': max_rows, 'length': get_option('display.show_dimensions')} class DataFrameFormatter: __doc__ = __doc__ if __doc__ else '' __doc__ += common_docstring + return_docstring def __init__(self, frame: DataFrame, columns: Axes | None=None, col_space: ColspaceArgType | None=None, header: bool | SequenceNotStr[str]=True, index: bool=True, na_rep: str='NaN', formatters: FormattersType | None=None, justify: str | None=None, float_format: FloatFormatType | None=None, sparsify: bool | None=None, index_names: bool=True, max_rows: int | None=None, min_rows: int | None=None, max_cols: int | None=None, show_dimensions: bool | str=False, decimal: str='.', bold_rows: bool=False, escape: bool=True) -> None: self.frame = frame self.columns = self._initialize_columns(columns) self.col_space = self._initialize_colspace(col_space) self.header = header self.index = index self.na_rep = na_rep self.formatters = self._initialize_formatters(formatters) self.justify = self._initialize_justify(justify) self.float_format = float_format self.sparsify = self._initialize_sparsify(sparsify) self.show_index_names = index_names self.decimal = decimal self.bold_rows = bold_rows self.escape = escape self.max_rows = max_rows self.min_rows = min_rows self.max_cols = max_cols self.show_dimensions = show_dimensions self.max_cols_fitted = self._calc_max_cols_fitted() self.max_rows_fitted = self._calc_max_rows_fitted() self.tr_frame = self.frame self.truncate() self.adj = printing.get_adjustment() 
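# Minimal usage sketch (illustrative only; DataFrameRenderer is defined later in this module, and df stands for any DataFrame):
#     fmt = DataFrameFormatter(frame=df, max_rows=10, max_cols=8, show_dimensions=True)
#     text = DataFrameRenderer(fmt).to_string()  # or .to_html(...) / .to_csv(...)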
def get_strcols(self) -> list[list[str]]: strcols = self._get_strcols_without_index() if self.index: str_index = self._get_formatted_index(self.tr_frame) strcols.insert(0, str_index) return strcols @property def should_show_dimensions(self) -> bool: return self.show_dimensions is True or (self.show_dimensions == 'truncate' and self.is_truncated) @property def is_truncated(self) -> bool: return bool(self.is_truncated_horizontally or self.is_truncated_vertically) @property def is_truncated_horizontally(self) -> bool: return bool(self.max_cols_fitted and len(self.columns) > self.max_cols_fitted) @property def is_truncated_vertically(self) -> bool: return bool(self.max_rows_fitted and len(self.frame) > self.max_rows_fitted) @property def dimensions_info(self) -> str: return f'\n\n[{len(self.frame)} rows x {len(self.frame.columns)} columns]' @property def has_index_names(self) -> bool: return _has_names(self.frame.index) @property def has_column_names(self) -> bool: return _has_names(self.frame.columns) @property def show_row_idx_names(self) -> bool: return all((self.has_index_names, self.index, self.show_index_names)) @property def show_col_idx_names(self) -> bool: return all((self.has_column_names, self.show_index_names, self.header)) @property def max_rows_displayed(self) -> int: return min(self.max_rows or len(self.frame), len(self.frame)) def _initialize_sparsify(self, sparsify: bool | None) -> bool: if sparsify is None: return get_option('display.multi_sparse') return sparsify def _initialize_formatters(self, formatters: FormattersType | None) -> FormattersType: if formatters is None: return {} elif len(self.frame.columns) == len(formatters) or isinstance(formatters, dict): return formatters else: raise ValueError(f'Formatters length({len(formatters)}) should match DataFrame number of columns({len(self.frame.columns)})') def _initialize_justify(self, justify: str | None) -> str: if justify is None: return get_option('display.colheader_justify') else: return justify def _initialize_columns(self, columns: Axes | None) -> Index: if columns is not None: cols = ensure_index(columns) self.frame = self.frame[cols] return cols else: return self.frame.columns def _initialize_colspace(self, col_space: ColspaceArgType | None) -> ColspaceType: result: ColspaceType if col_space is None: result = {} elif isinstance(col_space, (int, str)): result = {'': col_space} result.update({column: col_space for column in self.frame.columns}) elif isinstance(col_space, Mapping): for column in col_space.keys(): if column not in self.frame.columns and column != '': raise ValueError(f'Col_space is defined for an unknown column: {column}') result = col_space else: if len(self.frame.columns) != len(col_space): raise ValueError(f'Col_space length({len(col_space)}) should match DataFrame number of columns({len(self.frame.columns)})') result = dict(zip(self.frame.columns, col_space)) return result def _calc_max_cols_fitted(self) -> int | None: if not self._is_in_terminal(): return self.max_cols (width, _) = get_terminal_size() if self._is_screen_narrow(width): return width else: return self.max_cols def _calc_max_rows_fitted(self) -> int | None: max_rows: int | None if self._is_in_terminal(): (_, height) = get_terminal_size() if self.max_rows == 0: return height - self._get_number_of_auxiliary_rows() if self._is_screen_short(height): max_rows = height else: max_rows = self.max_rows else: max_rows = self.max_rows return self._adjust_max_rows(max_rows) def _adjust_max_rows(self, max_rows: int | None) -> int | None: if 
max_rows: if len(self.frame) > max_rows and self.min_rows: max_rows = min(self.min_rows, max_rows) return max_rows def _is_in_terminal(self) -> bool: return bool(self.max_cols == 0 or self.max_rows == 0) def _is_screen_narrow(self, max_width) -> bool: return bool(self.max_cols == 0 and len(self.frame.columns) > max_width) def _is_screen_short(self, max_height) -> bool: return bool(self.max_rows == 0 and len(self.frame) > max_height) def _get_number_of_auxiliary_rows(self) -> int: dot_row = 1 prompt_row = 1 num_rows = dot_row + prompt_row if self.show_dimensions: num_rows += len(self.dimensions_info.splitlines()) if self.header: num_rows += 1 return num_rows def truncate(self) -> None: if self.is_truncated_horizontally: self._truncate_horizontally() if self.is_truncated_vertically: self._truncate_vertically() def _truncate_horizontally(self) -> None: assert self.max_cols_fitted is not None col_num = self.max_cols_fitted // 2 if col_num >= 1: left = self.tr_frame.iloc[:, :col_num] right = self.tr_frame.iloc[:, -col_num:] self.tr_frame = concat((left, right), axis=1) if isinstance(self.formatters, (list, tuple)): self.formatters = [*self.formatters[:col_num], *self.formatters[-col_num:]] else: col_num = cast(int, self.max_cols) self.tr_frame = self.tr_frame.iloc[:, :col_num] self.tr_col_num = col_num def _truncate_vertically(self) -> None: assert self.max_rows_fitted is not None row_num = self.max_rows_fitted // 2 if row_num >= 1: _len = len(self.tr_frame) _slice = np.hstack([np.arange(row_num), np.arange(_len - row_num, _len)]) self.tr_frame = self.tr_frame.iloc[_slice] else: row_num = cast(int, self.max_rows) self.tr_frame = self.tr_frame.iloc[:row_num, :] self.tr_row_num = row_num def _get_strcols_without_index(self) -> list[list[str]]: strcols: list[list[str]] = [] if not is_list_like(self.header) and (not self.header): for (i, c) in enumerate(self.tr_frame): fmt_values = self.format_col(i) fmt_values = _make_fixed_width(strings=fmt_values, justify=self.justify, minimum=int(self.col_space.get(c, 0)), adj=self.adj) strcols.append(fmt_values) return strcols if is_list_like(self.header): self.header = cast(list[str], self.header) if len(self.header) != len(self.columns): raise ValueError(f'Writing {len(self.columns)} cols but got {len(self.header)} aliases') str_columns = [[label] for label in self.header] else: str_columns = self._get_formatted_column_labels(self.tr_frame) if self.show_row_idx_names: for x in str_columns: x.append('') for (i, c) in enumerate(self.tr_frame): cheader = str_columns[i] header_colwidth = max(int(self.col_space.get(c, 0)), *(self.adj.len(x) for x in cheader)) fmt_values = self.format_col(i) fmt_values = _make_fixed_width(fmt_values, self.justify, minimum=header_colwidth, adj=self.adj) max_len = max(*(self.adj.len(x) for x in fmt_values), header_colwidth) cheader = self.adj.justify(cheader, max_len, mode=self.justify) strcols.append(cheader + fmt_values) return strcols def format_col(self, i: int) -> list[str]: frame = self.tr_frame formatter = self._get_formatter(i) return format_array(frame.iloc[:, i]._values, formatter, float_format=self.float_format, na_rep=self.na_rep, space=self.col_space.get(frame.columns[i]), decimal=self.decimal, leading_space=self.index) def _get_formatter(self, i: str | int) -> Callable | None: if isinstance(self.formatters, (list, tuple)): if is_integer(i): i = cast(int, i) return self.formatters[i] else: return None else: if is_integer(i) and i not in self.columns: i = self.columns[i] return self.formatters.get(i, None) def 
_get_formatted_column_labels(self, frame: DataFrame) -> list[list[str]]: from pandas.core.indexes.multi import sparsify_labels columns = frame.columns if isinstance(columns, MultiIndex): fmt_columns = columns._format_multi(sparsify=False, include_names=False) if self.sparsify and len(fmt_columns): fmt_columns = sparsify_labels(fmt_columns) str_columns = [list(x) for x in zip(*fmt_columns)] else: fmt_columns = columns._format_flat(include_name=False) str_columns = [[' ' + x if not self._get_formatter(i) and is_numeric_dtype(dtype) else x] for (i, (x, dtype)) in enumerate(zip(fmt_columns, self.frame.dtypes))] return str_columns def _get_formatted_index(self, frame: DataFrame) -> list[str]: col_space = {k: cast(int, v) for (k, v) in self.col_space.items()} index = frame.index columns = frame.columns fmt = self._get_formatter('__index__') if isinstance(index, MultiIndex): fmt_index = index._format_multi(sparsify=self.sparsify, include_names=self.show_row_idx_names, formatter=fmt) else: fmt_index = [index._format_flat(include_name=self.show_row_idx_names, formatter=fmt)] fmt_index = [tuple(_make_fixed_width(list(x), justify='left', minimum=col_space.get('', 0), adj=self.adj)) for x in fmt_index] adjoined = self.adj.adjoin(1, *fmt_index).split('\n') if self.show_col_idx_names: col_header = [str(x) for x in self._get_column_name_list()] else: col_header = [''] * columns.nlevels if self.header: return col_header + adjoined else: return adjoined def _get_column_name_list(self) -> list[Hashable]: names: list[Hashable] = [] columns = self.frame.columns if isinstance(columns, MultiIndex): names.extend(('' if name is None else name for name in columns.names)) else: names.append('' if columns.name is None else columns.name) return names class DataFrameRenderer: def __init__(self, fmt: DataFrameFormatter) -> None: self.fmt = fmt def to_html(self, buf: FilePath | WriteBuffer[str] | None=None, encoding: str | None=None, classes: str | list | tuple | None=None, notebook: bool=False, border: int | bool | None=None, table_id: str | None=None, render_links: bool=False) -> str | None: from pandas.io.formats.html import HTMLFormatter, NotebookFormatter Klass = NotebookFormatter if notebook else HTMLFormatter html_formatter = Klass(self.fmt, classes=classes, border=border, table_id=table_id, render_links=render_links) string = html_formatter.to_string() return save_to_buffer(string, buf=buf, encoding=encoding) def to_string(self, buf: FilePath | WriteBuffer[str] | None=None, encoding: str | None=None, line_width: int | None=None) -> str | None: from pandas.io.formats.string import StringFormatter string_formatter = StringFormatter(self.fmt, line_width=line_width) string = string_formatter.to_string() return save_to_buffer(string, buf=buf, encoding=encoding) def to_csv(self, path_or_buf: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None=None, encoding: str | None=None, sep: str=',', columns: Sequence[Hashable] | None=None, index_label: IndexLabel | None=None, mode: str='w', compression: CompressionOptions='infer', quoting: int | None=None, quotechar: str='"', lineterminator: str | None=None, chunksize: int | None=None, date_format: str | None=None, doublequote: bool=True, escapechar: str | None=None, errors: str='strict', storage_options: StorageOptions | None=None) -> str | None: from pandas.io.formats.csvs import CSVFormatter if path_or_buf is None: created_buffer = True path_or_buf = StringIO() else: created_buffer = False csv_formatter = CSVFormatter(path_or_buf=path_or_buf, 
lineterminator=lineterminator, sep=sep, encoding=encoding, errors=errors, compression=compression, quoting=quoting, cols=columns, index_label=index_label, mode=mode, chunksize=chunksize, quotechar=quotechar, date_format=date_format, doublequote=doublequote, escapechar=escapechar, storage_options=storage_options, formatter=self.fmt) csv_formatter.save() if created_buffer: assert isinstance(path_or_buf, StringIO) content = path_or_buf.getvalue() path_or_buf.close() return content return None def save_to_buffer(string: str, buf: FilePath | WriteBuffer[str] | None=None, encoding: str | None=None) -> str | None: with _get_buffer(buf, encoding=encoding) as fd: fd.write(string) if buf is None: return fd.getvalue() return None @contextmanager def _get_buffer(buf: FilePath | WriteBuffer[str] | None, encoding: str | None=None) -> Generator[WriteBuffer[str], None, None] | Generator[StringIO, None, None]: if buf is not None: buf = stringify_path(buf) else: buf = StringIO() if encoding is None: encoding = 'utf-8' elif not isinstance(buf, str): raise ValueError('buf is not a file name and encoding is specified.') if hasattr(buf, 'write'): yield buf elif isinstance(buf, str): check_parent_directory(str(buf)) with open(buf, 'w', encoding=encoding, newline='') as f: yield f else: raise TypeError('buf is not a file name and it has no write method') def format_array(values: ArrayLike, formatter: Callable | None, float_format: FloatFormatType | None=None, na_rep: str='NaN', digits: int | None=None, space: str | int | None=None, justify: str='right', decimal: str='.', leading_space: bool | None=True, quoting: int | None=None, fallback_formatter: Callable | None=None) -> list[str]: fmt_klass: type[_GenericArrayFormatter] if lib.is_np_dtype(values.dtype, 'M'): fmt_klass = _Datetime64Formatter values = cast(DatetimeArray, values) elif isinstance(values.dtype, DatetimeTZDtype): fmt_klass = _Datetime64TZFormatter values = cast(DatetimeArray, values) elif lib.is_np_dtype(values.dtype, 'm'): fmt_klass = _Timedelta64Formatter values = cast(TimedeltaArray, values) elif isinstance(values.dtype, ExtensionDtype): fmt_klass = _ExtensionArrayFormatter elif lib.is_np_dtype(values.dtype, 'fc'): fmt_klass = FloatArrayFormatter elif lib.is_np_dtype(values.dtype, 'iu'): fmt_klass = _IntArrayFormatter else: fmt_klass = _GenericArrayFormatter if space is None: space = 12 if float_format is None: float_format = get_option('display.float_format') if digits is None: digits = get_option('display.precision') fmt_obj = fmt_klass(values, digits=digits, na_rep=na_rep, float_format=float_format, formatter=formatter, space=space, justify=justify, decimal=decimal, leading_space=leading_space, quoting=quoting, fallback_formatter=fallback_formatter) return fmt_obj.get_result() class _GenericArrayFormatter: def __init__(self, values: ArrayLike, digits: int=7, formatter: Callable | None=None, na_rep: str='NaN', space: str | int=12, float_format: FloatFormatType | None=None, justify: str='right', decimal: str='.', quoting: int | None=None, fixed_width: bool=True, leading_space: bool | None=True, fallback_formatter: Callable | None=None) -> None: self.values = values self.digits = digits self.na_rep = na_rep self.space = space self.formatter = formatter self.float_format = float_format self.justify = justify self.decimal = decimal self.quoting = quoting self.fixed_width = fixed_width self.leading_space = leading_space self.fallback_formatter = fallback_formatter def get_result(self) -> list[str]: fmt_values = self._format_strings() return 
_make_fixed_width(fmt_values, self.justify) def _format_strings(self) -> list[str]: if self.float_format is None: float_format = get_option('display.float_format') if float_format is None: precision = get_option('display.precision') float_format = lambda x: _trim_zeros_single_float(f'{x: .{precision:d}f}') else: float_format = self.float_format if self.formatter is not None: formatter = self.formatter elif self.fallback_formatter is not None: formatter = self.fallback_formatter else: quote_strings = self.quoting is not None and self.quoting != QUOTE_NONE formatter = partial(printing.pprint_thing, escape_chars=('\t', '\r', '\n'), quote_strings=quote_strings) def _format(x): if self.na_rep is not None and is_scalar(x) and isna(x): if x is None: return 'None' elif x is NA: return str(NA) elif x is NaT or isinstance(x, (np.datetime64, np.timedelta64)): return 'NaT' return self.na_rep elif isinstance(x, PandasObject): return str(x) elif isinstance(x, StringDtype): return repr(x) else: return str(formatter(x)) vals = self.values if not isinstance(vals, np.ndarray): raise TypeError('ExtensionArray formatting should use _ExtensionArrayFormatter') inferred = lib.map_infer(vals, is_float) is_float_type = inferred & np.all(notna(vals), axis=tuple(range(1, len(vals.shape)))) leading_space = self.leading_space if leading_space is None: leading_space = is_float_type.any() fmt_values = [] for (i, v) in enumerate(vals): if (not is_float_type[i] or self.formatter is not None) and leading_space: fmt_values.append(f' {_format(v)}') elif is_float_type[i]: fmt_values.append(float_format(v)) else: if leading_space is False: tpl = '{v}' else: tpl = ' {v}' fmt_values.append(tpl.format(v=_format(v))) return fmt_values class FloatArrayFormatter(_GenericArrayFormatter): def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) if self.float_format is not None and self.formatter is None: self.fixed_width = False if callable(self.float_format): self.formatter = self.float_format self.float_format = None def _value_formatter(self, float_format: FloatFormatType | None=None, threshold: float | None=None) -> Callable: if float_format is None: float_format = self.float_format if float_format: def base_formatter(v): assert float_format is not None return float_format(value=v) if notna(v) else self.na_rep else: def base_formatter(v): return str(v) if notna(v) else self.na_rep if self.decimal != '.': def decimal_formatter(v): return base_formatter(v).replace('.', self.decimal, 1) else: decimal_formatter = base_formatter if threshold is None: return decimal_formatter def formatter(value): if notna(value): if abs(value) > threshold: return decimal_formatter(value) else: return decimal_formatter(0.0) else: return self.na_rep return formatter def get_result_as_array(self) -> np.ndarray: def format_with_na_rep(values: ArrayLike, formatter: Callable, na_rep: str) -> np.ndarray: mask = isna(values) formatted = np.array([formatter(val) if not m else na_rep for (val, m) in zip(values.ravel(), mask.ravel())]).reshape(values.shape) return formatted def format_complex_with_na_rep(values: ArrayLike, formatter: Callable, na_rep: str) -> np.ndarray: real_values = np.real(values).ravel() imag_values = np.imag(values).ravel() (real_mask, imag_mask) = (isna(real_values), isna(imag_values)) formatted_lst = [] for (val, real_val, imag_val, re_isna, im_isna) in zip(values.ravel(), real_values, imag_values, real_mask, imag_mask): if not re_isna and (not im_isna): formatted_lst.append(formatter(val)) elif not re_isna: 
formatted_lst.append(f'{formatter(real_val)}+{na_rep}j') elif not im_isna: imag_formatted = formatter(imag_val).strip() if imag_formatted.startswith('-'): formatted_lst.append(f'{na_rep}{imag_formatted}j') else: formatted_lst.append(f'{na_rep}+{imag_formatted}j') else: formatted_lst.append(f'{na_rep}+{na_rep}j') return np.array(formatted_lst).reshape(values.shape) if self.formatter is not None: return format_with_na_rep(self.values, self.formatter, self.na_rep) if self.fixed_width: threshold = get_option('display.chop_threshold') else: threshold = None def format_values_with(float_format): formatter = self._value_formatter(float_format, threshold) na_rep = ' ' + self.na_rep if self.justify == 'left' else self.na_rep values = self.values is_complex = is_complex_dtype(values) if is_complex: values = format_complex_with_na_rep(values, formatter, na_rep) else: values = format_with_na_rep(values, formatter, na_rep) if self.fixed_width: if is_complex: result = _trim_zeros_complex(values, self.decimal) else: result = _trim_zeros_float(values, self.decimal) return np.asarray(result, dtype='object') return values float_format: FloatFormatType | None if self.float_format is None: if self.fixed_width: if self.leading_space is True: fmt_str = '{value: .{digits:d}f}' else: fmt_str = '{value:.{digits:d}f}' float_format = partial(fmt_str.format, digits=self.digits) else: float_format = self.float_format else: float_format = lambda value: self.float_format % value formatted_values = format_values_with(float_format) if not self.fixed_width: return formatted_values if len(formatted_values) > 0: maxlen = max((len(x) for x in formatted_values)) too_long = maxlen > self.digits + 6 else: too_long = False abs_vals = np.abs(self.values) has_large_values = (abs_vals > 1000000.0).any() has_small_values = ((abs_vals < 10 ** (-self.digits)) & (abs_vals > 0)).any() if has_small_values or (too_long and has_large_values): if self.leading_space is True: fmt_str = '{value: .{digits:d}e}' else: fmt_str = '{value:.{digits:d}e}' float_format = partial(fmt_str.format, digits=self.digits) formatted_values = format_values_with(float_format) return formatted_values def _format_strings(self) -> list[str]: return list(self.get_result_as_array()) class _IntArrayFormatter(_GenericArrayFormatter): def _format_strings(self) -> list[str]: if self.leading_space is False: formatter_str = lambda x: f'{x:d}'.format(x=x) else: formatter_str = lambda x: f'{x: d}'.format(x=x) formatter = self.formatter or formatter_str fmt_values = [formatter(x) for x in self.values] return fmt_values class _Datetime64Formatter(_GenericArrayFormatter): values: DatetimeArray def __init__(self, values: DatetimeArray, nat_rep: str='NaT', date_format: None=None, **kwargs) -> None: super().__init__(values, **kwargs) self.nat_rep = nat_rep self.date_format = date_format def _format_strings(self) -> list[str]: values = self.values if self.formatter is not None: return [self.formatter(x) for x in values] fmt_values = values._format_native_types(na_rep=self.nat_rep, date_format=self.date_format) return fmt_values.tolist() class _ExtensionArrayFormatter(_GenericArrayFormatter): values: ExtensionArray def _format_strings(self) -> list[str]: values = self.values formatter = self.formatter fallback_formatter = None if formatter is None: fallback_formatter = values._formatter(boxed=True) if isinstance(values, Categorical): array = values._internal_get_values() else: array = np.asarray(values, dtype=object) fmt_values = format_array(array, formatter, 
float_format=self.float_format, na_rep=self.na_rep, digits=self.digits, space=self.space, justify=self.justify, decimal=self.decimal, leading_space=self.leading_space, quoting=self.quoting, fallback_formatter=fallback_formatter) return fmt_values def format_percentiles(percentiles: np.ndarray | Sequence[float]) -> list[str]: percentiles = np.asarray(percentiles) if not is_numeric_dtype(percentiles) or not np.all(percentiles >= 0) or (not np.all(percentiles <= 1)): raise ValueError('percentiles should all be in the interval [0,1]') percentiles = 100 * percentiles prec = get_precision(percentiles) percentiles_round_type = percentiles.round(prec).astype(int) int_idx = np.isclose(percentiles_round_type, percentiles) if np.all(int_idx): out = percentiles_round_type.astype(str) return [i + '%' for i in out] unique_pcts = np.unique(percentiles) prec = get_precision(unique_pcts) out = np.empty_like(percentiles, dtype=object) out[int_idx] = percentiles[int_idx].round().astype(int).astype(str) out[~int_idx] = percentiles[~int_idx].round(prec).astype(str) return [i + '%' for i in out] def get_precision(array: np.ndarray | Sequence[float]) -> int: to_begin = array[0] if array[0] > 0 else None to_end = 100 - array[-1] if array[-1] < 100 else None diff = np.ediff1d(array, to_begin=to_begin, to_end=to_end) diff = abs(diff) prec = -np.floor(np.log10(np.min(diff))).astype(int) prec = max(1, prec) return prec def _format_datetime64(x: NaTType | Timestamp, nat_rep: str='NaT') -> str: if x is NaT: return nat_rep return str(x) def _format_datetime64_dateonly(x: NaTType | Timestamp, nat_rep: str='NaT', date_format: str | None=None) -> str: if isinstance(x, NaTType): return nat_rep if date_format: return x.strftime(date_format) else: return x._date_repr def get_format_datetime64(is_dates_only: bool, nat_rep: str='NaT', date_format: str | None=None) -> Callable: if is_dates_only: return lambda x: _format_datetime64_dateonly(x, nat_rep=nat_rep, date_format=date_format) else: return lambda x: _format_datetime64(x, nat_rep=nat_rep) class _Datetime64TZFormatter(_Datetime64Formatter): values: DatetimeArray def _format_strings(self) -> list[str]: ido = self.values._is_dates_only values = self.values.astype(object) formatter = self.formatter or get_format_datetime64(ido, date_format=self.date_format) fmt_values = [formatter(x) for x in values] return fmt_values class _Timedelta64Formatter(_GenericArrayFormatter): values: TimedeltaArray def __init__(self, values: TimedeltaArray, nat_rep: str='NaT', **kwargs) -> None: super().__init__(values, **kwargs) self.nat_rep = nat_rep def _format_strings(self) -> list[str]: formatter = self.formatter or get_format_timedelta64(self.values, nat_rep=self.nat_rep, box=False) return [formatter(x) for x in self.values] def get_format_timedelta64(values: TimedeltaArray, nat_rep: str | float='NaT', box: bool=False) -> Callable: even_days = values._is_dates_only if even_days: format = None else: format = 'long' def _formatter(x): if x is None or (is_scalar(x) and isna(x)): return nat_rep if not isinstance(x, Timedelta): x = Timedelta(x) result = x._repr_base(format=format) if box: result = f"'{result}'" return result return _formatter def _make_fixed_width(strings: list[str], justify: str='right', minimum: int | None=None, adj: printing._TextAdjustment | None=None) -> list[str]: if len(strings) == 0 or justify == 'all': return strings if adj is None: adjustment = printing.get_adjustment() else: adjustment = adj max_len = max((adjustment.len(x) for x in strings)) if minimum is not None: 
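# A caller-supplied minimum width (e.g. col_space) can only widen the column, while the display.max_colwidth option caps it; just() below then truncates over-long cells to max_len - 3 characters and appends '...'.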
max_len = max(minimum, max_len) conf_max = get_option('display.max_colwidth') if conf_max is not None and max_len > conf_max: max_len = conf_max def just(x: str) -> str: if conf_max is not None: if (conf_max > 3) & (adjustment.len(x) > max_len): x = x[:max_len - 3] + '...' return x strings = [just(x) for x in strings] result = adjustment.justify(strings, max_len, mode=justify) return result def _trim_zeros_complex(str_complexes: ArrayLike, decimal: str='.') -> list[str]: (real_part, imag_part) = ([], []) for x in str_complexes: trimmed = re.split('([j+-])', x) real_part.append(''.join(trimmed[:-4])) imag_part.append(''.join(trimmed[-4:-2])) n = len(str_complexes) padded_parts = _trim_zeros_float(real_part + imag_part, decimal) if len(padded_parts) == 0: return [] padded_length = max((len(part) for part in padded_parts)) - 1 padded = [real_pt + imag_pt[0] + f'{imag_pt[1:]:>{padded_length}}' + 'j' for (real_pt, imag_pt) in zip(padded_parts[:n], padded_parts[n:])] return padded def _trim_zeros_single_float(str_float: str) -> str: str_float = str_float.rstrip('0') if str_float.endswith('.'): str_float += '0' return str_float def _trim_zeros_float(str_floats: ArrayLike | list[str], decimal: str='.') -> list[str]: trimmed = str_floats number_regex = re.compile(f'^\\s*[\\+-]?[0-9]+\\{decimal}[0-9]*$') def is_number_with_decimal(x) -> bool: return re.match(number_regex, x) is not None def should_trim(values: ArrayLike | list[str]) -> bool: numbers = [x for x in values if is_number_with_decimal(x)] return len(numbers) > 0 and all((x.endswith('0') for x in numbers)) while should_trim(trimmed): trimmed = [x[:-1] if is_number_with_decimal(x) else x for x in trimmed] result = [x + '0' if is_number_with_decimal(x) and x.endswith(decimal) else x for x in trimmed] return result def _has_names(index: Index) -> bool: if isinstance(index, MultiIndex): return com.any_not_none(*index.names) else: return index.name is not None class EngFormatter: ENG_PREFIXES = {-24: 'y', -21: 'z', -18: 'a', -15: 'f', -12: 'p', -9: 'n', -6: 'u', -3: 'm', 0: '', 3: 'k', 6: 'M', 9: 'G', 12: 'T', 15: 'P', 18: 'E', 21: 'Z', 24: 'Y'} def __init__(self, accuracy: int | None=None, use_eng_prefix: bool=False) -> None: self.accuracy = accuracy self.use_eng_prefix = use_eng_prefix def __call__(self, num: float) -> str: dnum = Decimal(str(num)) if Decimal.is_nan(dnum): return 'NaN' if Decimal.is_infinite(dnum): return 'inf' sign = 1 if dnum < 0: sign = -1 dnum = -dnum if dnum != 0: pow10 = Decimal(int(math.floor(dnum.log10() / 3) * 3)) else: pow10 = Decimal(0) pow10 = pow10.min(max(self.ENG_PREFIXES.keys())) pow10 = pow10.max(min(self.ENG_PREFIXES.keys())) int_pow10 = int(pow10) if self.use_eng_prefix: prefix = self.ENG_PREFIXES[int_pow10] elif int_pow10 < 0: prefix = f'E-{-int_pow10:02d}' else: prefix = f'E+{int_pow10:02d}' mant = sign * dnum / 10 ** pow10 if self.accuracy is None: format_str = '{mant: g}{prefix}' else: format_str = f'{{mant: .{self.accuracy:d}f}}{{prefix}}' formatted = format_str.format(mant=mant, prefix=prefix) return formatted def set_eng_float_format(accuracy: int=3, use_eng_prefix: bool=False) -> None: set_option('display.float_format', EngFormatter(accuracy, use_eng_prefix)) def get_level_lengths(levels: Any, sentinel: bool | object | str='') -> list[dict[int, int]]: if len(levels) == 0: return [] control = [True] * len(levels[0]) result = [] for level in levels: last_index = 0 lengths = {} for (i, key) in enumerate(level): if control[i] and key == sentinel: pass else: control[i] = False lengths[last_index] = i - 
last_index last_index = i lengths[last_index] = len(level) - last_index result.append(lengths) return result def buffer_put_lines(buf: WriteBuffer[str], lines: list[str]) -> None: if any((isinstance(x, str) for x in lines)): lines = [str(x) for x in lines] buf.write('\n'.join(lines)) # File: pandas-main/pandas/io/formats/html.py """""" from __future__ import annotations from textwrap import dedent from typing import TYPE_CHECKING, Any, Final, cast from pandas._config import get_option from pandas._libs import lib from pandas import MultiIndex, option_context from pandas.io.common import is_url from pandas.io.formats.format import DataFrameFormatter, get_level_lengths from pandas.io.formats.printing import pprint_thing if TYPE_CHECKING: from collections.abc import Hashable, Iterable, Mapping class HTMLFormatter: indent_delta: Final = 2 def __init__(self, formatter: DataFrameFormatter, classes: str | list[str] | tuple[str, ...] | None=None, border: int | bool | None=None, table_id: str | None=None, render_links: bool=False) -> None: self.fmt = formatter self.classes = classes self.frame = self.fmt.frame self.columns = self.fmt.tr_frame.columns self.elements: list[str] = [] self.bold_rows = self.fmt.bold_rows self.escape = self.fmt.escape self.show_dimensions = self.fmt.show_dimensions if border is None or border is True: border = cast(int, get_option('display.html.border')) elif not border: border = None self.border = border self.table_id = table_id self.render_links = render_links self.col_space = {} is_multi_index = isinstance(self.columns, MultiIndex) for (column, value) in self.fmt.col_space.items(): col_space_value = f'{value}px' if isinstance(value, int) else value self.col_space[column] = col_space_value if is_multi_index and isinstance(column, tuple): for column_index in column: self.col_space[str(column_index)] = col_space_value def to_string(self) -> str: lines = self.render() if any((isinstance(x, str) for x in lines)): lines = [str(x) for x in lines] return '\n'.join(lines) def render(self) -> list[str]: self._write_table() if self.should_show_dimensions: by = chr(215) self.write(f'
<p>{len(self.frame)} rows {by} {len(self.frame.columns)} columns</p>
') return self.elements @property def should_show_dimensions(self) -> bool: return self.fmt.should_show_dimensions @property def show_row_idx_names(self) -> bool: return self.fmt.show_row_idx_names @property def show_col_idx_names(self) -> bool: return self.fmt.show_col_idx_names @property def row_levels(self) -> int: if self.fmt.index: return self.frame.index.nlevels elif self.show_col_idx_names: return 1 return 0 def _get_columns_formatted_values(self) -> Iterable: return self.columns @property def is_truncated(self) -> bool: return self.fmt.is_truncated @property def ncols(self) -> int: return len(self.fmt.tr_frame.columns) def write(self, s: Any, indent: int=0) -> None: rs = pprint_thing(s) self.elements.append(' ' * indent + rs) def write_th(self, s: Any, header: bool=False, indent: int=0, tags: str | None=None) -> None: col_space = self.col_space.get(s, None) if header and col_space is not None: tags = tags or '' tags += f'style="min-width: {col_space};"' self._write_cell(s, kind='th', indent=indent, tags=tags) def write_td(self, s: Any, indent: int=0, tags: str | None=None) -> None: self._write_cell(s, kind='td', indent=indent, tags=tags) def _write_cell(self, s: Any, kind: str='td', indent: int=0, tags: str | None=None) -> None: if tags is not None: start_tag = f'<{kind} {tags}>' else: start_tag = f'<{kind}>' if self.escape: esc = {'&': '&amp;', '<': '&lt;', '>': '&gt;'} else: esc = {} rs = pprint_thing(s, escape_chars=esc).strip() if self.render_links and is_url(rs): rs_unescaped = pprint_thing(s, escape_chars={}).strip() start_tag += f'<a href="{rs_unescaped}" target="_blank">' end_a = '</a>' else: end_a = '' self.write(f'{start_tag}{rs}{end_a}', indent) def write_tr(self, line: Iterable, indent: int=0, indent_delta: int=0, header: bool=False, align: str | None=None, tags: dict[int, str] | None=None, nindex_levels: int=0) -> None: if tags is None: tags = {} if align is None: self.write('<tr>', indent) else: self.write(f'<tr style="text-align: {align};">', indent) indent += indent_delta for (i, s) in enumerate(line): val_tag = tags.get(i, None) if header or (self.bold_rows and i < nindex_levels): self.write_th(s, indent=indent, header=header, tags=val_tag) else: self.write_td(s, indent=indent, tags=val_tag) indent -= indent_delta self.write('</tr>', indent) def _write_table(self, indent: int=0) -> None: _classes = ['dataframe'] use_mathjax = get_option('display.html.use_mathjax') if not use_mathjax: _classes.append('tex2jax_ignore') if self.classes is not None: if isinstance(self.classes, str): self.classes = self.classes.split() if not isinstance(self.classes, (list, tuple)): raise TypeError(f'classes must be a string, list, or tuple, not {type(self.classes)}') _classes.extend(self.classes) if self.table_id is None: id_section = '' else: id_section = f' id="{self.table_id}"' if self.border is None: border_attr = '' else: border_attr = f' border="{self.border}"' self.write(f'<table{border_attr} class="{" ".join(_classes)}"{id_section}>', indent) if self.fmt.header or self.show_row_idx_names: self._write_header(indent + self.indent_delta) self._write_body(indent + self.indent_delta) self.write('</table>', indent) def _write_col_header(self, indent: int) -> None: row: list[Hashable] is_truncated_horizontally = self.fmt.is_truncated_horizontally if isinstance(self.columns, MultiIndex): template = 'colspan="{span:d}" halign="left"' sentinel: lib.NoDefault | bool if self.fmt.sparsify: sentinel = lib.no_default else: sentinel = False levels = self.columns._format_multi(sparsify=sentinel, include_names=False) level_lengths = get_level_lengths(levels, sentinel) inner_lvl = len(level_lengths) - 1 for (lnum, (records, values)) in enumerate(zip(level_lengths, levels)): if is_truncated_horizontally: ins_col = self.fmt.tr_col_num if self.fmt.sparsify: recs_new = {} for (tag, span) in list(records.items()): if tag >= ins_col: recs_new[tag + 1] = span elif tag + span > ins_col: recs_new[tag] = span + 1 if lnum == inner_lvl: values = values[:ins_col] + ('...',) + values[ins_col:] else: values = values[:ins_col] + (values[ins_col - 1],) + values[ins_col:] else: recs_new[tag] = span if tag + span == ins_col: recs_new[ins_col] = 1 values = values[:ins_col] + ('...',) + values[ins_col:] records = recs_new inner_lvl = len(level_lengths) - 1 if lnum == inner_lvl: records[ins_col] = 1 else: recs_new = {} for (tag, span) in list(records.items()): if tag >= ins_col: recs_new[tag + 1] = span else: recs_new[tag] = span recs_new[ins_col] = 1 records = recs_new values = values[:ins_col] + ['...'] + values[ins_col:] row = [''] * (self.row_levels - 1) if self.fmt.index or self.show_col_idx_names: if self.fmt.show_index_names: name = self.columns.names[lnum] row.append(pprint_thing(name or '')) else: row.append('') tags = {} j = len(row) for (i, v) in enumerate(values): if i in records: if records[i] > 1: tags[j] = template.format(span=records[i]) else: continue j += 1 row.append(v) self.write_tr(row, indent, self.indent_delta, tags=tags, header=True) else: row = [''] * (self.row_levels - 1) if self.fmt.index or self.show_col_idx_names: if self.fmt.show_index_names: row.append(self.columns.name or '') else: row.append('') row.extend(self._get_columns_formatted_values()) align = self.fmt.justify if is_truncated_horizontally: ins_col = self.row_levels + self.fmt.tr_col_num row.insert(ins_col, '...') self.write_tr(row, indent, self.indent_delta, header=True, align=align) def _write_row_header(self, indent: int) -> None: is_truncated_horizontally = self.fmt.is_truncated_horizontally row = [x if x is not None else '' for x in self.frame.index.names] + [''] * (self.ncols + (1 if is_truncated_horizontally else 0)) self.write_tr(row, indent, self.indent_delta, header=True) def _write_header(self, indent: int) -> None: self.write('<thead>', indent) if self.fmt.header: self._write_col_header(indent + self.indent_delta) if self.show_row_idx_names: self._write_row_header(indent + self.indent_delta) self.write('</thead>', indent) def _get_formatted_values(self) -> dict[int, list[str]]: with option_context('display.max_colwidth', None): fmt_values = {i: self.fmt.format_col(i) for i in range(self.ncols)} return fmt_values def _write_body(self, indent: int) -> None: self.write('<tbody>', indent) fmt_values = self._get_formatted_values() if self.fmt.index and isinstance(self.frame.index, MultiIndex): self._write_hierarchical_rows(fmt_values, indent + self.indent_delta) else: self._write_regular_rows(fmt_values, indent + self.indent_delta) self.write('</tbody>', indent) def _write_regular_rows(self, fmt_values: Mapping[int, list[str]], indent: int) -> None: is_truncated_horizontally = self.fmt.is_truncated_horizontally is_truncated_vertically = 
self.fmt.is_truncated_vertically nrows = len(self.fmt.tr_frame) if self.fmt.index: fmt = self.fmt._get_formatter('__index__') if fmt is not None: index_values = self.fmt.tr_frame.index.map(fmt) else: index_values = self.fmt.tr_frame.index._format_flat(include_name=False) row: list[str] = [] for i in range(nrows): if is_truncated_vertically and i == self.fmt.tr_row_num: str_sep_row = ['...'] * len(row) self.write_tr(str_sep_row, indent, self.indent_delta, tags=None, nindex_levels=self.row_levels) row = [] if self.fmt.index: row.append(index_values[i]) elif self.show_col_idx_names: row.append('') row.extend((fmt_values[j][i] for j in range(self.ncols))) if is_truncated_horizontally: dot_col_ix = self.fmt.tr_col_num + self.row_levels row.insert(dot_col_ix, '...') self.write_tr(row, indent, self.indent_delta, tags=None, nindex_levels=self.row_levels) def _write_hierarchical_rows(self, fmt_values: Mapping[int, list[str]], indent: int) -> None: template = 'rowspan="{span}" valign="top"' is_truncated_horizontally = self.fmt.is_truncated_horizontally is_truncated_vertically = self.fmt.is_truncated_vertically frame = self.fmt.tr_frame nrows = len(frame) assert isinstance(frame.index, MultiIndex) idx_values = frame.index._format_multi(sparsify=False, include_names=False) idx_values = list(zip(*idx_values)) if self.fmt.sparsify: sentinel = lib.no_default levels = frame.index._format_multi(sparsify=sentinel, include_names=False) level_lengths = get_level_lengths(levels, sentinel) inner_lvl = len(level_lengths) - 1 if is_truncated_vertically: ins_row = self.fmt.tr_row_num inserted = False for (lnum, records) in enumerate(level_lengths): rec_new = {} for (tag, span) in list(records.items()): if tag >= ins_row: rec_new[tag + 1] = span elif tag + span > ins_row: rec_new[tag] = span + 1 if not inserted: dot_row = list(idx_values[ins_row - 1]) dot_row[-1] = '...' idx_values.insert(ins_row, tuple(dot_row)) inserted = True else: dot_row = list(idx_values[ins_row]) dot_row[inner_lvl - lnum] = '...' idx_values[ins_row] = tuple(dot_row) else: rec_new[tag] = span if tag + span == ins_row: rec_new[ins_row] = 1 if lnum == 0: idx_values.insert(ins_row, tuple(['...'] * len(level_lengths))) elif inserted: dot_row = list(idx_values[ins_row]) dot_row[inner_lvl - lnum] = '...' 
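# --- Editorial aside (not part of the pandas source above): a hedged sketch of what the
# --- hierarchical-row logic produces through the public API. With a sparsified MultiIndex,
# --- repeated outer labels are merged into a single header cell, which is what the
# --- rowspan="{span}" valign="top" template above is building. Uses only DataFrame.to_html.
import pandas as pd

demo = pd.DataFrame(
    {'val': [1, 2, 3, 4]},
    index=pd.MultiIndex.from_product([['a', 'b'], [1, 2]], names=['outer', 'inner']),
)
html = demo.to_html()  # sparsify is on by default, so repeated outer labels merge
# Expect a cell such as <th rowspan="2" valign="top">a</th>: one header cell spanning
# the two rows that share the outer label 'a'.
print('rowspan="2"' in html)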
idx_values[ins_row] = tuple(dot_row) level_lengths[lnum] = rec_new level_lengths[inner_lvl][ins_row] = 1 for ix_col in fmt_values: fmt_values[ix_col].insert(ins_row, '...') nrows += 1 for i in range(nrows): row = [] tags = {} sparse_offset = 0 j = 0 for (records, v) in zip(level_lengths, idx_values[i]): if i in records: if records[i] > 1: tags[j] = template.format(span=records[i]) else: sparse_offset += 1 continue j += 1 row.append(v) row.extend((fmt_values[j][i] for j in range(self.ncols))) if is_truncated_horizontally: row.insert(self.row_levels - sparse_offset + self.fmt.tr_col_num, '...') self.write_tr(row, indent, self.indent_delta, tags=tags, nindex_levels=len(levels) - sparse_offset) else: row = [] for i in range(len(frame)): if is_truncated_vertically and i == self.fmt.tr_row_num: str_sep_row = ['...'] * len(row) self.write_tr(str_sep_row, indent, self.indent_delta, tags=None, nindex_levels=self.row_levels) idx_values = list(zip(*frame.index._format_multi(sparsify=False, include_names=False))) row = [] row.extend(idx_values[i]) row.extend((fmt_values[j][i] for j in range(self.ncols))) if is_truncated_horizontally: row.insert(self.row_levels + self.fmt.tr_col_num, '...') self.write_tr(row, indent, self.indent_delta, tags=None, nindex_levels=frame.index.nlevels) class NotebookFormatter(HTMLFormatter): def _get_formatted_values(self) -> dict[int, list[str]]: return {i: self.fmt.format_col(i) for i in range(self.ncols)} def _get_columns_formatted_values(self) -> list[str]: return self.columns._format_flat(include_name=False) def write_style(self) -> None: template_first = ' ' template_select = ' .dataframe %s {\n %s: %s;\n }' element_props = [('tbody tr th:only-of-type', 'vertical-align', 'middle'), ('tbody tr th', 'vertical-align', 'top')] if isinstance(self.columns, MultiIndex): element_props.append(('thead tr th', 'text-align', 'left')) if self.show_row_idx_names: element_props.append(('thead tr:last-of-type th', 'text-align', 'right')) else: element_props.append(('thead th', 'text-align', 'right')) template_mid = '\n\n'.join((template_select % t for t in element_props)) template = dedent(f'{template_first}\n{template_mid}\n{template_last}') self.write(template) def render(self) -> list[str]: self.write('
<div>') self.write_style() super().render() self.write('</div>
') return self.elements # File: pandas-main/pandas/io/formats/info.py from __future__ import annotations from abc import ABC, abstractmethod import sys from textwrap import dedent from typing import TYPE_CHECKING from pandas._config import get_option from pandas.io.formats import format as fmt from pandas.io.formats.printing import pprint_thing if TYPE_CHECKING: from collections.abc import Iterable, Iterator, Mapping, Sequence from pandas._typing import Dtype, WriteBuffer from pandas import DataFrame, Index, Series frame_max_cols_sub = dedent(' max_cols : int, optional\n When to switch from the verbose to the truncated output. If the\n DataFrame has more than `max_cols` columns, the truncated output\n is used. By default, the setting in\n ``pandas.options.display.max_info_columns`` is used.') show_counts_sub = dedent(' show_counts : bool, optional\n Whether to show the non-null counts. By default, this is shown\n only if the DataFrame is smaller than\n ``pandas.options.display.max_info_rows`` and\n ``pandas.options.display.max_info_columns``. A value of True always\n shows the counts, and False never shows the counts.') frame_examples_sub = dedent(' >>> int_values = [1, 2, 3, 4, 5]\n >>> text_values = [\'alpha\', \'beta\', \'gamma\', \'delta\', \'epsilon\']\n >>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]\n >>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,\n ... "float_col": float_values})\n >>> df\n int_col text_col float_col\n 0 1 alpha 0.00\n 1 2 beta 0.25\n 2 3 gamma 0.50\n 3 4 delta 0.75\n 4 5 epsilon 1.00\n\n Prints information of all columns:\n\n >>> df.info(verbose=True)\n \n RangeIndex: 5 entries, 0 to 4\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 int_col 5 non-null int64\n 1 text_col 5 non-null object\n 2 float_col 5 non-null float64\n dtypes: float64(1), int64(1), object(1)\n memory usage: 248.0+ bytes\n\n Prints a summary of columns count and its dtypes but not per column\n information:\n\n >>> df.info(verbose=False)\n \n RangeIndex: 5 entries, 0 to 4\n Columns: 3 entries, int_col to float_col\n dtypes: float64(1), int64(1), object(1)\n memory usage: 248.0+ bytes\n\n Pipe output of DataFrame.info to buffer instead of sys.stdout, get\n buffer content and writes to a text file:\n\n >>> import io\n >>> buffer = io.StringIO()\n >>> df.info(buf=buffer)\n >>> s = buffer.getvalue()\n >>> with open("df_info.txt", "w",\n ... encoding="utf-8") as f: # doctest: +SKIP\n ... f.write(s)\n 260\n\n The `memory_usage` parameter allows deep introspection mode, specially\n useful for big DataFrames and fine-tune memory optimization:\n\n >>> random_strings_array = np.random.choice([\'a\', \'b\', \'c\'], 10 ** 6)\n >>> df = pd.DataFrame({\n ... \'column_1\': np.random.choice([\'a\', \'b\', \'c\'], 10 ** 6),\n ... \'column_2\': np.random.choice([\'a\', \'b\', \'c\'], 10 ** 6),\n ... \'column_3\': np.random.choice([\'a\', \'b\', \'c\'], 10 ** 6)\n ... 
})\n >>> df.info()\n \n RangeIndex: 1000000 entries, 0 to 999999\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 column_1 1000000 non-null object\n 1 column_2 1000000 non-null object\n 2 column_3 1000000 non-null object\n dtypes: object(3)\n memory usage: 22.9+ MB\n\n >>> df.info(memory_usage=\'deep\')\n \n RangeIndex: 1000000 entries, 0 to 999999\n Data columns (total 3 columns):\n # Column Non-Null Count Dtype\n --- ------ -------------- -----\n 0 column_1 1000000 non-null object\n 1 column_2 1000000 non-null object\n 2 column_3 1000000 non-null object\n dtypes: object(3)\n memory usage: 165.9 MB') frame_see_also_sub = dedent(' DataFrame.describe: Generate descriptive statistics of DataFrame\n columns.\n DataFrame.memory_usage: Memory usage of DataFrame columns.') frame_sub_kwargs = {'klass': 'DataFrame', 'type_sub': ' and columns', 'max_cols_sub': frame_max_cols_sub, 'show_counts_sub': show_counts_sub, 'examples_sub': frame_examples_sub, 'see_also_sub': frame_see_also_sub, 'version_added_sub': ''} series_examples_sub = dedent(' >>> int_values = [1, 2, 3, 4, 5]\n >>> text_values = [\'alpha\', \'beta\', \'gamma\', \'delta\', \'epsilon\']\n >>> s = pd.Series(text_values, index=int_values)\n >>> s.info()\n \n Index: 5 entries, 1 to 5\n Series name: None\n Non-Null Count Dtype\n -------------- -----\n 5 non-null object\n dtypes: object(1)\n memory usage: 80.0+ bytes\n\n Prints a summary excluding information about its values:\n\n >>> s.info(verbose=False)\n \n Index: 5 entries, 1 to 5\n dtypes: object(1)\n memory usage: 80.0+ bytes\n\n Pipe output of Series.info to buffer instead of sys.stdout, get\n buffer content and writes to a text file:\n\n >>> import io\n >>> buffer = io.StringIO()\n >>> s.info(buf=buffer)\n >>> s = buffer.getvalue()\n >>> with open("df_info.txt", "w",\n ... encoding="utf-8") as f: # doctest: +SKIP\n ... f.write(s)\n 260\n\n The `memory_usage` parameter allows deep introspection mode, specially\n useful for big Series and fine-tune memory optimization:\n\n >>> random_strings_array = np.random.choice([\'a\', \'b\', \'c\'], 10 ** 6)\n >>> s = pd.Series(np.random.choice([\'a\', \'b\', \'c\'], 10 ** 6))\n >>> s.info()\n \n RangeIndex: 1000000 entries, 0 to 999999\n Series name: None\n Non-Null Count Dtype\n -------------- -----\n 1000000 non-null object\n dtypes: object(1)\n memory usage: 7.6+ MB\n\n >>> s.info(memory_usage=\'deep\')\n \n RangeIndex: 1000000 entries, 0 to 999999\n Series name: None\n Non-Null Count Dtype\n -------------- -----\n 1000000 non-null object\n dtypes: object(1)\n memory usage: 55.3 MB') series_see_also_sub = dedent(' Series.describe: Generate descriptive statistics of Series.\n Series.memory_usage: Memory usage of Series.') series_sub_kwargs = {'klass': 'Series', 'type_sub': '', 'max_cols_sub': '', 'show_counts_sub': show_counts_sub, 'examples_sub': series_examples_sub, 'see_also_sub': series_see_also_sub, 'version_added_sub': '\n.. versionadded:: 1.4.0\n'} INFO_DOCSTRING = dedent('\n Print a concise summary of a {klass}.\n\n This method prints information about a {klass} including\n the index dtype{type_sub}, non-null values and memory usage.\n {version_added_sub}\n Parameters\n ----------\n verbose : bool, optional\n Whether to print the full summary. By default, the setting in\n ``pandas.options.display.max_info_columns`` is followed.\n buf : writable buffer, defaults to sys.stdout\n Where to send the output. By default, the output is printed to\n sys.stdout. 
Pass a writable buffer if you need to further process\n the output.\n {max_cols_sub}\n memory_usage : bool, str, optional\n Specifies whether total memory usage of the {klass}\n elements (including the index) should be displayed. By default,\n this follows the ``pandas.options.display.memory_usage`` setting.\n\n True always show memory usage. False never shows memory usage.\n A value of \'deep\' is equivalent to "True with deep introspection".\n Memory usage is shown in human-readable units (base-2\n representation). Without deep introspection a memory estimation is\n made based in column dtype and number of rows assuming values\n consume the same memory amount for corresponding dtypes. With deep\n memory introspection, a real memory usage calculation is performed\n at the cost of computational resources. See the\n :ref:`Frequently Asked Questions ` for more\n details.\n {show_counts_sub}\n\n Returns\n -------\n None\n This method prints a summary of a {klass} and returns None.\n\n See Also\n --------\n {see_also_sub}\n\n Examples\n --------\n {examples_sub}\n ') def _put_str(s: str | Dtype, space: int) -> str: return str(s)[:space].ljust(space) def _sizeof_fmt(num: float, size_qualifier: str) -> str: for x in ['bytes', 'KB', 'MB', 'GB', 'TB']: if num < 1024.0: return f'{num:3.1f}{size_qualifier} {x}' num /= 1024.0 return f'{num:3.1f}{size_qualifier} PB' def _initialize_memory_usage(memory_usage: bool | str | None=None) -> bool | str: if memory_usage is None: memory_usage = get_option('display.memory_usage') return memory_usage class _BaseInfo(ABC): data: DataFrame | Series memory_usage: bool | str @property @abstractmethod def dtypes(self) -> Iterable[Dtype]: @property @abstractmethod def dtype_counts(self) -> Mapping[str, int]: @property @abstractmethod def non_null_counts(self) -> list[int] | Series: @property @abstractmethod def memory_usage_bytes(self) -> int: @property def memory_usage_string(self) -> str: return f'{_sizeof_fmt(self.memory_usage_bytes, self.size_qualifier)}\n' @property def size_qualifier(self) -> str: size_qualifier = '' if self.memory_usage: if self.memory_usage != 'deep': if 'object' in self.dtype_counts or self.data.index._is_memory_usage_qualified: size_qualifier = '+' return size_qualifier @abstractmethod def render(self, *, buf: WriteBuffer[str] | None, max_cols: int | None, verbose: bool | None, show_counts: bool | None) -> None: pass class DataFrameInfo(_BaseInfo): def __init__(self, data: DataFrame, memory_usage: bool | str | None=None) -> None: self.data: DataFrame = data self.memory_usage = _initialize_memory_usage(memory_usage) @property def dtype_counts(self) -> Mapping[str, int]: return _get_dataframe_dtype_counts(self.data) @property def dtypes(self) -> Iterable[Dtype]: return self.data.dtypes @property def ids(self) -> Index: return self.data.columns @property def col_count(self) -> int: return len(self.ids) @property def non_null_counts(self) -> Series: return self.data.count() @property def memory_usage_bytes(self) -> int: deep = self.memory_usage == 'deep' return self.data.memory_usage(index=True, deep=deep).sum() def render(self, *, buf: WriteBuffer[str] | None, max_cols: int | None, verbose: bool | None, show_counts: bool | None) -> None: printer = _DataFrameInfoPrinter(info=self, max_cols=max_cols, verbose=verbose, show_counts=show_counts) printer.to_buffer(buf) class SeriesInfo(_BaseInfo): def __init__(self, data: Series, memory_usage: bool | str | None=None) -> None: self.data: Series = data self.memory_usage = 
_initialize_memory_usage(memory_usage) def render(self, *, buf: WriteBuffer[str] | None=None, max_cols: int | None=None, verbose: bool | None=None, show_counts: bool | None=None) -> None: if max_cols is not None: raise ValueError('Argument `max_cols` can only be passed in DataFrame.info, not Series.info') printer = _SeriesInfoPrinter(info=self, verbose=verbose, show_counts=show_counts) printer.to_buffer(buf) @property def non_null_counts(self) -> list[int]: return [self.data.count()] @property def dtypes(self) -> Iterable[Dtype]: return [self.data.dtypes] @property def dtype_counts(self) -> Mapping[str, int]: from pandas.core.frame import DataFrame return _get_dataframe_dtype_counts(DataFrame(self.data)) @property def memory_usage_bytes(self) -> int: deep = self.memory_usage == 'deep' return self.data.memory_usage(index=True, deep=deep) class _InfoPrinterAbstract: def to_buffer(self, buf: WriteBuffer[str] | None=None) -> None: table_builder = self._create_table_builder() lines = table_builder.get_lines() if buf is None: buf = sys.stdout fmt.buffer_put_lines(buf, lines) @abstractmethod def _create_table_builder(self) -> _TableBuilderAbstract: class _DataFrameInfoPrinter(_InfoPrinterAbstract): def __init__(self, info: DataFrameInfo, max_cols: int | None=None, verbose: bool | None=None, show_counts: bool | None=None) -> None: self.info = info self.data = info.data self.verbose = verbose self.max_cols = self._initialize_max_cols(max_cols) self.show_counts = self._initialize_show_counts(show_counts) @property def max_rows(self) -> int: return get_option('display.max_info_rows') @property def exceeds_info_cols(self) -> bool: return bool(self.col_count > self.max_cols) @property def exceeds_info_rows(self) -> bool: return bool(len(self.data) > self.max_rows) @property def col_count(self) -> int: return self.info.col_count def _initialize_max_cols(self, max_cols: int | None) -> int: if max_cols is None: return get_option('display.max_info_columns') return max_cols def _initialize_show_counts(self, show_counts: bool | None) -> bool: if show_counts is None: return bool(not self.exceeds_info_cols and (not self.exceeds_info_rows)) else: return show_counts def _create_table_builder(self) -> _DataFrameTableBuilder: if self.verbose: return _DataFrameTableBuilderVerbose(info=self.info, with_counts=self.show_counts) elif self.verbose is False: return _DataFrameTableBuilderNonVerbose(info=self.info) elif self.exceeds_info_cols: return _DataFrameTableBuilderNonVerbose(info=self.info) else: return _DataFrameTableBuilderVerbose(info=self.info, with_counts=self.show_counts) class _SeriesInfoPrinter(_InfoPrinterAbstract): def __init__(self, info: SeriesInfo, verbose: bool | None=None, show_counts: bool | None=None) -> None: self.info = info self.data = info.data self.verbose = verbose self.show_counts = self._initialize_show_counts(show_counts) def _create_table_builder(self) -> _SeriesTableBuilder: if self.verbose or self.verbose is None: return _SeriesTableBuilderVerbose(info=self.info, with_counts=self.show_counts) else: return _SeriesTableBuilderNonVerbose(info=self.info) def _initialize_show_counts(self, show_counts: bool | None) -> bool: if show_counts is None: return True else: return show_counts class _TableBuilderAbstract(ABC): _lines: list[str] info: _BaseInfo @abstractmethod def get_lines(self) -> list[str]: @property def data(self) -> DataFrame | Series: return self.info.data @property def dtypes(self) -> Iterable[Dtype]: return self.info.dtypes @property def dtype_counts(self) -> Mapping[str, int]: 
return self.info.dtype_counts @property def display_memory_usage(self) -> bool: return bool(self.info.memory_usage) @property def memory_usage_string(self) -> str: return self.info.memory_usage_string @property def non_null_counts(self) -> list[int] | Series: return self.info.non_null_counts def add_object_type_line(self) -> None: self._lines.append(str(type(self.data))) def add_index_range_line(self) -> None: self._lines.append(self.data.index._summary()) def add_dtypes_line(self) -> None: collected_dtypes = [f'{key}({val:d})' for (key, val) in sorted(self.dtype_counts.items())] self._lines.append(f"dtypes: {', '.join(collected_dtypes)}") class _DataFrameTableBuilder(_TableBuilderAbstract): def __init__(self, *, info: DataFrameInfo) -> None: self.info: DataFrameInfo = info def get_lines(self) -> list[str]: self._lines = [] if self.col_count == 0: self._fill_empty_info() else: self._fill_non_empty_info() return self._lines def _fill_empty_info(self) -> None: self.add_object_type_line() self.add_index_range_line() self._lines.append(f'Empty {type(self.data).__name__}\n') @abstractmethod def _fill_non_empty_info(self) -> None: @property def data(self) -> DataFrame: return self.info.data @property def ids(self) -> Index: return self.info.ids @property def col_count(self) -> int: return self.info.col_count def add_memory_usage_line(self) -> None: self._lines.append(f'memory usage: {self.memory_usage_string}') class _DataFrameTableBuilderNonVerbose(_DataFrameTableBuilder): def _fill_non_empty_info(self) -> None: self.add_object_type_line() self.add_index_range_line() self.add_columns_summary_line() self.add_dtypes_line() if self.display_memory_usage: self.add_memory_usage_line() def add_columns_summary_line(self) -> None: self._lines.append(self.ids._summary(name='Columns')) class _TableBuilderVerboseMixin(_TableBuilderAbstract): SPACING: str = ' ' * 2 strrows: Sequence[Sequence[str]] gross_column_widths: Sequence[int] with_counts: bool @property @abstractmethod def headers(self) -> Sequence[str]: @property def header_column_widths(self) -> Sequence[int]: return [len(col) for col in self.headers] def _get_gross_column_widths(self) -> Sequence[int]: body_column_widths = self._get_body_column_widths() return [max(*widths) for widths in zip(self.header_column_widths, body_column_widths)] def _get_body_column_widths(self) -> Sequence[int]: strcols: Sequence[Sequence[str]] = list(zip(*self.strrows)) return [max((len(x) for x in col)) for col in strcols] def _gen_rows(self) -> Iterator[Sequence[str]]: if self.with_counts: return self._gen_rows_with_counts() else: return self._gen_rows_without_counts() @abstractmethod def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]: @abstractmethod def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]: def add_header_line(self) -> None: header_line = self.SPACING.join([_put_str(header, col_width) for (header, col_width) in zip(self.headers, self.gross_column_widths)]) self._lines.append(header_line) def add_separator_line(self) -> None: separator_line = self.SPACING.join([_put_str('-' * header_colwidth, gross_colwidth) for (header_colwidth, gross_colwidth) in zip(self.header_column_widths, self.gross_column_widths)]) self._lines.append(separator_line) def add_body_lines(self) -> None: for row in self.strrows: body_line = self.SPACING.join([_put_str(col, gross_colwidth) for (col, gross_colwidth) in zip(row, self.gross_column_widths)]) self._lines.append(body_line) def _gen_non_null_counts(self) -> Iterator[str]: for count in self.non_null_counts: 
yield f'{count} non-null' def _gen_dtypes(self) -> Iterator[str]: for dtype in self.dtypes: yield pprint_thing(dtype) class _DataFrameTableBuilderVerbose(_DataFrameTableBuilder, _TableBuilderVerboseMixin): def __init__(self, *, info: DataFrameInfo, with_counts: bool) -> None: self.info = info self.with_counts = with_counts self.strrows: Sequence[Sequence[str]] = list(self._gen_rows()) self.gross_column_widths: Sequence[int] = self._get_gross_column_widths() def _fill_non_empty_info(self) -> None: self.add_object_type_line() self.add_index_range_line() self.add_columns_summary_line() self.add_header_line() self.add_separator_line() self.add_body_lines() self.add_dtypes_line() if self.display_memory_usage: self.add_memory_usage_line() @property def headers(self) -> Sequence[str]: if self.with_counts: return [' # ', 'Column', 'Non-Null Count', 'Dtype'] return [' # ', 'Column', 'Dtype'] def add_columns_summary_line(self) -> None: self._lines.append(f'Data columns (total {self.col_count} columns):') def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]: yield from zip(self._gen_line_numbers(), self._gen_columns(), self._gen_dtypes()) def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]: yield from zip(self._gen_line_numbers(), self._gen_columns(), self._gen_non_null_counts(), self._gen_dtypes()) def _gen_line_numbers(self) -> Iterator[str]: for (i, _) in enumerate(self.ids): yield f' {i}' def _gen_columns(self) -> Iterator[str]: for col in self.ids: yield pprint_thing(col) class _SeriesTableBuilder(_TableBuilderAbstract): def __init__(self, *, info: SeriesInfo) -> None: self.info: SeriesInfo = info def get_lines(self) -> list[str]: self._lines = [] self._fill_non_empty_info() return self._lines @property def data(self) -> Series: return self.info.data def add_memory_usage_line(self) -> None: self._lines.append(f'memory usage: {self.memory_usage_string}') @abstractmethod def _fill_non_empty_info(self) -> None: class _SeriesTableBuilderNonVerbose(_SeriesTableBuilder): def _fill_non_empty_info(self) -> None: self.add_object_type_line() self.add_index_range_line() self.add_dtypes_line() if self.display_memory_usage: self.add_memory_usage_line() class _SeriesTableBuilderVerbose(_SeriesTableBuilder, _TableBuilderVerboseMixin): def __init__(self, *, info: SeriesInfo, with_counts: bool) -> None: self.info = info self.with_counts = with_counts self.strrows: Sequence[Sequence[str]] = list(self._gen_rows()) self.gross_column_widths: Sequence[int] = self._get_gross_column_widths() def _fill_non_empty_info(self) -> None: self.add_object_type_line() self.add_index_range_line() self.add_series_name_line() self.add_header_line() self.add_separator_line() self.add_body_lines() self.add_dtypes_line() if self.display_memory_usage: self.add_memory_usage_line() def add_series_name_line(self) -> None: self._lines.append(f'Series name: {self.data.name}') @property def headers(self) -> Sequence[str]: if self.with_counts: return ['Non-Null Count', 'Dtype'] return ['Dtype'] def _gen_rows_without_counts(self) -> Iterator[Sequence[str]]: yield from self._gen_dtypes() def _gen_rows_with_counts(self) -> Iterator[Sequence[str]]: yield from zip(self._gen_non_null_counts(), self._gen_dtypes()) def _get_dataframe_dtype_counts(df: DataFrame) -> Mapping[str, int]: return df.dtypes.value_counts().groupby(lambda x: x.name).sum() # File: pandas-main/pandas/io/formats/printing.py """""" from __future__ import annotations from collections.abc import Callable, Iterable, Mapping, Sequence import sys from typing import 
TYPE_CHECKING, Any, TypeVar, Union from unicodedata import east_asian_width from pandas._config import get_option from pandas.core.dtypes.inference import is_sequence from pandas.io.formats.console import get_console_size if TYPE_CHECKING: from pandas._typing import ListLike EscapeChars = Union[Mapping[str, str], Iterable[str]] _KT = TypeVar('_KT') _VT = TypeVar('_VT') def adjoin(space: int, *lists: list[str], **kwargs: Any) -> str: strlen = kwargs.pop('strlen', len) justfunc = kwargs.pop('justfunc', _adj_justify) newLists = [] lengths = [max(map(strlen, x)) + space for x in lists[:-1]] lengths.append(max(map(len, lists[-1]))) maxLen = max(map(len, lists)) for (i, lst) in enumerate(lists): nl = justfunc(lst, lengths[i], mode='left') nl = [' ' * lengths[i]] * (maxLen - len(lst)) + nl newLists.append(nl) toJoin = zip(*newLists) return '\n'.join((''.join(lines) for lines in toJoin)) def _adj_justify(texts: Iterable[str], max_len: int, mode: str='right') -> list[str]: if mode == 'left': return [x.ljust(max_len) for x in texts] elif mode == 'center': return [x.center(max_len) for x in texts] else: return [x.rjust(max_len) for x in texts] def _pprint_seq(seq: ListLike, _nest_lvl: int=0, max_seq_items: int | None=None, **kwds: Any) -> str: if isinstance(seq, set): fmt = '{{{body}}}' else: fmt = '[{body}]' if hasattr(seq, '__setitem__') else '({body})' if max_seq_items is False: max_items = None else: max_items = max_seq_items or get_option('max_seq_items') or len(seq) s = iter(seq) r = [] max_items_reached = False for (i, item) in enumerate(s): if max_items is not None and i >= max_items: max_items_reached = True break r.append(pprint_thing(item, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds)) body = ', '.join(r) if max_items_reached: body += ', ...' elif isinstance(seq, tuple) and len(seq) == 1: body += ',' return fmt.format(body=body) def _pprint_dict(seq: Mapping, _nest_lvl: int=0, max_seq_items: int | None=None, **kwds: Any) -> str: fmt = '{{{things}}}' pairs = [] pfmt = '{key}: {val}' if max_seq_items is False: nitems = len(seq) else: nitems = max_seq_items or get_option('max_seq_items') or len(seq) for (k, v) in list(seq.items())[:nitems]: pairs.append(pfmt.format(key=pprint_thing(k, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds), val=pprint_thing(v, _nest_lvl + 1, max_seq_items=max_seq_items, **kwds))) if nitems < len(seq): return fmt.format(things=', '.join(pairs) + ', ...') else: return fmt.format(things=', '.join(pairs)) def pprint_thing(thing: object, _nest_lvl: int=0, escape_chars: EscapeChars | None=None, default_escapes: bool=False, quote_strings: bool=False, max_seq_items: int | None=None) -> str: def as_escaped_string(thing: Any, escape_chars: EscapeChars | None=escape_chars) -> str: translate = {'\t': '\\t', '\n': '\\n', '\r': '\\r'} if isinstance(escape_chars, Mapping): if default_escapes: translate.update(escape_chars) else: translate = escape_chars escape_chars = list(escape_chars.keys()) else: escape_chars = escape_chars or () result = str(thing) for c in escape_chars: result = result.replace(c, translate[c]) return result if hasattr(thing, '__next__'): return str(thing) elif isinstance(thing, Mapping) and _nest_lvl < get_option('display.pprint_nest_depth'): result = _pprint_dict(thing, _nest_lvl, quote_strings=True, max_seq_items=max_seq_items) elif is_sequence(thing) and _nest_lvl < get_option('display.pprint_nest_depth'): result = _pprint_seq(thing, _nest_lvl, escape_chars=escape_chars, quote_strings=quote_strings, max_seq_items=max_seq_items) elif 
isinstance(thing, str) and quote_strings: result = f"'{as_escaped_string(thing)}'" else: result = as_escaped_string(thing) return result def pprint_thing_encoded(object: object, encoding: str='utf-8', errors: str='replace') -> bytes: value = pprint_thing(object) return value.encode(encoding, errors) def enable_data_resource_formatter(enable: bool) -> None: if 'IPython' not in sys.modules: return from IPython import get_ipython ip = get_ipython() if ip is None: return formatters = ip.display_formatter.formatters mimetype = 'application/vnd.dataresource+json' if enable: if mimetype not in formatters: from IPython.core.formatters import BaseFormatter from traitlets import ObjectName class TableSchemaFormatter(BaseFormatter): print_method = ObjectName('_repr_data_resource_') _return_type = (dict,) formatters[mimetype] = TableSchemaFormatter() formatters[mimetype].enabled = True elif mimetype in formatters: formatters[mimetype].enabled = False def default_pprint(thing: Any, max_seq_items: int | None=None) -> str: return pprint_thing(thing, escape_chars=('\t', '\r', '\n'), quote_strings=True, max_seq_items=max_seq_items) def format_object_summary(obj: ListLike, formatter: Callable, is_justify: bool=True, name: str | None=None, indent_for_name: bool=True, line_break_each_value: bool=False) -> str: (display_width, _) = get_console_size() if display_width is None: display_width = get_option('display.width') or 80 if name is None: name = type(obj).__name__ if indent_for_name: name_len = len(name) space1 = f"\n{' ' * (name_len + 1)}" space2 = f"\n{' ' * (name_len + 2)}" else: space1 = '\n' space2 = '\n ' n = len(obj) if line_break_each_value: sep = ',\n ' + ' ' * len(name) else: sep = ',' max_seq_items = get_option('display.max_seq_items') or n is_truncated = n > max_seq_items adj = get_adjustment() def _extend_line(s: str, line: str, value: str, display_width: int, next_line_prefix: str) -> tuple[str, str]: if adj.len(line.rstrip()) + adj.len(value.rstrip()) >= display_width: s += line.rstrip() line = next_line_prefix line += value return (s, line) def best_len(values: list[str]) -> int: if values: return max((adj.len(x) for x in values)) else: return 0 close = ', ' if n == 0: summary = f'[]{close}' elif n == 1 and (not line_break_each_value): first = formatter(obj[0]) summary = f'[{first}]{close}' elif n == 2 and (not line_break_each_value): first = formatter(obj[0]) last = formatter(obj[-1]) summary = f'[{first}, {last}]{close}' else: if max_seq_items == 1: head = [] tail = [formatter(x) for x in obj[-1:]] elif n > max_seq_items: n = min(max_seq_items // 2, 10) head = [formatter(x) for x in obj[:n]] tail = [formatter(x) for x in obj[-n:]] else: head = [] tail = [formatter(x) for x in obj] if is_justify: if line_break_each_value: (head, tail) = _justify(head, tail) elif is_truncated or not (len(', '.join(head)) < display_width and len(', '.join(tail)) < display_width): max_length = max(best_len(head), best_len(tail)) head = [x.rjust(max_length) for x in head] tail = [x.rjust(max_length) for x in tail] if line_break_each_value: max_space = display_width - len(space2) value = tail[0] max_items = 1 for num_items in reversed(range(1, len(value) + 1)): pprinted_seq = _pprint_seq(value, max_seq_items=num_items) if len(pprinted_seq) < max_space: max_items = num_items break head = [_pprint_seq(x, max_seq_items=max_items) for x in head] tail = [_pprint_seq(x, max_seq_items=max_items) for x in tail] summary = '' line = space2 for head_value in head: word = head_value + sep + ' ' (summary, line) = 
_extend_line(summary, line, word, display_width, space2) if is_truncated: summary += line.rstrip() + space2 + '...' line = space2 for tail_item in tail[:-1]: word = tail_item + sep + ' ' (summary, line) = _extend_line(summary, line, word, display_width, space2) (summary, line) = _extend_line(summary, line, tail[-1], display_width - 2, space2) summary += line close = ']' + close.rstrip(' ') summary += close if len(summary) > display_width or line_break_each_value: summary += space1 else: summary += ' ' summary = '[' + summary[len(space2):] return summary def _justify(head: list[Sequence[str]], tail: list[Sequence[str]]) -> tuple[list[tuple[str, ...]], list[tuple[str, ...]]]: combined = head + tail max_length = [0] * len(combined[0]) for inner_seq in combined: length = [len(item) for item in inner_seq] max_length = [max(x, y) for (x, y) in zip(max_length, length)] head_tuples = [tuple((x.rjust(max_len) for (x, max_len) in zip(seq, max_length))) for seq in head] tail_tuples = [tuple((x.rjust(max_len) for (x, max_len) in zip(seq, max_length))) for seq in tail] return (head_tuples, tail_tuples) class PrettyDict(dict[_KT, _VT]): def __repr__(self) -> str: return pprint_thing(self) class _TextAdjustment: def __init__(self) -> None: self.encoding = get_option('display.encoding') def len(self, text: str) -> int: return len(text) def justify(self, texts: Any, max_len: int, mode: str='right') -> list[str]: if mode == 'left': return [x.ljust(max_len) for x in texts] elif mode == 'center': return [x.center(max_len) for x in texts] else: return [x.rjust(max_len) for x in texts] def adjoin(self, space: int, *lists: Any, **kwargs: Any) -> str: return adjoin(space, *lists, strlen=self.len, justfunc=self.justify, **kwargs) class _EastAsianTextAdjustment(_TextAdjustment): def __init__(self) -> None: super().__init__() if get_option('display.unicode.ambiguous_as_wide'): self.ambiguous_width = 2 else: self.ambiguous_width = 1 self._EAW_MAP = {'Na': 1, 'N': 1, 'W': 2, 'F': 2, 'H': 1} def len(self, text: str) -> int: if not isinstance(text, str): return len(text) return sum((self._EAW_MAP.get(east_asian_width(c), self.ambiguous_width) for c in text)) def justify(self, texts: Iterable[str], max_len: int, mode: str='right') -> list[str]: def _get_pad(t: str) -> int: return max_len - self.len(t) + len(t) if mode == 'left': return [x.ljust(_get_pad(x)) for x in texts] elif mode == 'center': return [x.center(_get_pad(x)) for x in texts] else: return [x.rjust(_get_pad(x)) for x in texts] def get_adjustment() -> _TextAdjustment: use_east_asian_width = get_option('display.unicode.east_asian_width') if use_east_asian_width: return _EastAsianTextAdjustment() else: return _TextAdjustment() # File: pandas-main/pandas/io/formats/string.py """""" from __future__ import annotations from shutil import get_terminal_size from typing import TYPE_CHECKING import numpy as np from pandas.io.formats.printing import pprint_thing if TYPE_CHECKING: from collections.abc import Iterable from pandas.io.formats.format import DataFrameFormatter class StringFormatter: def __init__(self, fmt: DataFrameFormatter, line_width: int | None=None) -> None: self.fmt = fmt self.adj = fmt.adj self.frame = fmt.frame self.line_width = line_width def to_string(self) -> str: text = self._get_string_representation() if self.fmt.should_show_dimensions: text = f'{text}{self.fmt.dimensions_info}' return text def _get_strcols(self) -> list[list[str]]: strcols = self.fmt.get_strcols() if self.fmt.is_truncated: strcols = self._insert_dot_separators(strcols) return 
strcols def _get_string_representation(self) -> str: if self.fmt.frame.empty: return self._empty_info_line strcols = self._get_strcols() if self.line_width is None: return self.adj.adjoin(1, *strcols) if self._need_to_wrap_around: return self._join_multiline(strcols) return self._fit_strcols_to_terminal_width(strcols) @property def _empty_info_line(self) -> str: return f'Empty {type(self.frame).__name__}\nColumns: {pprint_thing(self.frame.columns)}\nIndex: {pprint_thing(self.frame.index)}' @property def _need_to_wrap_around(self) -> bool: return bool(self.fmt.max_cols is None or self.fmt.max_cols > 0) def _insert_dot_separators(self, strcols: list[list[str]]) -> list[list[str]]: str_index = self.fmt._get_formatted_index(self.fmt.tr_frame) index_length = len(str_index) if self.fmt.is_truncated_horizontally: strcols = self._insert_dot_separator_horizontal(strcols, index_length) if self.fmt.is_truncated_vertically: strcols = self._insert_dot_separator_vertical(strcols, index_length) return strcols @property def _adjusted_tr_col_num(self) -> int: return self.fmt.tr_col_num + 1 if self.fmt.index else self.fmt.tr_col_num def _insert_dot_separator_horizontal(self, strcols: list[list[str]], index_length: int) -> list[list[str]]: strcols.insert(self._adjusted_tr_col_num, [' ...'] * index_length) return strcols def _insert_dot_separator_vertical(self, strcols: list[list[str]], index_length: int) -> list[list[str]]: n_header_rows = index_length - len(self.fmt.tr_frame) row_num = self.fmt.tr_row_num for (ix, col) in enumerate(strcols): cwidth = self.adj.len(col[row_num]) if self.fmt.is_truncated_horizontally: is_dot_col = ix == self._adjusted_tr_col_num else: is_dot_col = False if cwidth > 3 or is_dot_col: dots = '...' else: dots = '..' if ix == 0 and self.fmt.index: dot_mode = 'left' elif is_dot_col: cwidth = 4 dot_mode = 'right' else: dot_mode = 'right' dot_str = self.adj.justify([dots], cwidth, mode=dot_mode)[0] col.insert(row_num + n_header_rows, dot_str) return strcols def _join_multiline(self, strcols_input: Iterable[list[str]]) -> str: lwidth = self.line_width adjoin_width = 1 strcols = list(strcols_input) if self.fmt.index: idx = strcols.pop(0) lwidth -= np.array([self.adj.len(x) for x in idx]).max() + adjoin_width col_widths = [np.array([self.adj.len(x) for x in col]).max() if len(col) > 0 else 0 for col in strcols] assert lwidth is not None col_bins = _binify(col_widths, lwidth) nbins = len(col_bins) str_lst = [] start = 0 for (i, end) in enumerate(col_bins): row = strcols[start:end] if self.fmt.index: row.insert(0, idx) if nbins > 1: nrows = len(row[-1]) if end <= len(strcols) and i < nbins - 1: row.append([' \\'] + [' '] * (nrows - 1)) else: row.append([' '] * nrows) str_lst.append(self.adj.adjoin(adjoin_width, *row)) start = end return '\n\n'.join(str_lst) def _fit_strcols_to_terminal_width(self, strcols: list[list[str]]) -> str: from pandas import Series lines = self.adj.adjoin(1, *strcols).split('\n') max_len = Series(lines).str.len().max() (width, _) = get_terminal_size() dif = max_len - width adj_dif = dif + 1 col_lens = Series([Series(ele).str.len().max() for ele in strcols]) n_cols = len(col_lens) counter = 0 while adj_dif > 0 and n_cols > 1: counter += 1 mid = round(n_cols / 2) mid_ix = col_lens.index[mid] col_len = col_lens[mid_ix] adj_dif -= col_len + 1 col_lens = col_lens.drop(mid_ix) n_cols = len(col_lens) max_cols_fitted = n_cols - self.fmt.index max_cols_fitted = max(max_cols_fitted, 2) self.fmt.max_cols_fitted = max_cols_fitted self.fmt.truncate() strcols = 
self._get_strcols() return self.adj.adjoin(1, *strcols) def _binify(cols: list[int], line_width: int) -> list[int]: adjoin_width = 1 bins = [] curr_width = 0 i_last_column = len(cols) - 1 for (i, w) in enumerate(cols): w_adjoined = w + adjoin_width curr_width += w_adjoined if i_last_column == i: wrap = curr_width + 1 > line_width and i > 0 else: wrap = curr_width + 2 > line_width and i > 0 if wrap: bins.append(i) curr_width = w_adjoined bins.append(len(cols)) return bins # File: pandas-main/pandas/io/formats/style.py """""" from __future__ import annotations import copy from functools import partial import operator import textwrap from typing import TYPE_CHECKING, overload import numpy as np from pandas._config import get_option from pandas.compat._optional import import_optional_dependency from pandas.util._decorators import Substitution, doc import pandas as pd from pandas import IndexSlice, RangeIndex import pandas.core.common as com from pandas.core.frame import DataFrame, Series from pandas.core.generic import NDFrame from pandas.core.shared_docs import _shared_docs from pandas.io.formats.format import save_to_buffer jinja2 = import_optional_dependency('jinja2', extra='DataFrame.style requires jinja2.') from pandas.io.formats.style_render import CSSProperties, CSSStyles, ExtFormatter, StylerRenderer, Subset, Tooltips, format_table_styles, maybe_convert_css_to_tuples, non_reducing_slice, refactor_levels if TYPE_CHECKING: from collections.abc import Callable, Hashable, Sequence from matplotlib.colors import Colormap from pandas._typing import Any, Axis, AxisInt, Concatenate, ExcelWriterMergeCells, FilePath, IndexLabel, IntervalClosedType, Level, P, QuantileInterpolation, Scalar, Self, StorageOptions, T, WriteBuffer, WriteExcelBuffer from pandas import ExcelWriter subset_args = 'subset : label, array-like, IndexSlice, optional\n A valid 2d input to `DataFrame.loc[]`, or, in the case of a 1d input\n or single key, to `DataFrame.loc[:, ]` where the columns are\n prioritised, to limit ``data`` to *before* applying the function.' properties_args = 'props : str, default None\n CSS properties to use for highlighting. If ``props`` is given, ``color``\n is not used.' coloring_args = "color : str, default '{default}'\n Background color to use for highlighting." buffering_args = 'buf : str, path object, file-like object, optional\n String, path object (implementing ``os.PathLike[str]``), or file-like\n object implementing a string ``write()`` function. If ``None``, the result is\n returned as a string.' encoding_args = 'encoding : str, optional\n Character encoding setting for file output (and meta tags if available).\n Defaults to ``pandas.options.styler.render.encoding`` value of "utf-8".' 
class Styler(StylerRenderer): def __init__(self, data: DataFrame | Series, precision: int | None=None, table_styles: CSSStyles | None=None, uuid: str | None=None, caption: str | tuple | list | None=None, table_attributes: str | None=None, cell_ids: bool=True, na_rep: str | None=None, uuid_len: int=5, decimal: str | None=None, thousands: str | None=None, escape: str | None=None, formatter: ExtFormatter | None=None) -> None: super().__init__(data=data, uuid=uuid, uuid_len=uuid_len, table_styles=table_styles, table_attributes=table_attributes, caption=caption, cell_ids=cell_ids, precision=precision) thousands = thousands or get_option('styler.format.thousands') decimal = decimal or get_option('styler.format.decimal') na_rep = na_rep or get_option('styler.format.na_rep') escape = escape or get_option('styler.format.escape') formatter = formatter or get_option('styler.format.formatter') self.format(formatter=formatter, precision=precision, na_rep=na_rep, escape=escape, decimal=decimal, thousands=thousands) def concat(self, other: Styler) -> Styler: if not isinstance(other, Styler): raise TypeError('`other` must be of type `Styler`') if not self.data.columns.equals(other.data.columns): raise ValueError('`other.data` must have same columns as `Styler.data`') if not self.data.index.nlevels == other.data.index.nlevels: raise ValueError('number of index levels must be same in `other` as in `Styler`. See documentation for suggestions.') self.concatenated.append(other) return self def _repr_html_(self) -> str | None: if get_option('styler.render.repr') == 'html': return self.to_html() return None def _repr_latex_(self) -> str | None: if get_option('styler.render.repr') == 'latex': return self.to_latex() return None def set_tooltips(self, ttips: DataFrame, props: CSSProperties | None=None, css_class: str | None=None, as_title_attribute: bool=False) -> Styler: if not self.cell_ids: raise NotImplementedError("Tooltips can only render with 'cell_ids' is True.") if not ttips.index.is_unique or not ttips.columns.is_unique: raise KeyError('Tooltips render only if `ttips` has unique index and columns.') if self.tooltips is None: self.tooltips = Tooltips() self.tooltips.tt_data = ttips if not as_title_attribute: if props: self.tooltips.class_properties = props if css_class: self.tooltips.class_name = css_class else: self.tooltips.as_title_attribute = as_title_attribute return self @doc(NDFrame.to_excel, klass='Styler', storage_options=_shared_docs['storage_options'], storage_options_versionadded='1.5.0', encoding_parameter=textwrap.dedent(' encoding : str or None, default None\n Unused parameter, present for compatibility.\n '), verbose_parameter=textwrap.dedent(' verbose : str, default True\n Optional unused parameter, present for compatibility.\n '), extra_parameters='') def to_excel(self, excel_writer: FilePath | WriteExcelBuffer | ExcelWriter, sheet_name: str='Sheet1', na_rep: str='', float_format: str | None=None, columns: Sequence[Hashable] | None=None, header: Sequence[Hashable] | bool=True, index: bool=True, index_label: IndexLabel | None=None, startrow: int=0, startcol: int=0, engine: str | None=None, merge_cells: ExcelWriterMergeCells=True, encoding: str | None=None, inf_rep: str='inf', verbose: bool=True, freeze_panes: tuple[int, int] | None=None, storage_options: StorageOptions | None=None) -> None: from pandas.io.formats.excel import ExcelFormatter formatter = ExcelFormatter(self, na_rep=na_rep, cols=columns, header=header, float_format=float_format, index=index, index_label=index_label, 
merge_cells=merge_cells, inf_rep=inf_rep) formatter.write(excel_writer, sheet_name=sheet_name, startrow=startrow, startcol=startcol, freeze_panes=freeze_panes, engine=engine, storage_options=storage_options) @overload def to_latex(self, buf: FilePath | WriteBuffer[str], *, column_format: str | None=..., position: str | None=..., position_float: str | None=..., hrules: bool | None=..., clines: str | None=..., label: str | None=..., caption: str | tuple | None=..., sparse_index: bool | None=..., sparse_columns: bool | None=..., multirow_align: str | None=..., multicol_align: str | None=..., siunitx: bool=..., environment: str | None=..., encoding: str | None=..., convert_css: bool=...) -> None: ... @overload def to_latex(self, buf: None=..., *, column_format: str | None=..., position: str | None=..., position_float: str | None=..., hrules: bool | None=..., clines: str | None=..., label: str | None=..., caption: str | tuple | None=..., sparse_index: bool | None=..., sparse_columns: bool | None=..., multirow_align: str | None=..., multicol_align: str | None=..., siunitx: bool=..., environment: str | None=..., encoding: str | None=..., convert_css: bool=...) -> str: ... def to_latex(self, buf: FilePath | WriteBuffer[str] | None=None, *, column_format: str | None=None, position: str | None=None, position_float: str | None=None, hrules: bool | None=None, clines: str | None=None, label: str | None=None, caption: str | tuple | None=None, sparse_index: bool | None=None, sparse_columns: bool | None=None, multirow_align: str | None=None, multicol_align: str | None=None, siunitx: bool=False, environment: str | None=None, encoding: str | None=None, convert_css: bool=False) -> str | None: obj = self._copy(deepcopy=True) table_selectors = [style['selector'] for style in self.table_styles] if self.table_styles is not None else [] if column_format is not None: obj.set_table_styles([{'selector': 'column_format', 'props': f':{column_format}'}], overwrite=False) elif 'column_format' in table_selectors: pass else: _original_columns = self.data.columns self.data.columns = RangeIndex(stop=len(self.data.columns)) numeric_cols = self.data._get_numeric_data().columns.to_list() self.data.columns = _original_columns column_format = '' for level in range(self.index.nlevels): column_format += '' if self.hide_index_[level] else 'l' for (ci, _) in enumerate(self.data.columns): if ci not in self.hidden_columns: column_format += ('r' if not siunitx else 'S') if ci in numeric_cols else 'l' obj.set_table_styles([{'selector': 'column_format', 'props': f':{column_format}'}], overwrite=False) if position: obj.set_table_styles([{'selector': 'position', 'props': f':{position}'}], overwrite=False) if position_float: if environment == 'longtable': raise ValueError("`position_float` cannot be used in 'longtable' `environment`") if position_float not in ['raggedright', 'raggedleft', 'centering']: raise ValueError(f"`position_float` should be one of 'raggedright', 'raggedleft', 'centering', got: '{position_float}'") obj.set_table_styles([{'selector': 'position_float', 'props': f':{position_float}'}], overwrite=False) hrules = get_option('styler.latex.hrules') if hrules is None else hrules if hrules: obj.set_table_styles([{'selector': 'toprule', 'props': ':toprule'}, {'selector': 'midrule', 'props': ':midrule'}, {'selector': 'bottomrule', 'props': ':bottomrule'}], overwrite=False) if label: obj.set_table_styles([{'selector': 'label', 'props': f":{{{label.replace(':', '§')}}}"}], overwrite=False) if caption: obj.set_caption(caption) if 
sparse_index is None: sparse_index = get_option('styler.sparse.index') if sparse_columns is None: sparse_columns = get_option('styler.sparse.columns') environment = environment or get_option('styler.latex.environment') multicol_align = multicol_align or get_option('styler.latex.multicol_align') multirow_align = multirow_align or get_option('styler.latex.multirow_align') latex = obj._render_latex(sparse_index=sparse_index, sparse_columns=sparse_columns, multirow_align=multirow_align, multicol_align=multicol_align, environment=environment, convert_css=convert_css, siunitx=siunitx, clines=clines) encoding = encoding or get_option('styler.render.encoding') if isinstance(buf, str) else encoding return save_to_buffer(latex, buf=buf, encoding=encoding) @overload def to_html(self, buf: FilePath | WriteBuffer[str], *, table_uuid: str | None=..., table_attributes: str | None=..., sparse_index: bool | None=..., sparse_columns: bool | None=..., bold_headers: bool=..., caption: str | None=..., max_rows: int | None=..., max_columns: int | None=..., encoding: str | None=..., doctype_html: bool=..., exclude_styles: bool=..., **kwargs) -> None: ... @overload def to_html(self, buf: None=..., *, table_uuid: str | None=..., table_attributes: str | None=..., sparse_index: bool | None=..., sparse_columns: bool | None=..., bold_headers: bool=..., caption: str | None=..., max_rows: int | None=..., max_columns: int | None=..., encoding: str | None=..., doctype_html: bool=..., exclude_styles: bool=..., **kwargs) -> str: ... @Substitution(buf=buffering_args, encoding=encoding_args) def to_html(self, buf: FilePath | WriteBuffer[str] | None=None, *, table_uuid: str | None=None, table_attributes: str | None=None, sparse_index: bool | None=None, sparse_columns: bool | None=None, bold_headers: bool=False, caption: str | None=None, max_rows: int | None=None, max_columns: int | None=None, encoding: str | None=None, doctype_html: bool=False, exclude_styles: bool=False, **kwargs) -> str | None: obj = self._copy(deepcopy=True) if table_uuid: obj.set_uuid(table_uuid) if table_attributes: obj.set_table_attributes(table_attributes) if sparse_index is None: sparse_index = get_option('styler.sparse.index') if sparse_columns is None: sparse_columns = get_option('styler.sparse.columns') if bold_headers: obj.set_table_styles([{'selector': 'th', 'props': 'font-weight: bold;'}], overwrite=False) if caption is not None: obj.set_caption(caption) html = obj._render_html(sparse_index=sparse_index, sparse_columns=sparse_columns, max_rows=max_rows, max_cols=max_columns, exclude_styles=exclude_styles, encoding=encoding or get_option('styler.render.encoding'), doctype_html=doctype_html, **kwargs) return save_to_buffer(html, buf=buf, encoding=encoding if buf is not None else None) @overload def to_string(self, buf: FilePath | WriteBuffer[str], *, encoding: str | None=..., sparse_index: bool | None=..., sparse_columns: bool | None=..., max_rows: int | None=..., max_columns: int | None=..., delimiter: str=...) -> None: ... @overload def to_string(self, buf: None=..., *, encoding: str | None=..., sparse_index: bool | None=..., sparse_columns: bool | None=..., max_rows: int | None=..., max_columns: int | None=..., delimiter: str=...) -> str: ... 
@Substitution(buf=buffering_args, encoding=encoding_args) def to_string(self, buf: FilePath | WriteBuffer[str] | None=None, *, encoding: str | None=None, sparse_index: bool | None=None, sparse_columns: bool | None=None, max_rows: int | None=None, max_columns: int | None=None, delimiter: str=' ') -> str | None: obj = self._copy(deepcopy=True) if sparse_index is None: sparse_index = get_option('styler.sparse.index') if sparse_columns is None: sparse_columns = get_option('styler.sparse.columns') text = obj._render_string(sparse_columns=sparse_columns, sparse_index=sparse_index, max_rows=max_rows, max_cols=max_columns, delimiter=delimiter) return save_to_buffer(text, buf=buf, encoding=encoding if buf is not None else None) def set_td_classes(self, classes: DataFrame) -> Styler: if not classes.index.is_unique or not classes.columns.is_unique: raise KeyError('Classes render only if `classes` has unique index and columns.') classes = classes.reindex_like(self.data) for (r, row_tup) in enumerate(classes.itertuples()): for (c, value) in enumerate(row_tup[1:]): if not (pd.isna(value) or value == ''): self.cell_context[r, c] = str(value) return self def _update_ctx(self, attrs: DataFrame) -> None: if not self.index.is_unique or not self.columns.is_unique: raise KeyError('`Styler.apply` and `.map` are not compatible with non-unique index or columns.') for cn in attrs.columns: j = self.columns.get_loc(cn) ser = attrs[cn] for (rn, c) in ser.items(): if not c or pd.isna(c): continue css_list = maybe_convert_css_to_tuples(c) i = self.index.get_loc(rn) self.ctx[i, j].extend(css_list) def _update_ctx_header(self, attrs: DataFrame, axis: AxisInt) -> None: for j in attrs.columns: ser = attrs[j] for (i, c) in ser.items(): if not c: continue css_list = maybe_convert_css_to_tuples(c) if axis == 0: self.ctx_index[i, j].extend(css_list) else: self.ctx_columns[j, i].extend(css_list) def _copy(self, deepcopy: bool=False) -> Styler: styler = type(self)(self.data) shallow = ['hide_index_', 'hide_columns_', 'hide_column_names', 'hide_index_names', 'table_attributes', 'cell_ids', 'caption', 'uuid', 'uuid_len', 'template_latex', 'template_html_style', 'template_html_table', 'template_html'] deep = ['css', 'concatenated', '_display_funcs', '_display_funcs_index', '_display_funcs_columns', '_display_funcs_index_names', '_display_funcs_column_names', 'hidden_rows', 'hidden_columns', 'ctx', 'ctx_index', 'ctx_columns', 'cell_context', '_todo', 'table_styles', 'tooltips'] for attr in shallow: setattr(styler, attr, getattr(self, attr)) for attr in deep: val = getattr(self, attr) setattr(styler, attr, copy.deepcopy(val) if deepcopy else val) return styler def __copy__(self) -> Styler: return self._copy(deepcopy=False) def __deepcopy__(self, memo) -> Styler: return self._copy(deepcopy=True) def clear(self) -> None: clean_copy = Styler(self.data, uuid=self.uuid) clean_attrs = [a for a in clean_copy.__dict__ if not callable(a)] self_attrs = [a for a in self.__dict__ if not callable(a)] for attr in clean_attrs: setattr(self, attr, getattr(clean_copy, attr)) for attr in set(self_attrs).difference(clean_attrs): delattr(self, attr) def _apply(self, func: Callable, axis: Axis | None=0, subset: Subset | None=None, **kwargs) -> Styler: subset = slice(None) if subset is None else subset subset = non_reducing_slice(subset) data = self.data.loc[subset] if data.empty: result = DataFrame() elif axis is None: result = func(data, **kwargs) if not isinstance(result, DataFrame): if not isinstance(result, np.ndarray): raise TypeError(f'Function 
{func!r} must return a DataFrame or ndarray when passed to `Styler.apply` with axis=None') if data.shape != result.shape: raise ValueError(f'Function {func!r} returned ndarray with wrong shape.\nResult has shape: {result.shape}\nExpected shape: {data.shape}') result = DataFrame(result, index=data.index, columns=data.columns) else: axis = self.data._get_axis_number(axis) if axis == 0: result = data.apply(func, axis=0, **kwargs) else: result = data.T.apply(func, axis=0, **kwargs).T if isinstance(result, Series): raise ValueError(f'Function {func!r} resulted in the apply method collapsing to a Series.\nUsually, this is the result of the function returning a single value, instead of list-like.') msg = f"Function {func!r} created invalid {{0}} labels.\nUsually, this is the result of the function returning a {('Series' if axis is not None else 'DataFrame')} which contains invalid labels, or returning an incorrectly shaped, list-like object which cannot be mapped to labels, possibly due to applying the function along the wrong axis.\nResult {{0}} has shape: {{1}}\nExpected {{0}} shape: {{2}}" if not all(result.index.isin(data.index)): raise ValueError(msg.format('index', result.index.shape, data.index.shape)) if not all(result.columns.isin(data.columns)): raise ValueError(msg.format('columns', result.columns.shape, data.columns.shape)) self._update_ctx(result) return self @Substitution(subset=subset_args) def apply(self, func: Callable, axis: Axis | None=0, subset: Subset | None=None, **kwargs) -> Styler: self._todo.append((lambda instance: getattr(instance, '_apply'), (func, axis, subset), kwargs)) return self def _apply_index(self, func: Callable, axis: Axis=0, level: Level | list[Level] | None=None, method: str='apply', **kwargs) -> Styler: axis = self.data._get_axis_number(axis) obj = self.index if axis == 0 else self.columns levels_ = refactor_levels(level, obj) data = DataFrame(obj.to_list()).loc[:, levels_] if method == 'apply': result = data.apply(func, axis=0, **kwargs) elif method == 'map': result = data.map(func, **kwargs) self._update_ctx_header(result, axis) return self @doc(this='apply', wise='level-wise', alt='map', altwise='elementwise', func='take a Series and return a string array of the same length', input_note='the index as a Series, if an Index, or a level of a MultiIndex', output_note='an identically sized array of CSS styles as strings', var='label', ret='np.where(label == "B", "background-color: yellow;", "")', ret2='["background-color: yellow;" if "x" in v else "" for v in label]') def apply_index(self, func: Callable, axis: AxisInt | str=0, level: Level | list[Level] | None=None, **kwargs) -> Styler: self._todo.append((lambda instance: getattr(instance, '_apply_index'), (func, axis, level, 'apply'), kwargs)) return self @doc(apply_index, this='map', wise='elementwise', alt='apply', altwise='level-wise', func='take a scalar and return a string', input_note='an index value, if an Index, or a level value of a MultiIndex', output_note='CSS styles as a string', var='label', ret='"background-color: yellow;" if label == "B" else None', ret2='"background-color: yellow;" if "x" in label else None') def map_index(self, func: Callable, axis: AxisInt | str=0, level: Level | list[Level] | None=None, **kwargs) -> Styler: self._todo.append((lambda instance: getattr(instance, '_apply_index'), (func, axis, level, 'map'), kwargs)) return self def _map(self, func: Callable, subset: Subset | None=None, **kwargs) -> Styler: func = partial(func, **kwargs) if subset is None: subset = 
IndexSlice[:] subset = non_reducing_slice(subset) result = self.data.loc[subset].map(func) self._update_ctx(result) return self @Substitution(subset=subset_args) def map(self, func: Callable, subset: Subset | None=None, **kwargs) -> Styler: self._todo.append((lambda instance: getattr(instance, '_map'), (func, subset), kwargs)) return self def set_table_attributes(self, attributes: str) -> Styler: self.table_attributes = attributes return self def export(self) -> dict[str, Any]: return {'apply': copy.copy(self._todo), 'table_attributes': self.table_attributes, 'table_styles': copy.copy(self.table_styles), 'hide_index': all(self.hide_index_), 'hide_columns': all(self.hide_columns_), 'hide_index_names': self.hide_index_names, 'hide_column_names': self.hide_column_names, 'css': copy.copy(self.css)} def use(self, styles: dict[str, Any]) -> Styler: self._todo.extend(styles.get('apply', [])) table_attributes: str = self.table_attributes or '' obj_table_atts: str = '' if styles.get('table_attributes') is None else str(styles.get('table_attributes')) self.set_table_attributes((table_attributes + ' ' + obj_table_atts).strip()) if styles.get('table_styles'): self.set_table_styles(styles.get('table_styles'), overwrite=False) for obj in ['index', 'columns']: hide_obj = styles.get('hide_' + obj) if hide_obj is not None: if isinstance(hide_obj, bool): n = getattr(self, obj).nlevels setattr(self, 'hide_' + obj + '_', [hide_obj] * n) else: setattr(self, 'hide_' + obj + '_', hide_obj) self.hide_index_names = styles.get('hide_index_names', False) self.hide_column_names = styles.get('hide_column_names', False) if styles.get('css'): self.css = styles.get('css') return self def set_uuid(self, uuid: str) -> Styler: self.uuid = uuid return self def set_caption(self, caption: str | tuple | list) -> Styler: msg = '`caption` must be either a string or 2-tuple of strings.' 
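# --- editor's note (illustrative sketch, not part of the pandas source) ----------------------
# `set_caption` accepts either a plain string, rendered as the HTML <caption>, or a 2-tuple of
# strings that the LaTeX writer treats as (full caption, short caption). Assuming a
# hypothetical DataFrame `df`:
#     df.style.set_caption("Quarterly totals")
#     df.style.set_caption(("Quarterly totals", "Totals"))  # LaTeX: \caption[Totals]{Quarterly totals}
# ----------------------------------------------------------------------------------------------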
if isinstance(caption, (list, tuple)): if len(caption) != 2 or not isinstance(caption[0], str) or (not isinstance(caption[1], str)): raise ValueError(msg) elif not isinstance(caption, str): raise ValueError(msg) self.caption = caption return self def set_sticky(self, axis: Axis=0, pixel_size: int | None=None, levels: Level | list[Level] | None=None) -> Styler: axis = self.data._get_axis_number(axis) obj = self.data.index if axis == 0 else self.data.columns pixel_size = (75 if axis == 0 else 25) if not pixel_size else pixel_size props = 'position:sticky; background-color:inherit;' if not isinstance(obj, pd.MultiIndex): if axis == 1: styles: CSSStyles = [{'selector': 'thead tr:nth-child(1) th', 'props': props + 'top:0px; z-index:2;'}] if self.index.names[0] is not None: styles[0]['props'] = props + f'top:0px; z-index:2; height:{pixel_size}px;' styles.append({'selector': 'thead tr:nth-child(2) th', 'props': props + f'top:{pixel_size}px; z-index:2; height:{pixel_size}px; '}) else: styles = [{'selector': 'thead tr th:nth-child(1)', 'props': props + 'left:0px; z-index:3 !important;'}, {'selector': 'tbody tr th:nth-child(1)', 'props': props + 'left:0px; z-index:1;'}] else: range_idx = list(range(obj.nlevels)) levels_: list[int] = refactor_levels(levels, obj) if levels else range_idx levels_ = sorted(levels_) if axis == 1: styles = [] for (i, level) in enumerate(levels_): styles.append({'selector': f'thead tr:nth-child({level + 1}) th', 'props': props + f'top:{i * pixel_size}px; height:{pixel_size}px; z-index:2;'}) if not all((name is None for name in self.index.names)): styles.append({'selector': f'thead tr:nth-child({obj.nlevels + 1}) th', 'props': props + f'top:{len(levels_) * pixel_size}px; height:{pixel_size}px; z-index:2;'}) else: styles = [] for (i, level) in enumerate(levels_): props_ = props + f'left:{i * pixel_size}px; min-width:{pixel_size}px; max-width:{pixel_size}px; ' styles.extend([{'selector': f'thead tr th:nth-child({level + 1})', 'props': props_ + 'z-index:3 !important;'}, {'selector': f'tbody tr th.level{level}', 'props': props_ + 'z-index:1;'}]) return self.set_table_styles(styles, overwrite=False) def set_table_styles(self, table_styles: dict[Any, CSSStyles] | CSSStyles | None=None, axis: AxisInt=0, overwrite: bool=True, css_class_names: dict[str, str] | None=None) -> Styler: if css_class_names is not None: self.css = {**self.css, **css_class_names} if table_styles is None: return self elif isinstance(table_styles, dict): axis = self.data._get_axis_number(axis) obj = self.data.index if axis == 1 else self.data.columns idf = f".{self.css['row']}" if axis == 1 else f".{self.css['col']}" table_styles = [{'selector': str(s['selector']) + idf + str(idx), 'props': maybe_convert_css_to_tuples(s['props'])} for (key, styles) in table_styles.items() for idx in obj.get_indexer_for([key]) for s in format_table_styles(styles)] else: table_styles = [{'selector': s['selector'], 'props': maybe_convert_css_to_tuples(s['props'])} for s in table_styles] if not overwrite and self.table_styles is not None: self.table_styles.extend(table_styles) else: self.table_styles = table_styles return self def hide(self, subset: Subset | None=None, axis: Axis=0, level: Level | list[Level] | None=None, names: bool=False) -> Styler: axis = self.data._get_axis_number(axis) if axis == 0: (obj, objs, alt) = ('index', 'index', 'rows') else: (obj, objs, alt) = ('column', 'columns', 'columns') if level is not None and subset is not None: raise ValueError('`subset` and `level` cannot be passed simultaneously') if 
subset is None: if level is None and names: setattr(self, f'hide_{obj}_names', True) return self levels_ = refactor_levels(level, getattr(self, objs)) setattr(self, f'hide_{objs}_', [lev in levels_ for lev in range(getattr(self, objs).nlevels)]) else: if axis == 0: subset_ = IndexSlice[subset, :] else: subset_ = IndexSlice[:, subset] subset = non_reducing_slice(subset_) hide = self.data.loc[subset] h_els = getattr(self, objs).get_indexer_for(getattr(hide, objs)) setattr(self, f'hidden_{alt}', h_els) if names: setattr(self, f'hide_{obj}_names', True) return self def _get_numeric_subset_default(self): return self.data.columns.isin(self.data.select_dtypes(include=np.number)) @doc(name='background', alt='text', image_prefix='bg', text_threshold='text_color_threshold : float or int\n\n Luminance threshold for determining text color in [0, 1]. Facilitates text\n\n visibility across varying background colors. All text is dark if 0, and\n\n light if 1, defaults to 0.408.') @Substitution(subset=subset_args) def background_gradient(self, cmap: str | Colormap='PuBu', low: float=0, high: float=0, axis: Axis | None=0, subset: Subset | None=None, text_color_threshold: float=0.408, vmin: float | None=None, vmax: float | None=None, gmap: Sequence | None=None) -> Styler: if subset is None and gmap is None: subset = self._get_numeric_subset_default() self.apply(_background_gradient, cmap=cmap, subset=subset, axis=axis, low=low, high=high, text_color_threshold=text_color_threshold, vmin=vmin, vmax=vmax, gmap=gmap) return self @doc(background_gradient, name='text', alt='background', image_prefix='tg', text_threshold='') def text_gradient(self, cmap: str | Colormap='PuBu', low: float=0, high: float=0, axis: Axis | None=0, subset: Subset | None=None, vmin: float | None=None, vmax: float | None=None, gmap: Sequence | None=None) -> Styler: if subset is None and gmap is None: subset = self._get_numeric_subset_default() return self.apply(_background_gradient, cmap=cmap, subset=subset, axis=axis, low=low, high=high, vmin=vmin, vmax=vmax, gmap=gmap, text_only=True) @Substitution(subset=subset_args) def set_properties(self, subset: Subset | None=None, **kwargs) -> Styler: values = ''.join([f'{p}: {v};' for (p, v) in kwargs.items()]) return self.map(lambda x: values, subset=subset) @Substitution(subset=subset_args) def bar(self, subset: Subset | None=None, axis: Axis | None=0, *, color: str | list | tuple | None=None, cmap: Any | None=None, width: float=100, height: float=100, align: str | float | Callable='mid', vmin: float | None=None, vmax: float | None=None, props: str='width: 10em;') -> Styler: if color is None and cmap is None: color = '#d65f5f' elif color is not None and cmap is not None: raise ValueError('`color` and `cmap` cannot both be given') elif color is not None: if isinstance(color, (list, tuple)) and len(color) > 2 or not isinstance(color, (str, list, tuple)): raise ValueError("`color` must be string or list or tuple of 2 strings,(eg: color=['#d65f5f', '#5fba7d'])") if not 0 <= width <= 100: raise ValueError(f'`width` must be a value in [0, 100], got {width}') if not 0 <= height <= 100: raise ValueError(f'`height` must be a value in [0, 100], got {height}') if subset is None: subset = self._get_numeric_subset_default() self.apply(_bar, subset=subset, axis=axis, align=align, colors=color, cmap=cmap, width=width / 100, height=height / 100, vmin=vmin, vmax=vmax, base_css=props) return self @Substitution(subset=subset_args, props=properties_args, color=coloring_args.format(default='red')) def 
highlight_null(self, color: str='red', subset: Subset | None=None, props: str | None=None) -> Styler: def f(data: DataFrame, props: str) -> np.ndarray: return np.where(pd.isna(data).to_numpy(), props, '') if props is None: props = f'background-color: {color};' return self.apply(f, axis=None, subset=subset, props=props) @Substitution(subset=subset_args, color=coloring_args.format(default='yellow'), props=properties_args) def highlight_max(self, subset: Subset | None=None, color: str='yellow', axis: Axis | None=0, props: str | None=None) -> Styler: if props is None: props = f'background-color: {color};' return self.apply(partial(_highlight_value, op='max'), axis=axis, subset=subset, props=props) @Substitution(subset=subset_args, color=coloring_args.format(default='yellow'), props=properties_args) def highlight_min(self, subset: Subset | None=None, color: str='yellow', axis: Axis | None=0, props: str | None=None) -> Styler: if props is None: props = f'background-color: {color};' return self.apply(partial(_highlight_value, op='min'), axis=axis, subset=subset, props=props) @Substitution(subset=subset_args, color=coloring_args.format(default='yellow'), props=properties_args) def highlight_between(self, subset: Subset | None=None, color: str='yellow', axis: Axis | None=0, left: Scalar | Sequence | None=None, right: Scalar | Sequence | None=None, inclusive: IntervalClosedType='both', props: str | None=None) -> Styler: if props is None: props = f'background-color: {color};' return self.apply(_highlight_between, axis=axis, subset=subset, props=props, left=left, right=right, inclusive=inclusive) @Substitution(subset=subset_args, color=coloring_args.format(default='yellow'), props=properties_args) def highlight_quantile(self, subset: Subset | None=None, color: str='yellow', axis: Axis | None=0, q_left: float=0.0, q_right: float=1.0, interpolation: QuantileInterpolation='linear', inclusive: IntervalClosedType='both', props: str | None=None) -> Styler: subset_ = slice(None) if subset is None else subset subset_ = non_reducing_slice(subset_) data = self.data.loc[subset_] quantiles = [q_left, q_right] if axis is None: q = Series(data.to_numpy().ravel()).quantile(q=quantiles, interpolation=interpolation) axis_apply: int | None = None else: axis = self.data._get_axis_number(axis) q = data.quantile(axis=axis, numeric_only=False, q=quantiles, interpolation=interpolation) axis_apply = 1 - axis if props is None: props = f'background-color: {color};' return self.apply(_highlight_between, axis=axis_apply, subset=subset, props=props, left=q.iloc[0], right=q.iloc[1], inclusive=inclusive) @classmethod def from_custom_template(cls, searchpath: Sequence[str], html_table: str | None=None, html_style: str | None=None) -> type[Styler]: loader = jinja2.ChoiceLoader([jinja2.FileSystemLoader(searchpath), cls.loader]) class MyStyler(cls): env = jinja2.Environment(loader=loader) if html_table: template_html_table = env.get_template(html_table) if html_style: template_html_style = env.get_template(html_style) return MyStyler @overload def pipe(self, func: Callable[Concatenate[Self, P], T], *args: P.args, **kwargs: P.kwargs) -> T: ... @overload def pipe(self, func: tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T: ... 
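# --- editor's note (illustrative sketch, not part of the pandas source) ----------------------
# `Styler.pipe` mirrors `DataFrame.pipe`: the callable receives this Styler positionally, or,
# with the (callable, "keyword") tuple form, under that keyword, so styling recipes can be
# chained. Assuming a hypothetical DataFrame `df` and helper `emphasize`:
#     def emphasize(styler, threshold=100):
#         return styler.map(lambda v: "font-weight: bold;" if v > threshold else "")
#     df.style.pipe(emphasize, threshold=250)               # emphasize(df.style, threshold=250)
#     df.style.pipe((emphasize, "styler"), threshold=250)   # emphasize(threshold=250, styler=df.style)
# ----------------------------------------------------------------------------------------------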
def pipe(self, func: Callable[Concatenate[Self, P], T] | tuple[Callable[..., T], str], *args: Any, **kwargs: Any) -> T: return com.pipe(self, func, *args, **kwargs) def _validate_apply_axis_arg(arg: NDFrame | Sequence | np.ndarray, arg_name: str, dtype: Any | None, data: NDFrame) -> np.ndarray: dtype = {'dtype': dtype} if dtype else {} if isinstance(arg, Series) and isinstance(data, DataFrame): raise ValueError(f"'{arg_name}' is a Series but underlying data for operations is a DataFrame since 'axis=None'") if isinstance(arg, DataFrame) and isinstance(data, Series): raise ValueError(f"'{arg_name}' is a DataFrame but underlying data for operations is a Series with 'axis in [0,1]'") if isinstance(arg, (Series, DataFrame)): arg = arg.reindex_like(data).to_numpy(**dtype) else: arg = np.asarray(arg, **dtype) assert isinstance(arg, np.ndarray) if arg.shape != data.shape: raise ValueError(f"supplied '{arg_name}' is not correct shape for data over selected 'axis': got {arg.shape}, expected {data.shape}") return arg def _background_gradient(data, cmap: str | Colormap='PuBu', low: float=0, high: float=0, text_color_threshold: float=0.408, vmin: float | None=None, vmax: float | None=None, gmap: Sequence | np.ndarray | DataFrame | Series | None=None, text_only: bool=False) -> list[str] | DataFrame: if gmap is None: gmap = data.to_numpy(dtype=float, na_value=np.nan) else: gmap = _validate_apply_axis_arg(gmap, 'gmap', float, data) smin = np.nanmin(gmap) if vmin is None else vmin smax = np.nanmax(gmap) if vmax is None else vmax rng = smax - smin _matplotlib = import_optional_dependency('matplotlib', extra='Styler.background_gradient requires matplotlib.') norm = _matplotlib.colors.Normalize(smin - rng * low, smax + rng * high) if cmap is None: rgbas = _matplotlib.colormaps[_matplotlib.rcParams['image.cmap']](norm(gmap)) else: rgbas = _matplotlib.colormaps.get_cmap(cmap)(norm(gmap)) def relative_luminance(rgba) -> float: (r, g, b) = (x / 12.92 if x <= 0.04045 else ((x + 0.055) / 1.055) ** 2.4 for x in rgba[:3]) return 0.2126 * r + 0.7152 * g + 0.0722 * b def css(rgba, text_only) -> str: if not text_only: dark = relative_luminance(rgba) < text_color_threshold text_color = '#f1f1f1' if dark else '#000000' return f'background-color: {_matplotlib.colors.rgb2hex(rgba)};color: {text_color};' else: return f'color: {_matplotlib.colors.rgb2hex(rgba)};' if data.ndim == 1: return [css(rgba, text_only) for rgba in rgbas] else: return DataFrame([[css(rgba, text_only) for rgba in row] for row in rgbas], index=data.index, columns=data.columns) def _highlight_between(data: NDFrame, props: str, left: Scalar | Sequence | np.ndarray | NDFrame | None=None, right: Scalar | Sequence | np.ndarray | NDFrame | None=None, inclusive: bool | str=True) -> np.ndarray: if np.iterable(left) and (not isinstance(left, str)): left = _validate_apply_axis_arg(left, 'left', None, data) if np.iterable(right) and (not isinstance(right, str)): right = _validate_apply_axis_arg(right, 'right', None, data) if inclusive == 'both': ops = (operator.ge, operator.le) elif inclusive == 'neither': ops = (operator.gt, operator.lt) elif inclusive == 'left': ops = (operator.ge, operator.lt) elif inclusive == 'right': ops = (operator.gt, operator.le) else: raise ValueError(f"'inclusive' values can be 'both', 'left', 'right', or 'neither' got {inclusive}") g_left = ops[0](data, left) if left is not None else np.full(data.shape, True, dtype=bool) if isinstance(g_left, (DataFrame, Series)): g_left = g_left.where(pd.notna(g_left), False) l_right = ops[1](data, 
right) if right is not None else np.full(data.shape, True, dtype=bool) if isinstance(l_right, (DataFrame, Series)): l_right = l_right.where(pd.notna(l_right), False) return np.where(g_left & l_right, props, '') def _highlight_value(data: DataFrame | Series, op: str, props: str) -> np.ndarray: value = getattr(data, op)(skipna=True) if isinstance(data, DataFrame): value = getattr(value, op)(skipna=True) cond = data == value cond = cond.where(pd.notna(cond), False) return np.where(cond, props, '') def _bar(data: NDFrame, align: str | float | Callable, colors: str | list | tuple, cmap: Any, width: float, height: float, vmin: float | None, vmax: float | None, base_css: str): def css_bar(start: float, end: float, color: str) -> str: cell_css = base_css if end > start: cell_css += 'background: linear-gradient(90deg,' if start > 0: cell_css += f' transparent {start * 100:.1f}%, {color} {start * 100:.1f}%,' cell_css += f' {color} {end * 100:.1f}%, transparent {end * 100:.1f}%)' return cell_css def css_calc(x, left: float, right: float, align: str, color: str | list | tuple): if pd.isna(x): return base_css if isinstance(color, (list, tuple)): color = color[0] if x < 0 else color[1] assert isinstance(color, str) x = left if x < left else x x = right if x > right else x start: float = 0 end: float = 1 if align == 'left': end = (x - left) / (right - left) elif align == 'right': start = (x - left) / (right - left) else: z_frac: float = 0.5 if align == 'zero': limit: float = max(abs(left), abs(right)) (left, right) = (-limit, limit) elif align == 'mid': mid: float = (left + right) / 2 z_frac = -mid / (right - left) + 0.5 if mid < 0 else -left / (right - left) if x < 0: (start, end) = ((x - left) / (right - left), z_frac) else: (start, end) = (z_frac, (x - left) / (right - left)) ret = css_bar(start * width, end * width, color) if height < 1 and 'background: linear-gradient(' in ret: return ret + f' no-repeat center; background-size: 100% {height * 100:.1f}%;' else: return ret values = data.to_numpy() left = np.nanmin(data.min(skipna=True)) if vmin is None else vmin right = np.nanmax(data.max(skipna=True)) if vmax is None else vmax z: float = 0 if align == 'mid': if left >= 0: (align, left) = ('left', 0 if vmin is None else vmin) elif right <= 0: (align, right) = ('right', 0 if vmax is None else vmax) elif align == 'mean': (z, align) = (np.nanmean(values), 'zero') elif callable(align): (z, align) = (align(values), 'zero') elif isinstance(align, (float, int)): (z, align) = (float(align), 'zero') elif align not in ('left', 'right', 'zero'): raise ValueError("`align` should be in {'left', 'right', 'mid', 'mean', 'zero'} or be a value defining the center line or a callable that returns a float") rgbas = None if cmap is not None: _matplotlib = import_optional_dependency('matplotlib', extra='Styler.bar requires matplotlib.') cmap = _matplotlib.colormaps[cmap] if isinstance(cmap, str) else cmap norm = _matplotlib.colors.Normalize(left, right) rgbas = cmap(norm(values)) if data.ndim == 1: rgbas = [_matplotlib.colors.rgb2hex(rgba) for rgba in rgbas] else: rgbas = [[_matplotlib.colors.rgb2hex(rgba) for rgba in row] for row in rgbas] assert isinstance(align, str) if data.ndim == 1: return [css_calc(x - z, left - z, right - z, align, colors if rgbas is None else rgbas[i]) for (i, x) in enumerate(values)] else: return np.array([[css_calc(x - z, left - z, right - z, align, colors if rgbas is None else rgbas[i][j]) for (j, x) in enumerate(row)] for (i, row) in enumerate(values)]) # File: 
pandas-main/pandas/io/formats/style_render.py from __future__ import annotations from collections import defaultdict from collections.abc import Callable, Sequence from functools import partial import re from typing import TYPE_CHECKING, Any, DefaultDict, Optional, TypedDict, Union from uuid import uuid4 import numpy as np from pandas._config import get_option from pandas._libs import lib from pandas.compat._optional import import_optional_dependency from pandas.core.dtypes.common import is_complex, is_float, is_integer from pandas.core.dtypes.generic import ABCSeries from pandas import DataFrame, Index, IndexSlice, MultiIndex, Series, isna from pandas.api.types import is_list_like import pandas.core.common as com if TYPE_CHECKING: from pandas._typing import Axis, Level jinja2 = import_optional_dependency('jinja2', extra='DataFrame.style requires jinja2.') from markupsafe import escape as escape_html BaseFormatter = Union[str, Callable] ExtFormatter = Union[BaseFormatter, dict[Any, Optional[BaseFormatter]]] CSSPair = tuple[str, Union[str, float]] CSSList = list[CSSPair] CSSProperties = Union[str, CSSList] class CSSDict(TypedDict): selector: str props: CSSProperties CSSStyles = list[CSSDict] Subset = Union[slice, Sequence, Index] class StylerRenderer: loader = jinja2.PackageLoader('pandas', 'io/formats/templates') env = jinja2.Environment(loader=loader, trim_blocks=True) template_html = env.get_template('html.tpl') template_html_table = env.get_template('html_table.tpl') template_html_style = env.get_template('html_style.tpl') template_latex = env.get_template('latex.tpl') template_string = env.get_template('string.tpl') def __init__(self, data: DataFrame | Series, uuid: str | None=None, uuid_len: int=5, table_styles: CSSStyles | None=None, table_attributes: str | None=None, caption: str | tuple | list | None=None, cell_ids: bool=True, precision: int | None=None) -> None: if isinstance(data, Series): data = data.to_frame() if not isinstance(data, DataFrame): raise TypeError('``data`` must be a Series or DataFrame') self.data: DataFrame = data self.index: Index = data.index self.columns: Index = data.columns if not isinstance(uuid_len, int) or uuid_len < 0: raise TypeError('``uuid_len`` must be an integer in range [0, 32].') self.uuid = uuid or uuid4().hex[:min(32, uuid_len)] self.uuid_len = len(self.uuid) self.table_styles = table_styles self.table_attributes = table_attributes self.caption = caption self.cell_ids = cell_ids self.css = {'row_heading': 'row_heading', 'col_heading': 'col_heading', 'index_name': 'index_name', 'col': 'col', 'row': 'row', 'col_trim': 'col_trim', 'row_trim': 'row_trim', 'level': 'level', 'data': 'data', 'blank': 'blank', 'foot': 'foot'} self.concatenated: list[StylerRenderer] = [] self.hide_index_names: bool = False self.hide_column_names: bool = False self.hide_index_: list = [False] * self.index.nlevels self.hide_columns_: list = [False] * self.columns.nlevels self.hidden_rows: Sequence[int] = [] self.hidden_columns: Sequence[int] = [] self.ctx: DefaultDict[tuple[int, int], CSSList] = defaultdict(list) self.ctx_index: DefaultDict[tuple[int, int], CSSList] = defaultdict(list) self.ctx_columns: DefaultDict[tuple[int, int], CSSList] = defaultdict(list) self.cell_context: DefaultDict[tuple[int, int], str] = defaultdict(str) self._todo: list[tuple[Callable, tuple, dict]] = [] self.tooltips: Tooltips | None = None precision = get_option('styler.format.precision') if precision is None else precision self._display_funcs: DefaultDict[tuple[int, int], Callable[[Any], 
str]] = defaultdict(lambda : partial(_default_formatter, precision=precision)) self._display_funcs_index: DefaultDict[tuple[int, int], Callable[[Any], str]] = defaultdict(lambda : partial(_default_formatter, precision=precision)) self._display_funcs_index_names: DefaultDict[int, Callable[[Any], str]] = defaultdict(lambda : partial(_default_formatter, precision=precision)) self._display_funcs_columns: DefaultDict[tuple[int, int], Callable[[Any], str]] = defaultdict(lambda : partial(_default_formatter, precision=precision)) self._display_funcs_column_names: DefaultDict[int, Callable[[Any], str]] = defaultdict(lambda : partial(_default_formatter, precision=precision)) def _render(self, sparse_index: bool, sparse_columns: bool, max_rows: int | None=None, max_cols: int | None=None, blank: str=''): self._compute() dxs = [] ctx_len = len(self.index) for (i, concatenated) in enumerate(self.concatenated): concatenated.hide_index_ = self.hide_index_ concatenated.hidden_columns = self.hidden_columns foot = f"{self.css['foot']}{i}" concatenated.css = {**self.css, 'data': f'{foot}_data', 'row_heading': f'{foot}_row_heading', 'row': f'{foot}_row', 'foot': f'{foot}_foot'} dx = concatenated._render(sparse_index, sparse_columns, max_rows, max_cols, blank) dxs.append(dx) for ((r, c), v) in concatenated.ctx.items(): self.ctx[r + ctx_len, c] = v for ((r, c), v) in concatenated.ctx_index.items(): self.ctx_index[r + ctx_len, c] = v ctx_len += len(concatenated.index) d = self._translate(sparse_index, sparse_columns, max_rows, max_cols, blank, dxs) return d def _render_html(self, sparse_index: bool, sparse_columns: bool, max_rows: int | None=None, max_cols: int | None=None, **kwargs) -> str: d = self._render(sparse_index, sparse_columns, max_rows, max_cols, ' ') d.update(kwargs) return self.template_html.render(**d, html_table_tpl=self.template_html_table, html_style_tpl=self.template_html_style) def _render_latex(self, sparse_index: bool, sparse_columns: bool, clines: str | None, **kwargs) -> str: d = self._render(sparse_index, sparse_columns, None, None) self._translate_latex(d, clines=clines) self.template_latex.globals['parse_wrap'] = _parse_latex_table_wrapping self.template_latex.globals['parse_table'] = _parse_latex_table_styles self.template_latex.globals['parse_cell'] = _parse_latex_cell_styles self.template_latex.globals['parse_header'] = _parse_latex_header_span d.update(kwargs) return self.template_latex.render(**d) def _render_string(self, sparse_index: bool, sparse_columns: bool, max_rows: int | None=None, max_cols: int | None=None, **kwargs) -> str: d = self._render(sparse_index, sparse_columns, max_rows, max_cols) d.update(kwargs) return self.template_string.render(**d) def _compute(self): self.ctx.clear() self.ctx_index.clear() self.ctx_columns.clear() r = self for (func, args, kwargs) in self._todo: r = func(self)(*args, **kwargs) return r def _translate(self, sparse_index: bool, sparse_cols: bool, max_rows: int | None=None, max_cols: int | None=None, blank: str=' ', dxs: list[dict] | None=None): if dxs is None: dxs = [] self.css['blank_value'] = blank d = {'uuid': self.uuid, 'table_styles': format_table_styles(self.table_styles or []), 'caption': self.caption} max_elements = get_option('styler.render.max_elements') max_rows = max_rows if max_rows else get_option('styler.render.max_rows') max_cols = max_cols if max_cols else get_option('styler.render.max_columns') (max_rows, max_cols) = _get_trimming_maximums(len(self.data.index), len(self.data.columns), max_elements, max_rows, max_cols) 
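# --- editor's note (explanatory comment, not part of the pandas source) ----------------------
# _get_trimming_maximums (defined later in this module) first caps (rn, cn) at max_rows /
# max_cols when those options are set, then, while rn * cn still exceeds
# "styler.render.max_elements", multiplies whichever dimension is currently larger (columns on
# ties) by scaling_factor (0.8) until the product fits. The surviving (max_rows, max_cols)
# drive the "..." trim rows/columns emitted by _translate_header and _translate_body below.
# ----------------------------------------------------------------------------------------------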
self.cellstyle_map_columns: DefaultDict[tuple[CSSPair, ...], list[str]] = defaultdict(list) head = self._translate_header(sparse_cols, max_cols) d.update({'head': head}) idx_lengths = _get_level_lengths(self.index, sparse_index, max_rows, self.hidden_rows) d.update({'index_lengths': idx_lengths}) self.cellstyle_map: DefaultDict[tuple[CSSPair, ...], list[str]] = defaultdict(list) self.cellstyle_map_index: DefaultDict[tuple[CSSPair, ...], list[str]] = defaultdict(list) body: list = self._translate_body(idx_lengths, max_rows, max_cols) d.update({'body': body}) ctx_maps = {'cellstyle': 'cellstyle_map', 'cellstyle_index': 'cellstyle_map_index', 'cellstyle_columns': 'cellstyle_map_columns'} for (k, attr) in ctx_maps.items(): map = [{'props': list(props), 'selectors': selectors} for (props, selectors) in getattr(self, attr).items()] d.update({k: map}) for dx in dxs: d['body'].extend(dx['body']) d['cellstyle'].extend(dx['cellstyle']) d['cellstyle_index'].extend(dx['cellstyle_index']) table_attr = self.table_attributes if not get_option('styler.html.mathjax'): table_attr = table_attr or '' if 'class="' in table_attr: table_attr = table_attr.replace('class="', 'class="tex2jax_ignore ') else: table_attr += ' class="tex2jax_ignore"' d.update({'table_attributes': table_attr}) if self.tooltips: d = self.tooltips._translate(self, d) return d def _translate_header(self, sparsify_cols: bool, max_cols: int): col_lengths = _get_level_lengths(self.columns, sparsify_cols, max_cols, self.hidden_columns) clabels = self.data.columns.tolist() if self.data.columns.nlevels == 1: clabels = [[x] for x in clabels] clabels = list(zip(*clabels)) head = [] for (r, hide) in enumerate(self.hide_columns_): if hide or not clabels: continue header_row = self._generate_col_header_row((r, clabels), max_cols, col_lengths) head.append(header_row) if self.data.index.names and com.any_not_none(*self.data.index.names) and (not all(self.hide_index_)) and (not self.hide_index_names): index_names_row = self._generate_index_names_row(clabels, max_cols, col_lengths) head.append(index_names_row) return head def _generate_col_header_row(self, iter: Sequence, max_cols: int, col_lengths: dict): (r, clabels) = iter index_blanks = [_element('th', self.css['blank'], self.css['blank_value'], True)] * (self.index.nlevels - sum(self.hide_index_) - 1) name = self.data.columns.names[r] is_display = name is not None and (not self.hide_column_names) value = name if is_display else self.css['blank_value'] display_value = self._display_funcs_column_names[r](value) if is_display else None column_name = [_element('th', f"{self.css['blank']} {self.css['level']}{r}" if name is None else f"{self.css['index_name']} {self.css['level']}{r}", value, not all(self.hide_index_), display_value=display_value)] column_headers: list = [] visible_col_count: int = 0 for (c, value) in enumerate(clabels[r]): header_element_visible = _is_visible(c, r, col_lengths) if header_element_visible: visible_col_count += col_lengths.get((r, c), 0) if self._check_trim(visible_col_count, max_cols, column_headers, 'th', f"{self.css['col_heading']} {self.css['level']}{r} {self.css['col_trim']}"): break header_element = _element('th', f"{self.css['col_heading']} {self.css['level']}{r} {self.css['col']}{c}", value, header_element_visible, display_value=self._display_funcs_columns[r, c](value), attributes=f'colspan="{col_lengths.get((r, c), 0)}"' if col_lengths.get((r, c), 0) > 1 else '') if self.cell_ids: header_element['id'] = f"{self.css['level']}{r}_{self.css['col']}{c}" if 
header_element_visible and (r, c) in self.ctx_columns and self.ctx_columns[r, c]: header_element['id'] = f"{self.css['level']}{r}_{self.css['col']}{c}" self.cellstyle_map_columns[tuple(self.ctx_columns[r, c])].append(f"{self.css['level']}{r}_{self.css['col']}{c}") column_headers.append(header_element) return index_blanks + column_name + column_headers def _generate_index_names_row(self, iter: Sequence, max_cols: int, col_lengths: dict): clabels = iter index_names = [_element('th', f"{self.css['index_name']} {self.css['level']}{c}", self.css['blank_value'] if name is None else name, not self.hide_index_[c], display_value=None if name is None else self._display_funcs_index_names[c](name)) for (c, name) in enumerate(self.data.index.names)] column_blanks: list = [] visible_col_count: int = 0 if clabels: last_level = self.columns.nlevels - 1 for (c, value) in enumerate(clabels[last_level]): header_element_visible = _is_visible(c, last_level, col_lengths) if header_element_visible: visible_col_count += 1 if self._check_trim(visible_col_count, max_cols, column_blanks, 'th', f"{self.css['blank']} {self.css['col']}{c} {self.css['col_trim']}", self.css['blank_value']): break column_blanks.append(_element('th', f"{self.css['blank']} {self.css['col']}{c}", self.css['blank_value'], c not in self.hidden_columns)) return index_names + column_blanks def _translate_body(self, idx_lengths: dict, max_rows: int, max_cols: int): rlabels = self.data.index.tolist() if not isinstance(self.data.index, MultiIndex): rlabels = [[x] for x in rlabels] body: list = [] visible_row_count: int = 0 for (r, row_tup) in [z for z in enumerate(self.data.itertuples()) if z[0] not in self.hidden_rows]: visible_row_count += 1 if self._check_trim(visible_row_count, max_rows, body, 'row'): break body_row = self._generate_body_row((r, row_tup, rlabels), max_cols, idx_lengths) body.append(body_row) return body def _check_trim(self, count: int, max: int, obj: list, element: str, css: str | None=None, value: str='...') -> bool: if count > max: if element == 'row': obj.append(self._generate_trimmed_row(max)) else: obj.append(_element(element, css, value, True, attributes='')) return True return False def _generate_trimmed_row(self, max_cols: int) -> list: index_headers = [_element('th', f"{self.css['row_heading']} {self.css['level']}{c} {self.css['row_trim']}", '...', not self.hide_index_[c], attributes='') for c in range(self.data.index.nlevels)] data: list = [] visible_col_count: int = 0 for (c, _) in enumerate(self.columns): data_element_visible = c not in self.hidden_columns if data_element_visible: visible_col_count += 1 if self._check_trim(visible_col_count, max_cols, data, 'td', f"{self.css['data']} {self.css['row_trim']} {self.css['col_trim']}"): break data.append(_element('td', f"{self.css['data']} {self.css['col']}{c} {self.css['row_trim']}", '...', data_element_visible, attributes='')) return index_headers + data def _generate_body_row(self, iter: tuple, max_cols: int, idx_lengths: dict): (r, row_tup, rlabels) = iter index_headers = [] for (c, value) in enumerate(rlabels[r]): header_element_visible = _is_visible(r, c, idx_lengths) and (not self.hide_index_[c]) header_element = _element('th', f"{self.css['row_heading']} {self.css['level']}{c} {self.css['row']}{r}", value, header_element_visible, display_value=self._display_funcs_index[r, c](value), attributes=f'rowspan="{idx_lengths.get((c, r), 0)}"' if idx_lengths.get((c, r), 0) > 1 else '') if self.cell_ids: header_element['id'] = 
f"{self.css['level']}{c}_{self.css['row']}{r}" if header_element_visible and (r, c) in self.ctx_index and self.ctx_index[r, c]: header_element['id'] = f"{self.css['level']}{c}_{self.css['row']}{r}" self.cellstyle_map_index[tuple(self.ctx_index[r, c])].append(f"{self.css['level']}{c}_{self.css['row']}{r}") index_headers.append(header_element) data: list = [] visible_col_count: int = 0 for (c, value) in enumerate(row_tup[1:]): data_element_visible = c not in self.hidden_columns and r not in self.hidden_rows if data_element_visible: visible_col_count += 1 if self._check_trim(visible_col_count, max_cols, data, 'td', f"{self.css['data']} {self.css['row']}{r} {self.css['col_trim']}"): break cls = '' if (r, c) in self.cell_context: cls = ' ' + self.cell_context[r, c] data_element = _element('td', f"{self.css['data']} {self.css['row']}{r} {self.css['col']}{c}{cls}", value, data_element_visible, attributes='', display_value=self._display_funcs[r, c](value)) if self.cell_ids: data_element['id'] = f"{self.css['row']}{r}_{self.css['col']}{c}" if data_element_visible and (r, c) in self.ctx and self.ctx[r, c]: data_element['id'] = f"{self.css['row']}{r}_{self.css['col']}{c}" self.cellstyle_map[tuple(self.ctx[r, c])].append(f"{self.css['row']}{r}_{self.css['col']}{c}") data.append(data_element) return index_headers + data def _translate_latex(self, d: dict, clines: str | None) -> None: index_levels = self.index.nlevels visible_index_level_n = index_levels - sum(self.hide_index_) d['head'] = [[{**col, 'cellstyle': self.ctx_columns[r, c - visible_index_level_n]} for (c, col) in enumerate(row) if col['is_visible']] for (r, row) in enumerate(d['head'])] def _concatenated_visible_rows(obj, n, row_indices): row_indices.extend([r + n for r in range(len(obj.index)) if r not in obj.hidden_rows]) n += len(obj.index) for concatenated in obj.concatenated: n = _concatenated_visible_rows(concatenated, n, row_indices) return n def concatenated_visible_rows(obj): row_indices: list[int] = [] _concatenated_visible_rows(obj, 0, row_indices) return row_indices body = [] for (r, row) in zip(concatenated_visible_rows(self), d['body']): if all(self.hide_index_): row_body_headers = [] else: row_body_headers = [{**col, 'display_value': col['display_value'] if col['is_visible'] else '', 'cellstyle': self.ctx_index[r, c]} for (c, col) in enumerate(row[:index_levels]) if col['type'] == 'th' and (not self.hide_index_[c])] row_body_cells = [{**col, 'cellstyle': self.ctx[r, c]} for (c, col) in enumerate(row[index_levels:]) if col['is_visible'] and col['type'] == 'td'] body.append(row_body_headers + row_body_cells) d['body'] = body if clines not in [None, 'all;data', 'all;index', 'skip-last;data', 'skip-last;index']: raise ValueError(f"`clines` value of {clines} is invalid. 
Should either be None or one of 'all;data', 'all;index', 'skip-last;data', 'skip-last;index'.") if clines is not None: data_len = len(row_body_cells) if 'data' in clines and d['body'] else 0 d['clines'] = defaultdict(list) visible_row_indexes: list[int] = [r for r in range(len(self.data.index)) if r not in self.hidden_rows] visible_index_levels: list[int] = [i for i in range(index_levels) if not self.hide_index_[i]] for (rn, r) in enumerate(visible_row_indexes): for (lvln, lvl) in enumerate(visible_index_levels): if lvl == index_levels - 1 and 'skip-last' in clines: continue idx_len = d['index_lengths'].get((lvl, r), None) if idx_len is not None: d['clines'][rn + idx_len].append(f'\\cline{{{lvln + 1}-{len(visible_index_levels) + data_len}}}') def format(self, formatter: ExtFormatter | None=None, subset: Subset | None=None, na_rep: str | None=None, precision: int | None=None, decimal: str='.', thousands: str | None=None, escape: str | None=None, hyperlinks: str | None=None) -> StylerRenderer: if all((formatter is None, subset is None, precision is None, decimal == '.', thousands is None, na_rep is None, escape is None, hyperlinks is None)): self._display_funcs.clear() return self subset = slice(None) if subset is None else subset subset = non_reducing_slice(subset) data = self.data.loc[subset] if not isinstance(formatter, dict): formatter = {col: formatter for col in data.columns} cis = self.columns.get_indexer_for(data.columns) ris = self.index.get_indexer_for(data.index) for ci in cis: format_func = _maybe_wrap_formatter(formatter.get(self.columns[ci]), na_rep=na_rep, precision=precision, decimal=decimal, thousands=thousands, escape=escape, hyperlinks=hyperlinks) for ri in ris: self._display_funcs[ri, ci] = format_func return self def format_index(self, formatter: ExtFormatter | None=None, axis: Axis=0, level: Level | list[Level] | None=None, na_rep: str | None=None, precision: int | None=None, decimal: str='.', thousands: str | None=None, escape: str | None=None, hyperlinks: str | None=None) -> StylerRenderer: axis = self.data._get_axis_number(axis) if axis == 0: (display_funcs_, obj) = (self._display_funcs_index, self.index) else: (display_funcs_, obj) = (self._display_funcs_columns, self.columns) levels_ = refactor_levels(level, obj) if all((formatter is None, level is None, precision is None, decimal == '.', thousands is None, na_rep is None, escape is None, hyperlinks is None)): display_funcs_.clear() return self if not isinstance(formatter, dict): formatter = {level: formatter for level in levels_} else: formatter = {obj._get_level_number(level): formatter_ for (level, formatter_) in formatter.items()} for lvl in levels_: format_func = _maybe_wrap_formatter(formatter.get(lvl), na_rep=na_rep, precision=precision, decimal=decimal, thousands=thousands, escape=escape, hyperlinks=hyperlinks) for idx in [(i, lvl) if axis == 0 else (lvl, i) for i in range(len(obj))]: display_funcs_[idx] = format_func return self def relabel_index(self, labels: Sequence | Index, axis: Axis=0, level: Level | list[Level] | None=None) -> StylerRenderer: axis = self.data._get_axis_number(axis) if axis == 0: (display_funcs_, obj) = (self._display_funcs_index, self.index) (hidden_labels, hidden_lvls) = (self.hidden_rows, self.hide_index_) else: (display_funcs_, obj) = (self._display_funcs_columns, self.columns) (hidden_labels, hidden_lvls) = (self.hidden_columns, self.hide_columns_) visible_len = len(obj) - len(set(hidden_labels)) if len(labels) != visible_len: raise ValueError(f'``labels`` must be of length 
equal to the number of visible labels along ``axis`` ({visible_len}).') if level is None: level = [i for i in range(obj.nlevels) if not hidden_lvls[i]] levels_ = refactor_levels(level, obj) def alias_(x, value): if isinstance(value, str): return value.format(x) return value for (ai, i) in enumerate([i for i in range(len(obj)) if i not in hidden_labels]): if len(levels_) == 1: idx = (i, levels_[0]) if axis == 0 else (levels_[0], i) display_funcs_[idx] = partial(alias_, value=labels[ai]) else: for (aj, lvl) in enumerate(levels_): idx = (i, lvl) if axis == 0 else (lvl, i) display_funcs_[idx] = partial(alias_, value=labels[ai][aj]) return self def format_index_names(self, formatter: ExtFormatter | None=None, axis: Axis=0, level: Level | list[Level] | None=None, na_rep: str | None=None, precision: int | None=None, decimal: str='.', thousands: str | None=None, escape: str | None=None, hyperlinks: str | None=None) -> StylerRenderer: axis = self.data._get_axis_number(axis) if axis == 0: (display_funcs_, obj) = (self._display_funcs_index_names, self.index) else: (display_funcs_, obj) = (self._display_funcs_column_names, self.columns) levels_ = refactor_levels(level, obj) if all((formatter is None, level is None, precision is None, decimal == '.', thousands is None, na_rep is None, escape is None, hyperlinks is None)): display_funcs_.clear() return self if not isinstance(formatter, dict): formatter = {level: formatter for level in levels_} else: formatter = {obj._get_level_number(level): formatter_ for (level, formatter_) in formatter.items()} for lvl in levels_: format_func = _maybe_wrap_formatter(formatter.get(lvl), na_rep=na_rep, precision=precision, decimal=decimal, thousands=thousands, escape=escape, hyperlinks=hyperlinks) display_funcs_[lvl] = format_func return self def _element(html_element: str, html_class: str | None, value: Any, is_visible: bool, **kwargs) -> dict: if 'display_value' not in kwargs or kwargs['display_value'] is None: kwargs['display_value'] = value return {'type': html_element, 'value': value, 'class': html_class, 'is_visible': is_visible, **kwargs} def _get_trimming_maximums(rn, cn, max_elements, max_rows=None, max_cols=None, scaling_factor: float=0.8) -> tuple[int, int]: def scale_down(rn, cn): if cn >= rn: return (rn, int(cn * scaling_factor)) else: return (int(rn * scaling_factor), cn) if max_rows: rn = max_rows if rn > max_rows else rn if max_cols: cn = max_cols if cn > max_cols else cn while rn * cn > max_elements: (rn, cn) = scale_down(rn, cn) return (rn, cn) def _get_level_lengths(index: Index, sparsify: bool, max_index: int, hidden_elements: Sequence[int] | None=None): if isinstance(index, MultiIndex): levels = index._format_multi(sparsify=lib.no_default, include_names=False) else: levels = index._format_flat(include_name=False) if hidden_elements is None: hidden_elements = [] lengths = {} if not isinstance(index, MultiIndex): for (i, value) in enumerate(levels): if i not in hidden_elements: lengths[0, i] = 1 return lengths for (i, lvl) in enumerate(levels): visible_row_count = 0 for (j, row) in enumerate(lvl): if visible_row_count > max_index: break if not sparsify: if j not in hidden_elements: lengths[i, j] = 1 visible_row_count += 1 elif row is not lib.no_default and j not in hidden_elements: last_label = j lengths[i, last_label] = 1 visible_row_count += 1 elif row is not lib.no_default: last_label = j lengths[i, last_label] = 0 elif j not in hidden_elements: visible_row_count += 1 if visible_row_count > max_index: break if lengths[i, last_label] == 0: 
last_label = j lengths[i, last_label] = 1 else: lengths[i, last_label] += 1 non_zero_lengths = {element: length for (element, length) in lengths.items() if length >= 1} return non_zero_lengths def _is_visible(idx_row, idx_col, lengths) -> bool: return (idx_col, idx_row) in lengths def format_table_styles(styles: CSSStyles) -> CSSStyles: return [{'selector': selector, 'props': css_dict['props']} for css_dict in styles for selector in css_dict['selector'].split(',')] def _default_formatter(x: Any, precision: int, thousands: bool=False) -> Any: if is_float(x) or is_complex(x): return f'{x:,.{precision}f}' if thousands else f'{x:.{precision}f}' elif is_integer(x): return f'{x:,}' if thousands else str(x) return x def _wrap_decimal_thousands(formatter: Callable, decimal: str, thousands: str | None) -> Callable: def wrapper(x): if is_float(x) or is_integer(x) or is_complex(x): if decimal != '.' and thousands is not None and (thousands != ','): return formatter(x).replace(',', '§_§-').replace('.', decimal).replace('§_§-', thousands) elif decimal != '.' and (thousands is None or thousands == ','): return formatter(x).replace('.', decimal) elif decimal == '.' and thousands is not None and (thousands != ','): return formatter(x).replace(',', thousands) return formatter(x) return wrapper def _str_escape(x, escape): if isinstance(x, str): if escape == 'html': return escape_html(x) elif escape == 'latex': return _escape_latex(x) elif escape == 'latex-math': return _escape_latex_math(x) else: raise ValueError(f"`escape` only permitted in {{'html', 'latex', 'latex-math'}}, got {escape}") return x def _render_href(x, format): if isinstance(x, str): if format == 'html': href = '<a href="{0}" target="_blank">{0}</a>' elif format == 'latex': href = '\\href{{{0}}}{{{0}}}' else: raise ValueError("``hyperlinks`` format can only be 'html' or 'latex'") pat = "((http|ftp)s?:\\/\\/|www.)[\\w/\\-?=%.:@]+\\.[\\w/\\-&?=%.,':;~!@#$*()\\[\\]]+" return re.sub(pat, lambda m: href.format(m.group(0)), x) return x def _maybe_wrap_formatter(formatter: BaseFormatter | None=None, na_rep: str | None=None, precision: int | None=None, decimal: str='.', thousands: str | None=None, escape: str | None=None, hyperlinks: str | None=None) -> Callable: if isinstance(formatter, str): func_0 = lambda x: formatter.format(x) elif callable(formatter): func_0 = formatter elif formatter is None: precision = get_option('styler.format.precision') if precision is None else precision func_0 = partial(_default_formatter, precision=precision, thousands=thousands is not None) else: raise TypeError(f"'formatter' expected str or callable, got {type(formatter)}") if escape is not None: func_1 = lambda x: func_0(_str_escape(x, escape=escape)) else: func_1 = func_0 if decimal != '.' 
or (thousands is not None and thousands != ','): func_2 = _wrap_decimal_thousands(func_1, decimal=decimal, thousands=thousands) else: func_2 = func_1 if hyperlinks is not None: func_3 = lambda x: func_2(_render_href(x, format=hyperlinks)) else: func_3 = func_2 if na_rep is None: return func_3 else: return lambda x: na_rep if isna(x) is True else func_3(x) def non_reducing_slice(slice_: Subset): kinds = (ABCSeries, np.ndarray, Index, list, str) if isinstance(slice_, kinds): slice_ = IndexSlice[:, slice_] def pred(part) -> bool: if isinstance(part, tuple): return any((isinstance(s, slice) or is_list_like(s) for s in part)) else: return isinstance(part, slice) or is_list_like(part) if not is_list_like(slice_): if not isinstance(slice_, slice): slice_ = [[slice_]] else: slice_ = [slice_] else: slice_ = [p if pred(p) else [p] for p in slice_] return tuple(slice_) def maybe_convert_css_to_tuples(style: CSSProperties) -> CSSList: if isinstance(style, str): s = style.split(';') try: return [(x.split(':')[0].strip(), x.split(':')[1].strip()) for x in s if x.strip() != ''] except IndexError as err: raise ValueError(f"Styles supplied as string must follow CSS rule formats, for example 'attr: val;'. '{style}' was given.") from err return style def refactor_levels(level: Level | list[Level] | None, obj: Index) -> list[int]: if level is None: levels_: list[int] = list(range(obj.nlevels)) elif isinstance(level, int): levels_ = [level] elif isinstance(level, str): levels_ = [obj._get_level_number(level)] elif isinstance(level, list): levels_ = [obj._get_level_number(lev) if not isinstance(lev, int) else lev for lev in level] else: raise ValueError('`level` must be of type `int`, `str` or list of such') return levels_ class Tooltips: def __init__(self, css_props: CSSProperties=[('visibility', 'hidden'), ('position', 'absolute'), ('z-index', 1), ('background-color', 'black'), ('color', 'white'), ('transform', 'translate(-20px, -20px)')], css_name: str='pd-t', tooltips: DataFrame=DataFrame(), as_title_attribute: bool=False) -> None: self.class_name = css_name self.class_properties = css_props self.tt_data = tooltips self.table_styles: CSSStyles = [] self.as_title_attribute = as_title_attribute @property def _class_styles(self): return [{'selector': f'.{self.class_name}', 'props': maybe_convert_css_to_tuples(self.class_properties)}] def _pseudo_css(self, uuid: str, name: str, row: int, col: int, text: str) -> list[CSSDict]: selector_id = '#T_' + uuid + '_row' + str(row) + '_col' + str(col) return [{'selector': selector_id + f':hover .{name}', 'props': [('visibility', 'visible')]}, {'selector': selector_id + f' .{name}::after', 'props': [('content', f'"{text}"')]}] def _translate(self, styler: StylerRenderer, d: dict): self.tt_data = self.tt_data.reindex_like(styler.data) if self.tt_data.empty: return d mask = self.tt_data.isna() | self.tt_data.eq('') if not self.as_title_attribute: name = self.class_name self.table_styles = [style for sublist in [self._pseudo_css(styler.uuid, name, i, j, str(self.tt_data.iloc[i, j])) for i in range(len(self.tt_data.index)) for j in range(len(self.tt_data.columns)) if not (mask.iloc[i, j] or i in styler.hidden_rows or j in styler.hidden_columns)] for style in sublist] if self.table_styles: for row in d['body']: for item in row: if item['type'] == 'td': item['display_value'] = str(item['display_value']) + f'<span class="{name}"></span>' d['table_styles'].extend(self._class_styles) d['table_styles'].extend(self.table_styles) else: index_offset = self.tt_data.index.nlevels body = d['body'] for i in 
range(len(self.tt_data.index)): for j in range(len(self.tt_data.columns)): if not mask.iloc[i, j] or i in styler.hidden_rows or j in styler.hidden_columns: row = body[i] item = row[j + index_offset] value = self.tt_data.iloc[i, j] item['attributes'] += f' title="{value}"' return d def _parse_latex_table_wrapping(table_styles: CSSStyles, caption: str | None) -> bool: IGNORED_WRAPPERS = ['toprule', 'midrule', 'bottomrule', 'column_format'] return table_styles is not None and any((d['selector'] not in IGNORED_WRAPPERS for d in table_styles)) or caption is not None def _parse_latex_table_styles(table_styles: CSSStyles, selector: str) -> str | None: for style in table_styles[::-1]: if style['selector'] == selector: return str(style['props'][0][1]).replace('§', ':') return None def _parse_latex_cell_styles(latex_styles: CSSList, display_value: str, convert_css: bool=False) -> str: if convert_css: latex_styles = _parse_latex_css_conversion(latex_styles) for (command, options) in latex_styles[::-1]: formatter = {'--wrap': f'{{\\{command}--to_parse {display_value}}}', '--nowrap': f'\\{command}--to_parse {display_value}', '--lwrap': f'{{\\{command}--to_parse}} {display_value}', '--rwrap': f'\\{command}--to_parse{{{display_value}}}', '--dwrap': f'{{\\{command}--to_parse}}{{{display_value}}}'} display_value = f'\\{command}{options} {display_value}' for arg in ['--nowrap', '--wrap', '--lwrap', '--rwrap', '--dwrap']: if arg in str(options): display_value = formatter[arg].replace('--to_parse', _parse_latex_options_strip(value=options, arg=arg)) break return display_value def _parse_latex_header_span(cell: dict[str, Any], multirow_align: str, multicol_align: str, wrap: bool=False, convert_css: bool=False) -> str: display_val = _parse_latex_cell_styles(cell['cellstyle'], cell['display_value'], convert_css) if 'attributes' in cell: attrs = cell['attributes'] if 'colspan="' in attrs: colspan = attrs[attrs.find('colspan="') + 9:] colspan = int(colspan[:colspan.find('"')]) if 'naive-l' == multicol_align: out = f'{{{display_val}}}' if wrap else f'{display_val}' blanks = ' & {}' if wrap else ' &' return out + blanks * (colspan - 1) elif 'naive-r' == multicol_align: out = f'{{{display_val}}}' if wrap else f'{display_val}' blanks = '{} & ' if wrap else '& ' return blanks * (colspan - 1) + out return f'\\multicolumn{{{colspan}}}{{{multicol_align}}}{{{display_val}}}' elif 'rowspan="' in attrs: if multirow_align == 'naive': return display_val rowspan = attrs[attrs.find('rowspan="') + 9:] rowspan = int(rowspan[:rowspan.find('"')]) return f'\\multirow[{multirow_align}]{{{rowspan}}}{{*}}{{{display_val}}}' if wrap: return f'{{{display_val}}}' else: return display_val def _parse_latex_options_strip(value: str | float, arg: str) -> str: return str(value).replace(arg, '').replace('/*', '').replace('*/', '').strip() def _parse_latex_css_conversion(styles: CSSList) -> CSSList: def font_weight(value, arg) -> tuple[str, str] | None: if value in ('bold', 'bolder'): return ('bfseries', f'{arg}') return None def font_style(value, arg) -> tuple[str, str] | None: if value == 'italic': return ('itshape', f'{arg}') if value == 'oblique': return ('slshape', f'{arg}') return None def color(value, user_arg, command, comm_arg): arg = user_arg if user_arg != '' else comm_arg if value[0] == '#' and len(value) == 7: return (command, f'[HTML]{{{value[1:].upper()}}}{arg}') if value[0] == '#' and len(value) == 4: val = f'{value[1].upper() * 2}{value[2].upper() * 2}{value[3].upper() * 2}' return (command, f'[HTML]{{{val}}}{arg}') elif value[:3] 
== 'rgb': r = re.findall('(?<=\\()[0-9\\s%]+(?=,)', value)[0].strip() r = float(r[:-1]) / 100 if '%' in r else int(r) / 255 g = re.findall('(?<=,)[0-9\\s%]+(?=,)', value)[0].strip() g = float(g[:-1]) / 100 if '%' in g else int(g) / 255 if value[3] == 'a': b = re.findall('(?<=,)[0-9\\s%]+(?=,)', value)[1].strip() else: b = re.findall('(?<=,)[0-9\\s%]+(?=\\))', value)[0].strip() b = float(b[:-1]) / 100 if '%' in b else int(b) / 255 return (command, f'[rgb]{{{r:.3f}, {g:.3f}, {b:.3f}}}{arg}') else: return (command, f'{{{value}}}{arg}') CONVERTED_ATTRIBUTES: dict[str, Callable] = {'font-weight': font_weight, 'background-color': partial(color, command='cellcolor', comm_arg='--lwrap'), 'color': partial(color, command='color', comm_arg=''), 'font-style': font_style} latex_styles: CSSList = [] for (attribute, value) in styles: if isinstance(value, str) and '--latex' in value: latex_styles.append((attribute, value.replace('--latex', ''))) if attribute in CONVERTED_ATTRIBUTES: arg = '' for x in ['--wrap', '--nowrap', '--lwrap', '--dwrap', '--rwrap']: if x in str(value): (arg, value) = (x, _parse_latex_options_strip(value, x)) break latex_style = CONVERTED_ATTRIBUTES[attribute](value, arg) if latex_style is not None: latex_styles.extend([latex_style]) return latex_styles def _escape_latex(s: str) -> str: return s.replace('\\', 'ab2§=§8yz').replace('ab2§=§8yz ', 'ab2§=§8yz\\space ').replace('&', '\\&').replace('%', '\\%').replace('$', '\\$').replace('#', '\\#').replace('_', '\\_').replace('{', '\\{').replace('}', '\\}').replace('~ ', '~\\space ').replace('~', '\\textasciitilde ').replace('^ ', '^\\space ').replace('^', '\\textasciicircum ').replace('ab2§=§8yz', '\\textbackslash ') def _math_mode_with_dollar(s: str) -> str: s = s.replace('\\$', 'rt8§=§7wz') pattern = re.compile('\\$.*?\\$') pos = 0 ps = pattern.search(s, pos) res = [] while ps: res.append(_escape_latex(s[pos:ps.span()[0]])) res.append(ps.group()) pos = ps.span()[1] ps = pattern.search(s, pos) res.append(_escape_latex(s[pos:len(s)])) return ''.join(res).replace('rt8§=§7wz', '\\$') def _math_mode_with_parentheses(s: str) -> str: s = s.replace('\\(', 'LEFT§=§6yzLEFT').replace('\\)', 'RIGHTab5§=§RIGHT') res = [] for item in re.split('LEFT§=§6yz|ab5§=§RIGHT', s): if item.startswith('LEFT') and item.endswith('RIGHT'): res.append(item.replace('LEFT', '\\(').replace('RIGHT', '\\)')) elif 'LEFT' in item and 'RIGHT' in item: res.append(_escape_latex(item).replace('LEFT', '\\(').replace('RIGHT', '\\)')) else: res.append(_escape_latex(item).replace('LEFT', '\\textbackslash (').replace('RIGHT', '\\textbackslash )')) return ''.join(res) def _escape_latex_math(s: str) -> str: s = s.replace('\\$', 'rt8§=§7wz') ps_d = re.compile('\\$.*?\\$').search(s, 0) ps_p = re.compile('\\(.*?\\)').search(s, 0) mode = [] if ps_d: mode.append(ps_d.span()[0]) if ps_p: mode.append(ps_p.span()[0]) if len(mode) == 0: return _escape_latex(s.replace('rt8§=§7wz', '\\$')) if s[mode[0]] == '$': return _math_mode_with_dollar(s.replace('rt8§=§7wz', '\\$')) if s[mode[0] - 1:mode[0] + 1] == '\\(': return _math_mode_with_parentheses(s.replace('rt8§=§7wz', '\\$')) else: return _escape_latex(s.replace('rt8§=§7wz', '\\$')) # File: pandas-main/pandas/io/formats/xml.py """""" from __future__ import annotations import codecs import io from typing import TYPE_CHECKING, Any, final from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly, doc from pandas.core.dtypes.common import is_list_like from pandas.core.dtypes.missing import isna from 
pandas.core.shared_docs import _shared_docs from pandas.io.common import get_handle from pandas.io.xml import get_data_from_filepath if TYPE_CHECKING: from pandas._typing import CompressionOptions, FilePath, ReadBuffer, StorageOptions, WriteBuffer from pandas import DataFrame @doc(storage_options=_shared_docs['storage_options'], compression_options=_shared_docs['compression_options'] % 'path_or_buffer') class _BaseXMLFormatter: def __init__(self, frame: DataFrame, path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None=None, index: bool=True, root_name: str | None='data', row_name: str | None='row', na_rep: str | None=None, attr_cols: list[str] | None=None, elem_cols: list[str] | None=None, namespaces: dict[str | None, str] | None=None, prefix: str | None=None, encoding: str='utf-8', xml_declaration: bool | None=True, pretty_print: bool | None=True, stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None=None, compression: CompressionOptions='infer', storage_options: StorageOptions | None=None) -> None: self.frame = frame self.path_or_buffer = path_or_buffer self.index = index self.root_name = root_name self.row_name = row_name self.na_rep = na_rep self.attr_cols = attr_cols self.elem_cols = elem_cols self.namespaces = namespaces self.prefix = prefix self.encoding = encoding self.xml_declaration = xml_declaration self.pretty_print = pretty_print self.stylesheet = stylesheet self.compression: CompressionOptions = compression self.storage_options = storage_options self.orig_cols = self.frame.columns.tolist() self.frame_dicts = self._process_dataframe() self._validate_columns() self._validate_encoding() self.prefix_uri = self._get_prefix_uri() self._handle_indexes() def _build_tree(self) -> bytes: raise AbstractMethodError(self) @final def _validate_columns(self) -> None: if self.attr_cols and (not is_list_like(self.attr_cols)): raise TypeError(f'{type(self.attr_cols).__name__} is not a valid type for attr_cols') if self.elem_cols and (not is_list_like(self.elem_cols)): raise TypeError(f'{type(self.elem_cols).__name__} is not a valid type for elem_cols') @final def _validate_encoding(self) -> None: codecs.lookup(self.encoding) @final def _process_dataframe(self) -> dict[int | str, dict[str, Any]]: df = self.frame if self.index: df = df.reset_index() if self.na_rep is not None: df = df.fillna(self.na_rep) return df.to_dict(orient='index') @final def _handle_indexes(self) -> None: if not self.index: return first_key = next(iter(self.frame_dicts)) indexes: list[str] = [x for x in self.frame_dicts[first_key].keys() if x not in self.orig_cols] if self.attr_cols: self.attr_cols = indexes + self.attr_cols if self.elem_cols: self.elem_cols = indexes + self.elem_cols def _get_prefix_uri(self) -> str: raise AbstractMethodError(self) @final def _other_namespaces(self) -> dict: nmsp_dict: dict[str, str] = {} if self.namespaces: nmsp_dict = {f"xmlns{(p if p == '' else f':{p}')}": n for (p, n) in self.namespaces.items() if n != self.prefix_uri[1:-1]} return nmsp_dict @final def _build_attribs(self, d: dict[str, Any], elem_row: Any) -> Any: if not self.attr_cols: return elem_row for col in self.attr_cols: attr_name = self._get_flat_col_name(col) try: if not isna(d[col]): elem_row.attrib[attr_name] = str(d[col]) except KeyError as err: raise KeyError(f'no valid column, {col}') from err return elem_row @final def _get_flat_col_name(self, col: str | tuple) -> str: flat_col = col if isinstance(col, tuple): flat_col = ''.join([str(c) for c in col]).strip() if '' in col else 
'_'.join([str(c) for c in col]).strip() return f'{self.prefix_uri}{flat_col}' @cache_readonly def _sub_element_cls(self): raise AbstractMethodError(self) @final def _build_elems(self, d: dict[str, Any], elem_row: Any) -> None: sub_element_cls = self._sub_element_cls if not self.elem_cols: return for col in self.elem_cols: elem_name = self._get_flat_col_name(col) try: val = None if isna(d[col]) or d[col] == '' else str(d[col]) sub_element_cls(elem_row, elem_name).text = val except KeyError as err: raise KeyError(f'no valid column, {col}') from err @final def write_output(self) -> str | None: xml_doc = self._build_tree() if self.path_or_buffer is not None: with get_handle(self.path_or_buffer, 'wb', compression=self.compression, storage_options=self.storage_options, is_text=False) as handles: handles.handle.write(xml_doc) return None else: return xml_doc.decode(self.encoding).rstrip() class EtreeXMLFormatter(_BaseXMLFormatter): def _build_tree(self) -> bytes: from xml.etree.ElementTree import Element, SubElement, tostring self.root = Element(f'{self.prefix_uri}{self.root_name}', attrib=self._other_namespaces()) for d in self.frame_dicts.values(): elem_row = SubElement(self.root, f'{self.prefix_uri}{self.row_name}') if not self.attr_cols and (not self.elem_cols): self.elem_cols = list(d.keys()) self._build_elems(d, elem_row) else: elem_row = self._build_attribs(d, elem_row) self._build_elems(d, elem_row) self.out_xml = tostring(self.root, method='xml', encoding=self.encoding, xml_declaration=self.xml_declaration) if self.pretty_print: self.out_xml = self._prettify_tree() if self.stylesheet is not None: raise ValueError('To use stylesheet, you need lxml installed and selected as parser.') return self.out_xml def _get_prefix_uri(self) -> str: from xml.etree.ElementTree import register_namespace uri = '' if self.namespaces: for (p, n) in self.namespaces.items(): if isinstance(p, str) and isinstance(n, str): register_namespace(p, n) if self.prefix: try: uri = f'{{{self.namespaces[self.prefix]}}}' except KeyError as err: raise KeyError(f'{self.prefix} is not included in namespaces') from err elif '' in self.namespaces: uri = f"{{{self.namespaces['']}}}" else: uri = '' return uri @cache_readonly def _sub_element_cls(self): from xml.etree.ElementTree import SubElement return SubElement def _prettify_tree(self) -> bytes: from xml.dom.minidom import parseString dom = parseString(self.out_xml) return dom.toprettyxml(indent=' ', encoding=self.encoding) class LxmlXMLFormatter(_BaseXMLFormatter): def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self._convert_empty_str_key() def _build_tree(self) -> bytes: from lxml.etree import Element, SubElement, tostring self.root = Element(f'{self.prefix_uri}{self.root_name}', nsmap=self.namespaces) for d in self.frame_dicts.values(): elem_row = SubElement(self.root, f'{self.prefix_uri}{self.row_name}') if not self.attr_cols and (not self.elem_cols): self.elem_cols = list(d.keys()) self._build_elems(d, elem_row) else: elem_row = self._build_attribs(d, elem_row) self._build_elems(d, elem_row) self.out_xml = tostring(self.root, pretty_print=self.pretty_print, method='xml', encoding=self.encoding, xml_declaration=self.xml_declaration) if self.stylesheet is not None: self.out_xml = self._transform_doc() return self.out_xml def _convert_empty_str_key(self) -> None: if self.namespaces and '' in self.namespaces.keys(): self.namespaces[None] = self.namespaces.pop('', 'default') def _get_prefix_uri(self) -> str: uri = '' if self.namespaces: if 
self.prefix: try: uri = f'{{{self.namespaces[self.prefix]}}}' except KeyError as err: raise KeyError(f'{self.prefix} is not included in namespaces') from err elif '' in self.namespaces: uri = f"{{{self.namespaces['']}}}" else: uri = '' return uri @cache_readonly def _sub_element_cls(self): from lxml.etree import SubElement return SubElement def _transform_doc(self) -> bytes: from lxml.etree import XSLT, XMLParser, fromstring, parse style_doc = self.stylesheet assert style_doc is not None handle_data = get_data_from_filepath(filepath_or_buffer=style_doc, encoding=self.encoding, compression=self.compression, storage_options=self.storage_options) with handle_data as xml_data: curr_parser = XMLParser(encoding=self.encoding) if isinstance(xml_data, io.StringIO): xsl_doc = fromstring(xml_data.getvalue().encode(self.encoding), parser=curr_parser) else: xsl_doc = parse(xml_data, parser=curr_parser) transformer = XSLT(xsl_doc) new_doc = transformer(self.root) return bytes(new_doc) # File: pandas-main/pandas/io/html.py """""" from __future__ import annotations from collections import abc import errno import numbers import os import re from re import Pattern from typing import TYPE_CHECKING, Literal, cast from pandas._libs import lib from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError, EmptyDataError from pandas.util._decorators import doc from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import is_list_like from pandas import isna from pandas.core.indexes.base import Index from pandas.core.indexes.multi import MultiIndex from pandas.core.series import Series from pandas.core.shared_docs import _shared_docs from pandas.io.common import get_handle, is_url, stringify_path, validate_header_arg from pandas.io.formats.printing import pprint_thing from pandas.io.parsers import TextParser if TYPE_CHECKING: from collections.abc import Iterable, Sequence from pandas._typing import BaseBuffer, DtypeBackend, FilePath, HTMLFlavors, ReadBuffer, StorageOptions from pandas import DataFrame _RE_WHITESPACE = re.compile('[\\r\\n]+|\\s{2,}') def _remove_whitespace(s: str, regex: Pattern=_RE_WHITESPACE) -> str: return regex.sub(' ', s.strip()) def _get_skiprows(skiprows: int | Sequence[int] | slice | None) -> int | Sequence[int]: if isinstance(skiprows, slice): (start, step) = (skiprows.start or 0, skiprows.step or 1) return list(range(start, skiprows.stop, step)) elif isinstance(skiprows, numbers.Integral) or is_list_like(skiprows): return cast('int | Sequence[int]', skiprows) elif skiprows is None: return 0 raise TypeError(f'{type(skiprows).__name__} is not a valid type for skipping rows') def _read(obj: FilePath | BaseBuffer, encoding: str | None, storage_options: StorageOptions | None) -> str | bytes: try: with get_handle(obj, 'r', encoding=encoding, storage_options=storage_options) as handles: return handles.handle.read() except OSError as err: if not is_url(obj): raise FileNotFoundError(f'[Errno {errno.ENOENT}] {os.strerror(errno.ENOENT)}: {obj}') from err raise class _HtmlFrameParser: def __init__(self, io: FilePath | ReadBuffer[str] | ReadBuffer[bytes], match: str | Pattern, attrs: dict[str, str] | None, encoding: str, displayed_only: bool, extract_links: Literal[None, 'header', 'footer', 'body', 'all'], storage_options: StorageOptions=None) -> None: self.io = io self.match = match self.attrs = attrs self.encoding = encoding self.displayed_only = displayed_only self.extract_links = extract_links self.storage_options = 
storage_options def parse_tables(self): tables = self._parse_tables(self._build_doc(), self.match, self.attrs) return (self._parse_thead_tbody_tfoot(table) for table in tables) def _attr_getter(self, obj, attr): return obj.get(attr) def _href_getter(self, obj) -> str | None: raise AbstractMethodError(self) def _text_getter(self, obj): raise AbstractMethodError(self) def _parse_td(self, obj): raise AbstractMethodError(self) def _parse_thead_tr(self, table): raise AbstractMethodError(self) def _parse_tbody_tr(self, table): raise AbstractMethodError(self) def _parse_tfoot_tr(self, table): raise AbstractMethodError(self) def _parse_tables(self, document, match, attrs): raise AbstractMethodError(self) def _equals_tag(self, obj, tag) -> bool: raise AbstractMethodError(self) def _build_doc(self): raise AbstractMethodError(self) def _parse_thead_tbody_tfoot(self, table_html): header_rows = self._parse_thead_tr(table_html) body_rows = self._parse_tbody_tr(table_html) footer_rows = self._parse_tfoot_tr(table_html) def row_is_all_th(row): return all((self._equals_tag(t, 'th') for t in self._parse_td(row))) if not header_rows: while body_rows and row_is_all_th(body_rows[0]): header_rows.append(body_rows.pop(0)) header = self._expand_colspan_rowspan(header_rows, section='header') body = self._expand_colspan_rowspan(body_rows, section='body') footer = self._expand_colspan_rowspan(footer_rows, section='footer') return (header, body, footer) def _expand_colspan_rowspan(self, rows, section: Literal['header', 'footer', 'body']) -> list[list]: all_texts = [] text: str | tuple remainder: list[tuple[int, str | tuple, int]] = [] for tr in rows: texts = [] next_remainder = [] index = 0 tds = self._parse_td(tr) for td in tds: while remainder and remainder[0][0] <= index: (prev_i, prev_text, prev_rowspan) = remainder.pop(0) texts.append(prev_text) if prev_rowspan > 1: next_remainder.append((prev_i, prev_text, prev_rowspan - 1)) index += 1 text = _remove_whitespace(self._text_getter(td)) if self.extract_links in ('all', section): href = self._href_getter(td) text = (text, href) rowspan = int(self._attr_getter(td, 'rowspan') or 1) colspan = int(self._attr_getter(td, 'colspan') or 1) for _ in range(colspan): texts.append(text) if rowspan > 1: next_remainder.append((index, text, rowspan - 1)) index += 1 for (prev_i, prev_text, prev_rowspan) in remainder: texts.append(prev_text) if prev_rowspan > 1: next_remainder.append((prev_i, prev_text, prev_rowspan - 1)) all_texts.append(texts) remainder = next_remainder while remainder: next_remainder = [] texts = [] for (prev_i, prev_text, prev_rowspan) in remainder: texts.append(prev_text) if prev_rowspan > 1: next_remainder.append((prev_i, prev_text, prev_rowspan - 1)) all_texts.append(texts) remainder = next_remainder return all_texts def _handle_hidden_tables(self, tbl_list, attr_name: str): if not self.displayed_only: return tbl_list return [x for x in tbl_list if 'display:none' not in getattr(x, attr_name).get('style', '').replace(' ', '')] class _BeautifulSoupHtml5LibFrameParser(_HtmlFrameParser): def _parse_tables(self, document, match, attrs): element_name = 'table' tables = document.find_all(element_name, attrs=attrs) if not tables: raise ValueError('No tables found') result = [] unique_tables = set() tables = self._handle_hidden_tables(tables, 'attrs') for table in tables: if self.displayed_only: for elem in table.find_all('style'): elem.decompose() for elem in table.find_all(style=re.compile('display:\\s*none')): elem.decompose() if table not in unique_tables and 
table.find(string=match) is not None: result.append(table) unique_tables.add(table) if not result: raise ValueError(f'No tables found matching pattern {match.pattern!r}') return result def _href_getter(self, obj) -> str | None: a = obj.find('a', href=True) return None if not a else a['href'] def _text_getter(self, obj): return obj.text def _equals_tag(self, obj, tag) -> bool: return obj.name == tag def _parse_td(self, row): return row.find_all(('td', 'th'), recursive=False) def _parse_thead_tr(self, table): return table.select('thead tr') def _parse_tbody_tr(self, table): from_tbody = table.select('tbody tr') from_root = table.find_all('tr', recursive=False) return from_tbody + from_root def _parse_tfoot_tr(self, table): return table.select('tfoot tr') def _setup_build_doc(self): raw_text = _read(self.io, self.encoding, self.storage_options) if not raw_text: raise ValueError(f'No text parsed from document: {self.io}') return raw_text def _build_doc(self): from bs4 import BeautifulSoup bdoc = self._setup_build_doc() if isinstance(bdoc, bytes) and self.encoding is not None: udoc = bdoc.decode(self.encoding) from_encoding = None else: udoc = bdoc from_encoding = self.encoding soup = BeautifulSoup(udoc, features='html5lib', from_encoding=from_encoding) for br in soup.find_all('br'): br.replace_with('\n' + br.text) return soup def _build_xpath_expr(attrs) -> str: if 'class_' in attrs: attrs['class'] = attrs.pop('class_') s = ' and '.join([f'@{k}={v!r}' for (k, v) in attrs.items()]) return f'[{s}]' _re_namespace = {'re': 'http://exslt.org/regular-expressions'} class _LxmlFrameParser(_HtmlFrameParser): def _href_getter(self, obj) -> str | None: href = obj.xpath('.//a/@href') return None if not href else href[0] def _text_getter(self, obj): return obj.text_content() def _parse_td(self, row): return row.xpath('./td|./th') def _parse_tables(self, document, match, kwargs): pattern = match.pattern xpath_expr = f'//table[.//text()[re:test(., {pattern!r})]]' if kwargs: xpath_expr += _build_xpath_expr(kwargs) tables = document.xpath(xpath_expr, namespaces=_re_namespace) tables = self._handle_hidden_tables(tables, 'attrib') if self.displayed_only: for table in tables: for elem in table.xpath('.//style'): elem.drop_tree() for elem in table.xpath('.//*[@style]'): if 'display:none' in elem.attrib.get('style', '').replace(' ', ''): elem.drop_tree() if not tables: raise ValueError(f'No tables found matching regex {pattern!r}') return tables def _equals_tag(self, obj, tag) -> bool: return obj.tag == tag def _build_doc(self): from lxml.etree import XMLSyntaxError from lxml.html import HTMLParser, parse parser = HTMLParser(recover=True, encoding=self.encoding) if is_url(self.io): with get_handle(self.io, 'r', storage_options=self.storage_options) as f: r = parse(f.handle, parser=parser) else: try: r = parse(self.io, parser=parser) except OSError as err: raise FileNotFoundError(f'[Errno {errno.ENOENT}] {os.strerror(errno.ENOENT)}: {self.io}') from err try: r = r.getroot() except AttributeError: pass else: if not hasattr(r, 'text_content'): raise XMLSyntaxError('no text parsed from document', 0, 0, 0) for br in r.xpath('*//br'): br.tail = '\n' + (br.tail or '') return r def _parse_thead_tr(self, table): rows = [] for thead in table.xpath('.//thead'): rows.extend(thead.xpath('./tr')) elements_at_root = thead.xpath('./td|./th') if elements_at_root: rows.append(thead) return rows def _parse_tbody_tr(self, table): from_tbody = table.xpath('.//tbody//tr') from_root = table.xpath('./tr') return from_tbody + from_root def 
_parse_tfoot_tr(self, table): return table.xpath('.//tfoot//tr') def _expand_elements(body) -> None: data = [len(elem) for elem in body] lens = Series(data) lens_max = lens.max() not_max = lens[lens != lens_max] empty = [''] for (ind, length) in not_max.items(): body[ind] += empty * (lens_max - length) def _data_to_frame(**kwargs): (head, body, foot) = kwargs.pop('data') header = kwargs.pop('header') kwargs['skiprows'] = _get_skiprows(kwargs['skiprows']) if head: body = head + body if header is None: if len(head) == 1: header = 0 else: header = [i for (i, row) in enumerate(head) if any((text for text in row))] if foot: body += foot _expand_elements(body) with TextParser(body, header=header, **kwargs) as tp: return tp.read() _valid_parsers = {'lxml': _LxmlFrameParser, None: _LxmlFrameParser, 'html5lib': _BeautifulSoupHtml5LibFrameParser, 'bs4': _BeautifulSoupHtml5LibFrameParser} def _parser_dispatch(flavor: HTMLFlavors | None) -> type[_HtmlFrameParser]: valid_parsers = list(_valid_parsers.keys()) if flavor not in valid_parsers: raise ValueError(f'{flavor!r} is not a valid flavor, valid flavors are {valid_parsers}') if flavor in ('bs4', 'html5lib'): import_optional_dependency('html5lib') import_optional_dependency('bs4') else: import_optional_dependency('lxml.etree') return _valid_parsers[flavor] def _print_as_set(s) -> str: arg = ', '.join([pprint_thing(el) for el in s]) return f'{{{arg}}}' def _validate_flavor(flavor): if flavor is None: flavor = ('lxml', 'bs4') elif isinstance(flavor, str): flavor = (flavor,) elif isinstance(flavor, abc.Iterable): if not all((isinstance(flav, str) for flav in flavor)): raise TypeError(f'Object of type {type(flavor).__name__!r} is not an iterable of strings') else: msg = repr(flavor) if isinstance(flavor, str) else str(flavor) msg += ' is not a valid flavor' raise ValueError(msg) flavor = tuple(flavor) valid_flavors = set(_valid_parsers) flavor_set = set(flavor) if not flavor_set & valid_flavors: raise ValueError(f'{_print_as_set(flavor_set)} is not a valid set of flavors, valid flavors are {_print_as_set(valid_flavors)}') return flavor def _parse(flavor, io, match, attrs, encoding, displayed_only, extract_links, storage_options, **kwargs): flavor = _validate_flavor(flavor) compiled_match = re.compile(match) retained = None for flav in flavor: parser = _parser_dispatch(flav) p = parser(io, compiled_match, attrs, encoding, displayed_only, extract_links, storage_options) try: tables = p.parse_tables() except ValueError as caught: if hasattr(io, 'seekable') and io.seekable(): io.seek(0) elif hasattr(io, 'seekable') and (not io.seekable()): raise ValueError(f"The flavor {flav} failed to parse your input. Since you passed a non-rewindable file object, we can't rewind it to try another parser. 
Try read_html() with a different flavor.") from caught retained = caught else: break else: assert retained is not None raise retained ret = [] for table in tables: try: df = _data_to_frame(data=table, **kwargs) if extract_links in ('all', 'header') and isinstance(df.columns, MultiIndex): df.columns = Index(((col[0], None if isna(col[1]) else col[1]) for col in df.columns), tupleize_cols=False) ret.append(df) except EmptyDataError: continue return ret @doc(storage_options=_shared_docs['storage_options']) def read_html(io: FilePath | ReadBuffer[str], *, match: str | Pattern='.+', flavor: HTMLFlavors | Sequence[HTMLFlavors] | None=None, header: int | Sequence[int] | None=None, index_col: int | Sequence[int] | None=None, skiprows: int | Sequence[int] | slice | None=None, attrs: dict[str, str] | None=None, parse_dates: bool=False, thousands: str | None=',', encoding: str | None=None, decimal: str='.', converters: dict | None=None, na_values: Iterable[object] | None=None, keep_default_na: bool=True, displayed_only: bool=True, extract_links: Literal[None, 'header', 'footer', 'body', 'all']=None, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default, storage_options: StorageOptions=None) -> list[DataFrame]: if isinstance(skiprows, numbers.Integral) and skiprows < 0: raise ValueError('cannot skip rows starting from the end of the data (you passed a negative value)') if extract_links not in [None, 'header', 'footer', 'body', 'all']: raise ValueError(f'`extract_links` must be one of {{None, "header", "footer", "body", "all"}}, got "{extract_links}"') validate_header_arg(header) check_dtype_backend(dtype_backend) io = stringify_path(io) return _parse(flavor=flavor, io=io, match=match, header=header, index_col=index_col, skiprows=skiprows, parse_dates=parse_dates, thousands=thousands, attrs=attrs, encoding=encoding, decimal=decimal, converters=converters, na_values=na_values, keep_default_na=keep_default_na, displayed_only=displayed_only, extract_links=extract_links, dtype_backend=dtype_backend, storage_options=storage_options) # File: pandas-main/pandas/io/json/__init__.py from pandas.io.json._json import read_json, to_json, ujson_dumps, ujson_loads from pandas.io.json._table_schema import build_table_schema __all__ = ['ujson_dumps', 'ujson_loads', 'read_json', 'to_json', 'build_table_schema'] # File: pandas-main/pandas/io/json/_json.py from __future__ import annotations from abc import ABC, abstractmethod from collections import abc from itertools import islice from typing import TYPE_CHECKING, Any, Generic, Literal, TypeVar, final, overload import numpy as np from pandas._libs import lib from pandas._libs.json import ujson_dumps, ujson_loads from pandas._libs.tslibs import iNaT from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError from pandas.util._decorators import doc from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import ensure_str, is_string_dtype from pandas.core.dtypes.dtypes import PeriodDtype from pandas import ArrowDtype, DataFrame, Index, MultiIndex, Series, isna, notna, to_datetime from pandas.core.reshape.concat import concat from pandas.core.shared_docs import _shared_docs from pandas.io.common import IOHandles, dedup_names, get_handle, is_potential_multi_index, stringify_path from pandas.io.json._normalize import convert_to_line_delimits from pandas.io.json._table_schema import build_table_schema, parse_table_schema, set_default_names from pandas.io.parsers.readers import validate_integer 
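# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the pandas source reproduced here: a
# minimal to_json / read_json round trip exercising the validation implemented
# below (lines=True is only accepted together with orient='records'). The
# DataFrame contents are made up for the example.
from io import StringIO

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})

# Line-delimited JSON: one record per line; the index is not written for
# orient='records'.
jsonl = df.to_json(orient="records", lines=True)

# Wrap the string in StringIO, since read_json treats its first argument as a
# path or buffer.
roundtrip = pd.read_json(StringIO(jsonl), orient="records", lines=True)
# roundtrip now holds the same values as df, with a fresh default index.
# ---------------------------------------------------------------------------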
if TYPE_CHECKING: from collections.abc import Callable, Hashable, Mapping from types import TracebackType from pandas._typing import CompressionOptions, DtypeArg, DtypeBackend, FilePath, IndexLabel, JSONEngine, JSONSerializable, ReadBuffer, Self, StorageOptions, WriteBuffer from pandas.core.generic import NDFrame FrameSeriesStrT = TypeVar('FrameSeriesStrT', bound=Literal['frame', 'series']) @overload def to_json(path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes], obj: NDFrame, orient: str | None=..., date_format: str=..., double_precision: int=..., force_ascii: bool=..., date_unit: str=..., default_handler: Callable[[Any], JSONSerializable] | None=..., lines: bool=..., compression: CompressionOptions=..., index: bool | None=..., indent: int=..., storage_options: StorageOptions=..., mode: Literal['a', 'w']=...) -> None: ... @overload def to_json(path_or_buf: None, obj: NDFrame, orient: str | None=..., date_format: str=..., double_precision: int=..., force_ascii: bool=..., date_unit: str=..., default_handler: Callable[[Any], JSONSerializable] | None=..., lines: bool=..., compression: CompressionOptions=..., index: bool | None=..., indent: int=..., storage_options: StorageOptions=..., mode: Literal['a', 'w']=...) -> str: ... def to_json(path_or_buf: FilePath | WriteBuffer[str] | WriteBuffer[bytes] | None, obj: NDFrame, orient: str | None=None, date_format: str='epoch', double_precision: int=10, force_ascii: bool=True, date_unit: str='ms', default_handler: Callable[[Any], JSONSerializable] | None=None, lines: bool=False, compression: CompressionOptions='infer', index: bool | None=None, indent: int=0, storage_options: StorageOptions | None=None, mode: Literal['a', 'w']='w') -> str | None: if orient in ['records', 'values'] and index is True: raise ValueError("'index=True' is only valid when 'orient' is 'split', 'table', 'index', or 'columns'.") elif orient in ['index', 'columns'] and index is False: raise ValueError("'index=False' is only valid when 'orient' is 'split', 'table', 'records', or 'values'.") elif index is None: index = True if lines and orient != 'records': raise ValueError("'lines' keyword only valid when 'orient' is records") if mode not in ['a', 'w']: msg = f"mode={mode} is not a valid option.Only 'w' and 'a' are currently supported." 
raise ValueError(msg) if mode == 'a' and (not lines or orient != 'records'): msg = "mode='a' (append) is only supported when lines is True and orient is 'records'" raise ValueError(msg) if orient == 'table' and isinstance(obj, Series): obj = obj.to_frame(name=obj.name or 'values') writer: type[Writer] if orient == 'table' and isinstance(obj, DataFrame): writer = JSONTableWriter elif isinstance(obj, Series): writer = SeriesWriter elif isinstance(obj, DataFrame): writer = FrameWriter else: raise NotImplementedError("'obj' should be a Series or a DataFrame") s = writer(obj, orient=orient, date_format=date_format, double_precision=double_precision, ensure_ascii=force_ascii, date_unit=date_unit, default_handler=default_handler, index=index, indent=indent).write() if lines: s = convert_to_line_delimits(s) if path_or_buf is not None: with get_handle(path_or_buf, mode, compression=compression, storage_options=storage_options) as handles: handles.handle.write(s) else: return s return None class Writer(ABC): _default_orient: str def __init__(self, obj: NDFrame, orient: str | None, date_format: str, double_precision: int, ensure_ascii: bool, date_unit: str, index: bool, default_handler: Callable[[Any], JSONSerializable] | None=None, indent: int=0) -> None: self.obj = obj if orient is None: orient = self._default_orient self.orient = orient self.date_format = date_format self.double_precision = double_precision self.ensure_ascii = ensure_ascii self.date_unit = date_unit self.default_handler = default_handler self.index = index self.indent = indent self._format_axes() def _format_axes(self) -> None: raise AbstractMethodError(self) def write(self) -> str: iso_dates = self.date_format == 'iso' return ujson_dumps(self.obj_to_write, orient=self.orient, double_precision=self.double_precision, ensure_ascii=self.ensure_ascii, date_unit=self.date_unit, iso_dates=iso_dates, default_handler=self.default_handler, indent=self.indent) @property @abstractmethod def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]: class SeriesWriter(Writer): _default_orient = 'index' @property def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]: if not self.index and self.orient == 'split': return {'name': self.obj.name, 'data': self.obj.values} else: return self.obj def _format_axes(self) -> None: if not self.obj.index.is_unique and self.orient == 'index': raise ValueError(f"Series index must be unique for orient='{self.orient}'") class FrameWriter(Writer): _default_orient = 'columns' @property def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]: if not self.index and self.orient == 'split': obj_to_write = self.obj.to_dict(orient='split') del obj_to_write['index'] else: obj_to_write = self.obj return obj_to_write def _format_axes(self) -> None: if not self.obj.index.is_unique and self.orient in ('index', 'columns'): raise ValueError(f"DataFrame index must be unique for orient='{self.orient}'.") if not self.obj.columns.is_unique and self.orient in ('index', 'columns', 'records'): raise ValueError(f"DataFrame columns must be unique for orient='{self.orient}'.") class JSONTableWriter(FrameWriter): _default_orient = 'records' def __init__(self, obj, orient: str | None, date_format: str, double_precision: int, ensure_ascii: bool, date_unit: str, index: bool, default_handler: Callable[[Any], JSONSerializable] | None=None, indent: int=0) -> None: super().__init__(obj, orient, date_format, double_precision, ensure_ascii, date_unit, index, default_handler=default_handler, indent=indent) if date_format != 'iso': msg 
= f"Trying to write with `orient='table'` and `date_format='{date_format}'`. Table Schema requires dates to be formatted with `date_format='iso'`" raise ValueError(msg) self.schema = build_table_schema(obj, index=self.index) if self.index: obj = set_default_names(obj) if obj.ndim == 2 and isinstance(obj.columns, MultiIndex): raise NotImplementedError("orient='table' is not supported for MultiIndex columns") if obj.ndim == 1 and obj.name in set(obj.index.names) or len(obj.columns.intersection(obj.index.names)): msg = 'Overlapping names between the index and columns' raise ValueError(msg) timedeltas = obj.select_dtypes(include=['timedelta']).columns copied = False if len(timedeltas): obj = obj.copy() copied = True obj[timedeltas] = obj[timedeltas].map(lambda x: x.isoformat()) if not self.index: self.obj = obj.reset_index(drop=True) else: if isinstance(obj.index.dtype, PeriodDtype): if not copied: obj = obj.copy(deep=False) obj.index = obj.index.to_timestamp() self.obj = obj.reset_index(drop=False) self.date_format = 'iso' self.orient = 'records' self.index = index @property def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]: return {'schema': self.schema, 'data': self.obj} @overload def read_json(path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], *, orient: str | None=..., typ: Literal['frame']=..., dtype: DtypeArg | None=..., convert_axes: bool | None=..., convert_dates: bool | list[str]=..., keep_default_dates: bool=..., precise_float: bool=..., date_unit: str | None=..., encoding: str | None=..., encoding_errors: str | None=..., lines: bool=..., chunksize: int, compression: CompressionOptions=..., nrows: int | None=..., storage_options: StorageOptions=..., dtype_backend: DtypeBackend | lib.NoDefault=..., engine: JSONEngine=...) -> JsonReader[Literal['frame']]: ... @overload def read_json(path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], *, orient: str | None=..., typ: Literal['series'], dtype: DtypeArg | None=..., convert_axes: bool | None=..., convert_dates: bool | list[str]=..., keep_default_dates: bool=..., precise_float: bool=..., date_unit: str | None=..., encoding: str | None=..., encoding_errors: str | None=..., lines: bool=..., chunksize: int, compression: CompressionOptions=..., nrows: int | None=..., storage_options: StorageOptions=..., dtype_backend: DtypeBackend | lib.NoDefault=..., engine: JSONEngine=...) -> JsonReader[Literal['series']]: ... @overload def read_json(path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], *, orient: str | None=..., typ: Literal['series'], dtype: DtypeArg | None=..., convert_axes: bool | None=..., convert_dates: bool | list[str]=..., keep_default_dates: bool=..., precise_float: bool=..., date_unit: str | None=..., encoding: str | None=..., encoding_errors: str | None=..., lines: bool=..., chunksize: None=..., compression: CompressionOptions=..., nrows: int | None=..., storage_options: StorageOptions=..., dtype_backend: DtypeBackend | lib.NoDefault=..., engine: JSONEngine=...) -> Series: ... 
@overload def read_json(path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], *, orient: str | None=..., typ: Literal['frame']=..., dtype: DtypeArg | None=..., convert_axes: bool | None=..., convert_dates: bool | list[str]=..., keep_default_dates: bool=..., precise_float: bool=..., date_unit: str | None=..., encoding: str | None=..., encoding_errors: str | None=..., lines: bool=..., chunksize: None=..., compression: CompressionOptions=..., nrows: int | None=..., storage_options: StorageOptions=..., dtype_backend: DtypeBackend | lib.NoDefault=..., engine: JSONEngine=...) -> DataFrame: ... @doc(storage_options=_shared_docs['storage_options'], decompression_options=_shared_docs['decompression_options'] % 'path_or_buf') def read_json(path_or_buf: FilePath | ReadBuffer[str] | ReadBuffer[bytes], *, orient: str | None=None, typ: Literal['frame', 'series']='frame', dtype: DtypeArg | None=None, convert_axes: bool | None=None, convert_dates: bool | list[str]=True, keep_default_dates: bool=True, precise_float: bool=False, date_unit: str | None=None, encoding: str | None=None, encoding_errors: str | None='strict', lines: bool=False, chunksize: int | None=None, compression: CompressionOptions='infer', nrows: int | None=None, storage_options: StorageOptions | None=None, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default, engine: JSONEngine='ujson') -> DataFrame | Series | JsonReader: if orient == 'table' and dtype: raise ValueError("cannot pass both dtype and orient='table'") if orient == 'table' and convert_axes: raise ValueError("cannot pass both convert_axes and orient='table'") check_dtype_backend(dtype_backend) if dtype is None and orient != 'table': dtype = True if convert_axes is None and orient != 'table': convert_axes = True json_reader = JsonReader(path_or_buf, orient=orient, typ=typ, dtype=dtype, convert_axes=convert_axes, convert_dates=convert_dates, keep_default_dates=keep_default_dates, precise_float=precise_float, date_unit=date_unit, encoding=encoding, lines=lines, chunksize=chunksize, compression=compression, nrows=nrows, storage_options=storage_options, encoding_errors=encoding_errors, dtype_backend=dtype_backend, engine=engine) if chunksize: return json_reader else: return json_reader.read() class JsonReader(abc.Iterator, Generic[FrameSeriesStrT]): def __init__(self, filepath_or_buffer, orient, typ: FrameSeriesStrT, dtype, convert_axes: bool | None, convert_dates, keep_default_dates: bool, precise_float: bool, date_unit, encoding, lines: bool, chunksize: int | None, compression: CompressionOptions, nrows: int | None, storage_options: StorageOptions | None=None, encoding_errors: str | None='strict', dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default, engine: JSONEngine='ujson') -> None: self.orient = orient self.typ = typ self.dtype = dtype self.convert_axes = convert_axes self.convert_dates = convert_dates self.keep_default_dates = keep_default_dates self.precise_float = precise_float self.date_unit = date_unit self.encoding = encoding self.engine = engine self.compression = compression self.storage_options = storage_options self.lines = lines self.chunksize = chunksize self.nrows_seen = 0 self.nrows = nrows self.encoding_errors = encoding_errors self.handles: IOHandles[str] | None = None self.dtype_backend = dtype_backend if self.engine not in {'pyarrow', 'ujson'}: raise ValueError(f'The engine type {self.engine} is currently not supported.') if self.chunksize is not None: self.chunksize = validate_integer('chunksize', self.chunksize, 1) if not self.lines: 
raise ValueError('chunksize can only be passed if lines=True') if self.engine == 'pyarrow': raise ValueError("currently pyarrow engine doesn't support chunksize parameter") if self.nrows is not None: self.nrows = validate_integer('nrows', self.nrows, 0) if not self.lines: raise ValueError('nrows can only be passed if lines=True') if self.engine == 'pyarrow': if not self.lines: raise ValueError('currently pyarrow engine only supports the line-delimited JSON format') self.data = filepath_or_buffer elif self.engine == 'ujson': data = self._get_data_from_filepath(filepath_or_buffer) if not (self.chunksize or self.nrows): with self: self.data = data.read() else: self.data = data def _get_data_from_filepath(self, filepath_or_buffer): filepath_or_buffer = stringify_path(filepath_or_buffer) try: self.handles = get_handle(filepath_or_buffer, 'r', encoding=self.encoding, compression=self.compression, storage_options=self.storage_options, errors=self.encoding_errors) except OSError as err: raise FileNotFoundError(f'File {filepath_or_buffer} does not exist') from err filepath_or_buffer = self.handles.handle return filepath_or_buffer def _combine_lines(self, lines) -> str: return f"[{','.join([line for line in (line.strip() for line in lines) if line])}]" @overload def read(self: JsonReader[Literal['frame']]) -> DataFrame: ... @overload def read(self: JsonReader[Literal['series']]) -> Series: ... @overload def read(self: JsonReader[Literal['frame', 'series']]) -> DataFrame | Series: ... def read(self) -> DataFrame | Series: obj: DataFrame | Series with self: if self.engine == 'pyarrow': pyarrow_json = import_optional_dependency('pyarrow.json') pa_table = pyarrow_json.read_json(self.data) mapping: type[ArrowDtype] | None | Callable if self.dtype_backend == 'pyarrow': mapping = ArrowDtype elif self.dtype_backend == 'numpy_nullable': from pandas.io._util import _arrow_dtype_mapping mapping = _arrow_dtype_mapping().get else: mapping = None return pa_table.to_pandas(types_mapper=mapping) elif self.engine == 'ujson': if self.lines: if self.chunksize: obj = concat(self) elif self.nrows: lines = list(islice(self.data, self.nrows)) lines_json = self._combine_lines(lines) obj = self._get_object_parser(lines_json) else: data = ensure_str(self.data) data_lines = data.split('\n') obj = self._get_object_parser(self._combine_lines(data_lines)) else: obj = self._get_object_parser(self.data) if self.dtype_backend is not lib.no_default: return obj.convert_dtypes(infer_objects=False, dtype_backend=self.dtype_backend) else: return obj def _get_object_parser(self, json: str) -> DataFrame | Series: typ = self.typ dtype = self.dtype kwargs = {'orient': self.orient, 'dtype': self.dtype, 'convert_axes': self.convert_axes, 'convert_dates': self.convert_dates, 'keep_default_dates': self.keep_default_dates, 'precise_float': self.precise_float, 'date_unit': self.date_unit, 'dtype_backend': self.dtype_backend} if typ == 'frame': return FrameParser(json, **kwargs).parse() elif typ == 'series': if not isinstance(dtype, bool): kwargs['dtype'] = dtype return SeriesParser(json, **kwargs).parse() else: raise ValueError(f"typ={typ!r} must be 'frame' or 'series'.") def close(self) -> None: if self.handles is not None: self.handles.close() def __iter__(self) -> Self: return self @overload def __next__(self: JsonReader[Literal['frame']]) -> DataFrame: ... @overload def __next__(self: JsonReader[Literal['series']]) -> Series: ... @overload def __next__(self: JsonReader[Literal['frame', 'series']]) -> DataFrame | Series: ... 
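# Illustrative note, not part of the original class: __next__ below drives
# chunked reading. With line-delimited input and an integer chunksize, the
# usual pattern is roughly
#
#     with pd.read_json(path, lines=True, chunksize=10_000) as reader:
#         for chunk in reader:      # each chunk is a DataFrame
#             process(chunk)
#
# where `path` and `process` are placeholders for this sketch. Each chunk's
# index is renumbered from nrows_seen, and iteration stops once nrows (if
# given) is reached or the input is exhausted.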
def __next__(self) -> DataFrame | Series: if self.nrows and self.nrows_seen >= self.nrows: self.close() raise StopIteration lines = list(islice(self.data, self.chunksize)) if not lines: self.close() raise StopIteration try: lines_json = self._combine_lines(lines) obj = self._get_object_parser(lines_json) obj.index = range(self.nrows_seen, self.nrows_seen + len(obj)) self.nrows_seen += len(obj) except Exception as ex: self.close() raise ex if self.dtype_backend is not lib.no_default: return obj.convert_dtypes(infer_objects=False, dtype_backend=self.dtype_backend) else: return obj def __enter__(self) -> Self: return self def __exit__(self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None) -> None: self.close() class Parser: _split_keys: tuple[str, ...] _default_orient: str _STAMP_UNITS = ('s', 'ms', 'us', 'ns') _MIN_STAMPS = {'s': 31536000, 'ms': 31536000000, 'us': 31536000000000, 'ns': 31536000000000000} json: str def __init__(self, json: str, orient, dtype: DtypeArg | None=None, convert_axes: bool=True, convert_dates: bool | list[str]=True, keep_default_dates: bool=False, precise_float: bool=False, date_unit=None, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default) -> None: self.json = json if orient is None: orient = self._default_orient self.orient = orient self.dtype = dtype if date_unit is not None: date_unit = date_unit.lower() if date_unit not in self._STAMP_UNITS: raise ValueError(f'date_unit must be one of {self._STAMP_UNITS}') self.min_stamp = self._MIN_STAMPS[date_unit] else: self.min_stamp = self._MIN_STAMPS['s'] self.precise_float = precise_float self.convert_axes = convert_axes self.convert_dates = convert_dates self.date_unit = date_unit self.keep_default_dates = keep_default_dates self.dtype_backend = dtype_backend @final def check_keys_split(self, decoded: dict) -> None: bad_keys = set(decoded.keys()).difference(set(self._split_keys)) if bad_keys: bad_keys_joined = ', '.join(bad_keys) raise ValueError(f'JSON data had unexpected key(s): {bad_keys_joined}') @final def parse(self) -> DataFrame | Series: obj = self._parse() if self.convert_axes: obj = self._convert_axes(obj) obj = self._try_convert_types(obj) return obj def _parse(self) -> DataFrame | Series: raise AbstractMethodError(self) @final def _convert_axes(self, obj: DataFrame | Series) -> DataFrame | Series: for axis_name in obj._AXIS_ORDERS: ax = obj._get_axis(axis_name) ser = Series(ax, dtype=ax.dtype, copy=False) (new_ser, result) = self._try_convert_data(name=axis_name, data=ser, use_dtypes=False, convert_dates=True, is_axis=True) if result: new_axis = Index(new_ser, dtype=new_ser.dtype, copy=False) setattr(obj, axis_name, new_axis) return obj def _try_convert_types(self, obj): raise AbstractMethodError(self) @final def _try_convert_data(self, name: Hashable, data: Series, use_dtypes: bool=True, convert_dates: bool | list[str]=True, is_axis: bool=False) -> tuple[Series, bool]: if use_dtypes: if not self.dtype: if all(notna(data)): return (data, False) filled = data.fillna(np.nan) return (filled, True) elif self.dtype is True: pass elif not _should_convert_dates(convert_dates, self.keep_default_dates, name): dtype = self.dtype.get(name) if isinstance(self.dtype, dict) else self.dtype if dtype is not None: try: return (data.astype(dtype), True) except (TypeError, ValueError): return (data, False) if convert_dates: new_data = self._try_convert_to_date(data) if new_data is not data: return (new_data, True) converted = False if self.dtype_backend is not 
lib.no_default and (not is_axis): return (data, True) elif is_string_dtype(data.dtype): try: data = data.astype('float64') converted = True except (TypeError, ValueError): pass if data.dtype.kind == 'f' and data.dtype != 'float64': try: data = data.astype('float64') converted = True except (TypeError, ValueError): pass if len(data) and data.dtype in ('float', 'object'): try: new_data = data.astype('int64') if (new_data == data).all(): data = new_data converted = True except (TypeError, ValueError, OverflowError): pass if data.dtype == 'int' and data.dtype != 'int64': try: data = data.astype('int64') converted = True except (TypeError, ValueError): pass if name == 'index' and len(data): if self.orient == 'split': return (data, False) return (data, converted) @final def _try_convert_to_date(self, data: Series) -> Series: if not len(data): return data new_data = data if new_data.dtype == 'string': new_data = new_data.astype(object) if new_data.dtype == 'object': try: new_data = data.astype('int64') except OverflowError: return data except (TypeError, ValueError): pass if issubclass(new_data.dtype.type, np.number): in_range = isna(new_data._values) | (new_data > self.min_stamp) | (new_data._values == iNaT) if not in_range.all(): return data date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS for date_unit in date_units: try: return to_datetime(new_data, errors='raise', unit=date_unit) except (ValueError, OverflowError, TypeError): continue return data class SeriesParser(Parser): _default_orient = 'index' _split_keys = ('name', 'index', 'data') def _parse(self) -> Series: data = ujson_loads(self.json, precise_float=self.precise_float) if self.orient == 'split': decoded = {str(k): v for (k, v) in data.items()} self.check_keys_split(decoded) return Series(**decoded) else: return Series(data) def _try_convert_types(self, obj: Series) -> Series: (obj, _) = self._try_convert_data('data', obj, convert_dates=self.convert_dates) return obj class FrameParser(Parser): _default_orient = 'columns' _split_keys = ('columns', 'index', 'data') def _parse(self) -> DataFrame: json = self.json orient = self.orient if orient == 'split': decoded = {str(k): v for (k, v) in ujson_loads(json, precise_float=self.precise_float).items()} self.check_keys_split(decoded) orig_names = [tuple(col) if isinstance(col, list) else col for col in decoded['columns']] decoded['columns'] = dedup_names(orig_names, is_potential_multi_index(orig_names, None)) return DataFrame(dtype=None, **decoded) elif orient == 'index': return DataFrame.from_dict(ujson_loads(json, precise_float=self.precise_float), dtype=None, orient='index') elif orient == 'table': return parse_table_schema(json, precise_float=self.precise_float) else: return DataFrame(ujson_loads(json, precise_float=self.precise_float), dtype=None) def _try_convert_types(self, obj: DataFrame) -> DataFrame: arrays = [] for (col_label, series) in obj.items(): (result, _) = self._try_convert_data(col_label, series, convert_dates=_should_convert_dates(self.convert_dates, keep_default_dates=self.keep_default_dates, col=col_label)) arrays.append(result.array) return DataFrame._from_arrays(arrays, obj.columns, obj.index, verify_integrity=False) def _should_convert_dates(convert_dates: bool | list[str], keep_default_dates: bool, col: Hashable) -> bool: if convert_dates is False: return False elif not isinstance(convert_dates, bool) and col in set(convert_dates): return True elif not keep_default_dates: return False elif not isinstance(col, str): return False col_lower = 
col.lower() if col_lower.endswith(('_at', '_time')) or col_lower in {'modified', 'date', 'datetime'} or col_lower.startswith('timestamp'): return True return False # File: pandas-main/pandas/io/json/_normalize.py from __future__ import annotations from collections import abc, defaultdict import copy from typing import TYPE_CHECKING, Any, DefaultDict, overload import numpy as np from pandas._libs.writers import convert_json_to_lines import pandas as pd from pandas import DataFrame, Series if TYPE_CHECKING: from collections.abc import Iterable from pandas._typing import IgnoreRaise, Scalar def convert_to_line_delimits(s: str) -> str: if not s[0] == '[' and s[-1] == ']': return s s = s[1:-1] return convert_json_to_lines(s) @overload def nested_to_record(ds: dict, prefix: str=..., sep: str=..., level: int=..., max_level: int | None=...) -> dict[str, Any]: ... @overload def nested_to_record(ds: list[dict], prefix: str=..., sep: str=..., level: int=..., max_level: int | None=...) -> list[dict[str, Any]]: ... def nested_to_record(ds: dict | list[dict], prefix: str='', sep: str='.', level: int=0, max_level: int | None=None) -> dict[str, Any] | list[dict[str, Any]]: singleton = False if isinstance(ds, dict): ds = [ds] singleton = True new_ds = [] for d in ds: new_d = copy.deepcopy(d) for (k, v) in d.items(): if not isinstance(k, str): k = str(k) if level == 0: newkey = k else: newkey = prefix + sep + k if not isinstance(v, dict) or (max_level is not None and level >= max_level): if level != 0: v = new_d.pop(k) new_d[newkey] = v continue v = new_d.pop(k) new_d.update(nested_to_record(v, newkey, sep, level + 1, max_level)) new_ds.append(new_d) if singleton: return new_ds[0] return new_ds def _normalise_json(data: Any, key_string: str, normalized_dict: dict[str, Any], separator: str) -> dict[str, Any]: if isinstance(data, dict): for (key, value) in data.items(): new_key = f'{key_string}{separator}{key}' if not key_string: new_key = new_key.removeprefix(separator) _normalise_json(data=value, key_string=new_key, normalized_dict=normalized_dict, separator=separator) else: normalized_dict[key_string] = data return normalized_dict def _normalise_json_ordered(data: dict[str, Any], separator: str) -> dict[str, Any]: top_dict_ = {k: v for (k, v) in data.items() if not isinstance(v, dict)} nested_dict_ = _normalise_json(data={k: v for (k, v) in data.items() if isinstance(v, dict)}, key_string='', normalized_dict={}, separator=separator) return {**top_dict_, **nested_dict_} def _simple_json_normalize(ds: dict | list[dict], sep: str='.') -> dict | list[dict] | Any: normalised_json_object = {} if isinstance(ds, dict): normalised_json_object = _normalise_json_ordered(data=ds, separator=sep) elif isinstance(ds, list): normalised_json_list = [_simple_json_normalize(row, sep=sep) for row in ds] return normalised_json_list return normalised_json_object def json_normalize(data: dict | list[dict] | Series, record_path: str | list | None=None, meta: str | list[str | list[str]] | None=None, meta_prefix: str | None=None, record_prefix: str | None=None, errors: IgnoreRaise='raise', sep: str='.', max_level: int | None=None) -> DataFrame: def _pull_field(js: dict[str, Any], spec: list | str, extract_record: bool=False) -> Scalar | Iterable: result = js try: if isinstance(spec, list): for field in spec: if result is None: raise KeyError(field) result = result[field] else: result = result[spec] except KeyError as e: if extract_record: raise KeyError(f'Key {e} not found. 
If specifying a record_path, all elements of data should have the path.') from e if errors == 'ignore': return np.nan else: raise KeyError(f"Key {e} not found. To replace missing values of {e} with np.nan, pass in errors='ignore'") from e return result def _pull_records(js: dict[str, Any], spec: list | str) -> list: result = _pull_field(js, spec, extract_record=True) if not isinstance(result, list): if pd.isnull(result): result = [] else: raise TypeError(f'Path must contain list or null, but got {type(result).__name__} at {spec!r}') return result if isinstance(data, Series): index = data.index else: index = None if isinstance(data, list) and (not data): return DataFrame() elif isinstance(data, dict): data = [data] elif isinstance(data, abc.Iterable) and (not isinstance(data, str)): data = list(data) else: raise NotImplementedError if record_path is None and meta is None and (meta_prefix is None) and (record_prefix is None) and (max_level is None): return DataFrame(_simple_json_normalize(data, sep=sep), index=index) if record_path is None: if any(([isinstance(x, dict) for x in y.values()] for y in data)): data = nested_to_record(data, sep=sep, max_level=max_level) return DataFrame(data, index=index) elif not isinstance(record_path, list): record_path = [record_path] if meta is None: meta = [] elif not isinstance(meta, list): meta = [meta] _meta = [m if isinstance(m, list) else [m] for m in meta] records: list = [] lengths = [] meta_vals: DefaultDict = defaultdict(list) meta_keys = [sep.join(val) for val in _meta] def _recursive_extract(data, path, seen_meta, level: int=0) -> None: if isinstance(data, dict): data = [data] if len(path) > 1: for obj in data: for (val, key) in zip(_meta, meta_keys): if level + 1 == len(val): seen_meta[key] = _pull_field(obj, val[-1]) _recursive_extract(obj[path[0]], path[1:], seen_meta, level=level + 1) else: for obj in data: recs = _pull_records(obj, path[0]) recs = [nested_to_record(r, sep=sep, max_level=max_level) if isinstance(r, dict) else r for r in recs] lengths.append(len(recs)) for (val, key) in zip(_meta, meta_keys): if level + 1 > len(val): meta_val = seen_meta[key] else: meta_val = _pull_field(obj, val[level:]) meta_vals[key].append(meta_val) records.extend(recs) _recursive_extract(data, record_path, {}, level=0) result = DataFrame(records) if record_prefix is not None: result = result.rename(columns=lambda x: f'{record_prefix}{x}') for (k, v) in meta_vals.items(): if meta_prefix is not None: k = meta_prefix + k if k in result: raise ValueError(f'Conflicting metadata name {k}, need distinguishing prefix ') values = np.array(v, dtype=object) if values.ndim > 1: values = np.empty((len(v),), dtype=object) for (i, val) in enumerate(v): values[i] = val result[k] = values.repeat(lengths) if index is not None: result.index = index.repeat(lengths) return result # File: pandas-main/pandas/io/json/_table_schema.py """""" from __future__ import annotations from typing import TYPE_CHECKING, Any, cast import warnings from pandas._libs import lib from pandas._libs.json import ujson_loads from pandas._libs.tslibs import timezones from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import _registry as registry from pandas.core.dtypes.common import is_bool_dtype, is_integer_dtype, is_numeric_dtype, is_string_dtype from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype, ExtensionDtype, PeriodDtype from pandas import DataFrame import pandas.core.common as com from pandas.tseries.frequencies import to_offset if 
TYPE_CHECKING: from pandas._typing import DtypeObj, JSONSerializable from pandas import Series from pandas.core.indexes.multi import MultiIndex TABLE_SCHEMA_VERSION = '1.4.0' def as_json_table_type(x: DtypeObj) -> str: if is_integer_dtype(x): return 'integer' elif is_bool_dtype(x): return 'boolean' elif is_numeric_dtype(x): return 'number' elif lib.is_np_dtype(x, 'M') or isinstance(x, (DatetimeTZDtype, PeriodDtype)): return 'datetime' elif lib.is_np_dtype(x, 'm'): return 'duration' elif isinstance(x, ExtensionDtype): return 'any' elif is_string_dtype(x): return 'string' else: return 'any' def set_default_names(data): if com.all_not_none(*data.index.names): nms = data.index.names if len(nms) == 1 and data.index.name == 'index': warnings.warn("Index name of 'index' is not round-trippable.", stacklevel=find_stack_level()) elif len(nms) > 1 and any((x.startswith('level_') for x in nms)): warnings.warn("Index names beginning with 'level_' are not round-trippable.", stacklevel=find_stack_level()) return data data = data.copy(deep=False) if data.index.nlevels > 1: data.index.names = com.fill_missing_names(data.index.names) else: data.index.name = data.index.name or 'index' return data def convert_pandas_type_to_json_field(arr) -> dict[str, JSONSerializable]: dtype = arr.dtype name: JSONSerializable if arr.name is None: name = 'values' else: name = arr.name field: dict[str, JSONSerializable] = {'name': name, 'type': as_json_table_type(dtype)} if isinstance(dtype, CategoricalDtype): cats = dtype.categories ordered = dtype.ordered field['constraints'] = {'enum': list(cats)} field['ordered'] = ordered elif isinstance(dtype, PeriodDtype): field['freq'] = dtype.freq.freqstr elif isinstance(dtype, DatetimeTZDtype): if timezones.is_utc(dtype.tz): field['tz'] = 'UTC' else: zone = timezones.get_timezone(dtype.tz) if isinstance(zone, str): field['tz'] = zone elif isinstance(dtype, ExtensionDtype): field['extDtype'] = dtype.name return field def convert_json_field_to_pandas_type(field) -> str | CategoricalDtype: typ = field['type'] if typ == 'string': return 'object' elif typ == 'integer': return field.get('extDtype', 'int64') elif typ == 'number': return field.get('extDtype', 'float64') elif typ == 'boolean': return field.get('extDtype', 'bool') elif typ == 'duration': return 'timedelta64' elif typ == 'datetime': if field.get('tz'): return f"datetime64[ns, {field['tz']}]" elif field.get('freq'): offset = to_offset(field['freq']) freq = PeriodDtype(offset)._freqstr return f'period[{freq}]' else: return 'datetime64[ns]' elif typ == 'any': if 'constraints' in field and 'ordered' in field: return CategoricalDtype(categories=field['constraints']['enum'], ordered=field['ordered']) elif 'extDtype' in field: return registry.find(field['extDtype']) else: return 'object' raise ValueError(f'Unsupported or invalid field type: {typ}') def build_table_schema(data: DataFrame | Series, index: bool=True, primary_key: bool | None=None, version: bool=True) -> dict[str, JSONSerializable]: if index is True: data = set_default_names(data) schema: dict[str, Any] = {} fields = [] if index: if data.index.nlevels > 1: data.index = cast('MultiIndex', data.index) for (level, name) in zip(data.index.levels, data.index.names): new_field = convert_pandas_type_to_json_field(level) new_field['name'] = name fields.append(new_field) else: fields.append(convert_pandas_type_to_json_field(data.index)) if data.ndim > 1: for (column, s) in data.items(): fields.append(convert_pandas_type_to_json_field(s)) else: 
fields.append(convert_pandas_type_to_json_field(data)) schema['fields'] = fields if index and data.index.is_unique and (primary_key is None): if data.index.nlevels == 1: schema['primaryKey'] = [data.index.name] else: schema['primaryKey'] = data.index.names elif primary_key is not None: schema['primaryKey'] = primary_key if version: schema['pandas_version'] = TABLE_SCHEMA_VERSION return schema def parse_table_schema(json, precise_float: bool) -> DataFrame: table = ujson_loads(json, precise_float=precise_float) col_order = [field['name'] for field in table['schema']['fields']] df = DataFrame(table['data'], columns=col_order)[col_order] dtypes = {field['name']: convert_json_field_to_pandas_type(field) for field in table['schema']['fields']} if 'timedelta64' in dtypes.values(): raise NotImplementedError('table="orient" can not yet read ISO-formatted Timedelta data') df = df.astype(dtypes) if 'primaryKey' in table['schema']: df = df.set_index(table['schema']['primaryKey']) if len(df.index.names) == 1: if df.index.name == 'index': df.index.name = None else: df.index.names = [None if x.startswith('level_') else x for x in df.index.names] return df # File: pandas-main/pandas/io/orc.py """""" from __future__ import annotations import io from typing import TYPE_CHECKING, Any, Literal from pandas._config import using_string_dtype from pandas._libs import lib from pandas.compat._optional import import_optional_dependency from pandas.util._validators import check_dtype_backend import pandas as pd from pandas.core.indexes.api import default_index from pandas.io._util import arrow_string_types_mapper from pandas.io.common import get_handle, is_fsspec_url if TYPE_CHECKING: import fsspec import pyarrow.fs from pandas._typing import DtypeBackend, FilePath, ReadBuffer, WriteBuffer from pandas.core.frame import DataFrame def read_orc(path: FilePath | ReadBuffer[bytes], columns: list[str] | None=None, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default, filesystem: pyarrow.fs.FileSystem | fsspec.spec.AbstractFileSystem | None=None, **kwargs: Any) -> DataFrame: orc = import_optional_dependency('pyarrow.orc') check_dtype_backend(dtype_backend) with get_handle(path, 'rb', is_text=False) as handles: source = handles.handle if is_fsspec_url(path) and filesystem is None: pa = import_optional_dependency('pyarrow') pa_fs = import_optional_dependency('pyarrow.fs') try: (filesystem, source) = pa_fs.FileSystem.from_uri(path) except (TypeError, pa.ArrowInvalid): pass pa_table = orc.read_table(source=source, columns=columns, filesystem=filesystem, **kwargs) if dtype_backend is not lib.no_default: if dtype_backend == 'pyarrow': df = pa_table.to_pandas(types_mapper=pd.ArrowDtype) else: from pandas.io._util import _arrow_dtype_mapping mapping = _arrow_dtype_mapping() df = pa_table.to_pandas(types_mapper=mapping.get) return df else: if using_string_dtype(): types_mapper = arrow_string_types_mapper() else: types_mapper = None return pa_table.to_pandas(types_mapper=types_mapper) def to_orc(df: DataFrame, path: FilePath | WriteBuffer[bytes] | None=None, *, engine: Literal['pyarrow']='pyarrow', index: bool | None=None, engine_kwargs: dict[str, Any] | None=None) -> bytes | None: if index is None: index = df.index.names[0] is not None if engine_kwargs is None: engine_kwargs = {} if not df.index.equals(default_index(len(df))): raise ValueError('orc does not support serializing a non-default index for the index; you can .reset_index() to make the index into column(s)') if df.index.name is not None: raise ValueError('orc does 
not serialize index meta-data on a default index') if engine != 'pyarrow': raise ValueError("engine must be 'pyarrow'") pyarrow = import_optional_dependency(engine, min_version='10.0.1') pa = import_optional_dependency('pyarrow') orc = import_optional_dependency('pyarrow.orc') was_none = path is None if was_none: path = io.BytesIO() assert path is not None with get_handle(path, 'wb', is_text=False) as handles: try: orc.write_table(pyarrow.Table.from_pandas(df, preserve_index=index), handles.handle, **engine_kwargs) except (TypeError, pa.ArrowNotImplementedError) as e: raise NotImplementedError('The dtype of one or more columns is not supported yet.') from e if was_none: assert isinstance(path, io.BytesIO) return path.getvalue() return None # File: pandas-main/pandas/io/parquet.py """""" from __future__ import annotations import io import json import os from typing import TYPE_CHECKING, Any, Literal from warnings import catch_warnings, filterwarnings from pandas._config import using_string_dtype from pandas._libs import lib from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError from pandas.util._decorators import doc from pandas.util._validators import check_dtype_backend import pandas as pd from pandas import DataFrame, get_option from pandas.core.shared_docs import _shared_docs from pandas.io._util import arrow_string_types_mapper from pandas.io.common import IOHandles, get_handle, is_fsspec_url, is_url, stringify_path if TYPE_CHECKING: from pandas._typing import DtypeBackend, FilePath, ReadBuffer, StorageOptions, WriteBuffer def get_engine(engine: str) -> BaseImpl: if engine == 'auto': engine = get_option('io.parquet.engine') if engine == 'auto': engine_classes = [PyArrowImpl, FastParquetImpl] error_msgs = '' for engine_class in engine_classes: try: return engine_class() except ImportError as err: error_msgs += '\n - ' + str(err) raise ImportError(f"Unable to find a usable engine; tried using: 'pyarrow', 'fastparquet'.\nA suitable version of pyarrow or fastparquet is required for parquet support.\nTrying to import the above resulted in these errors:{error_msgs}") if engine == 'pyarrow': return PyArrowImpl() elif engine == 'fastparquet': return FastParquetImpl() raise ValueError("engine must be one of 'pyarrow', 'fastparquet'") def _get_path_or_handle(path: FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], fs: Any, storage_options: StorageOptions | None=None, mode: str='rb', is_dir: bool=False) -> tuple[FilePath | ReadBuffer[bytes] | WriteBuffer[bytes], IOHandles[bytes] | None, Any]: path_or_handle = stringify_path(path) if fs is not None: pa_fs = import_optional_dependency('pyarrow.fs', errors='ignore') fsspec = import_optional_dependency('fsspec', errors='ignore') if pa_fs is not None and isinstance(fs, pa_fs.FileSystem): if storage_options: raise NotImplementedError('storage_options not supported with a pyarrow FileSystem.') elif fsspec is not None and isinstance(fs, fsspec.spec.AbstractFileSystem): pass else: raise ValueError(f'filesystem must be a pyarrow or fsspec FileSystem, not a {type(fs).__name__}') if is_fsspec_url(path_or_handle) and fs is None: if storage_options is None: pa = import_optional_dependency('pyarrow') pa_fs = import_optional_dependency('pyarrow.fs') try: (fs, path_or_handle) = pa_fs.FileSystem.from_uri(path) except (TypeError, pa.ArrowInvalid): pass if fs is None: fsspec = import_optional_dependency('fsspec') (fs, path_or_handle) = fsspec.core.url_to_fs(path_or_handle, **storage_options or {}) elif 
storage_options and (not is_url(path_or_handle) or mode != 'rb'): raise ValueError('storage_options passed with buffer, or non-supported URL') handles = None if not fs and (not is_dir) and isinstance(path_or_handle, str) and (not os.path.isdir(path_or_handle)): handles = get_handle(path_or_handle, mode, is_text=False, storage_options=storage_options) fs = None path_or_handle = handles.handle return (path_or_handle, handles, fs) class BaseImpl: @staticmethod def validate_dataframe(df: DataFrame) -> None: if not isinstance(df, DataFrame): raise ValueError('to_parquet only supports IO with DataFrames') def write(self, df: DataFrame, path, compression, **kwargs) -> None: raise AbstractMethodError(self) def read(self, path, columns=None, **kwargs) -> DataFrame: raise AbstractMethodError(self) class PyArrowImpl(BaseImpl): def __init__(self) -> None: import_optional_dependency('pyarrow', extra='pyarrow is required for parquet support.') import pyarrow.parquet import pandas.core.arrays.arrow.extension_types self.api = pyarrow def write(self, df: DataFrame, path: FilePath | WriteBuffer[bytes], compression: str | None='snappy', index: bool | None=None, storage_options: StorageOptions | None=None, partition_cols: list[str] | None=None, filesystem=None, **kwargs) -> None: self.validate_dataframe(df) from_pandas_kwargs: dict[str, Any] = {'schema': kwargs.pop('schema', None)} if index is not None: from_pandas_kwargs['preserve_index'] = index table = self.api.Table.from_pandas(df, **from_pandas_kwargs) if df.attrs: df_metadata = {'PANDAS_ATTRS': json.dumps(df.attrs)} existing_metadata = table.schema.metadata merged_metadata = {**existing_metadata, **df_metadata} table = table.replace_schema_metadata(merged_metadata) (path_or_handle, handles, filesystem) = _get_path_or_handle(path, filesystem, storage_options=storage_options, mode='wb', is_dir=partition_cols is not None) if isinstance(path_or_handle, io.BufferedWriter) and hasattr(path_or_handle, 'name') and isinstance(path_or_handle.name, (str, bytes)): if isinstance(path_or_handle.name, bytes): path_or_handle = path_or_handle.name.decode() else: path_or_handle = path_or_handle.name try: if partition_cols is not None: self.api.parquet.write_to_dataset(table, path_or_handle, compression=compression, partition_cols=partition_cols, filesystem=filesystem, **kwargs) else: self.api.parquet.write_table(table, path_or_handle, compression=compression, filesystem=filesystem, **kwargs) finally: if handles is not None: handles.close() def read(self, path, columns=None, filters=None, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default, storage_options: StorageOptions | None=None, filesystem=None, **kwargs) -> DataFrame: kwargs['use_pandas_metadata'] = True to_pandas_kwargs = {} if dtype_backend == 'numpy_nullable': from pandas.io._util import _arrow_dtype_mapping mapping = _arrow_dtype_mapping() to_pandas_kwargs['types_mapper'] = mapping.get elif dtype_backend == 'pyarrow': to_pandas_kwargs['types_mapper'] = pd.ArrowDtype elif using_string_dtype(): to_pandas_kwargs['types_mapper'] = arrow_string_types_mapper() (path_or_handle, handles, filesystem) = _get_path_or_handle(path, filesystem, storage_options=storage_options, mode='rb') try: pa_table = self.api.parquet.read_table(path_or_handle, columns=columns, filesystem=filesystem, filters=filters, **kwargs) with catch_warnings(): filterwarnings('ignore', 'make_block is deprecated', DeprecationWarning) result = pa_table.to_pandas(**to_pandas_kwargs) if pa_table.schema.metadata: if b'PANDAS_ATTRS' in 
pa_table.schema.metadata: df_metadata = pa_table.schema.metadata[b'PANDAS_ATTRS'] result.attrs = json.loads(df_metadata) return result finally: if handles is not None: handles.close() class FastParquetImpl(BaseImpl): def __init__(self) -> None: fastparquet = import_optional_dependency('fastparquet', extra='fastparquet is required for parquet support.') self.api = fastparquet def write(self, df: DataFrame, path, compression: Literal['snappy', 'gzip', 'brotli'] | None='snappy', index=None, partition_cols=None, storage_options: StorageOptions | None=None, filesystem=None, **kwargs) -> None: self.validate_dataframe(df) if 'partition_on' in kwargs and partition_cols is not None: raise ValueError('Cannot use both partition_on and partition_cols. Use partition_cols for partitioning data') if 'partition_on' in kwargs: partition_cols = kwargs.pop('partition_on') if partition_cols is not None: kwargs['file_scheme'] = 'hive' if filesystem is not None: raise NotImplementedError('filesystem is not implemented for the fastparquet engine.') path = stringify_path(path) if is_fsspec_url(path): fsspec = import_optional_dependency('fsspec') kwargs['open_with'] = lambda path, _: fsspec.open(path, 'wb', **storage_options or {}).open() elif storage_options: raise ValueError('storage_options passed with file object or non-fsspec file path') with catch_warnings(record=True): self.api.write(path, df, compression=compression, write_index=index, partition_on=partition_cols, **kwargs) def read(self, path, columns=None, filters=None, storage_options: StorageOptions | None=None, filesystem=None, **kwargs) -> DataFrame: parquet_kwargs: dict[str, Any] = {} dtype_backend = kwargs.pop('dtype_backend', lib.no_default) parquet_kwargs['pandas_nulls'] = False if dtype_backend is not lib.no_default: raise ValueError("The 'dtype_backend' argument is not supported for the fastparquet engine") if filesystem is not None: raise NotImplementedError('filesystem is not implemented for the fastparquet engine.') path = stringify_path(path) handles = None if is_fsspec_url(path): fsspec = import_optional_dependency('fsspec') parquet_kwargs['fs'] = fsspec.open(path, 'rb', **storage_options or {}).fs elif isinstance(path, str) and (not os.path.isdir(path)): handles = get_handle(path, 'rb', is_text=False, storage_options=storage_options) path = handles.handle try: parquet_file = self.api.ParquetFile(path, **parquet_kwargs) with catch_warnings(): filterwarnings('ignore', 'make_block is deprecated', DeprecationWarning) return parquet_file.to_pandas(columns=columns, filters=filters, **kwargs) finally: if handles is not None: handles.close() @doc(storage_options=_shared_docs['storage_options']) def to_parquet(df: DataFrame, path: FilePath | WriteBuffer[bytes] | None=None, engine: str='auto', compression: str | None='snappy', index: bool | None=None, storage_options: StorageOptions | None=None, partition_cols: list[str] | None=None, filesystem: Any=None, **kwargs) -> bytes | None: if isinstance(partition_cols, str): partition_cols = [partition_cols] impl = get_engine(engine) path_or_buf: FilePath | WriteBuffer[bytes] = io.BytesIO() if path is None else path impl.write(df, path_or_buf, compression=compression, index=index, partition_cols=partition_cols, storage_options=storage_options, filesystem=filesystem, **kwargs) if path is None: assert isinstance(path_or_buf, io.BytesIO) return path_or_buf.getvalue() else: return None @doc(storage_options=_shared_docs['storage_options']) def read_parquet(path: FilePath | ReadBuffer[bytes], engine: str='auto', 
columns: list[str] | None=None, storage_options: StorageOptions | None=None, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default, filesystem: Any=None, filters: list[tuple] | list[list[tuple]] | None=None, **kwargs) -> DataFrame: impl = get_engine(engine) check_dtype_backend(dtype_backend) return impl.read(path, columns=columns, filters=filters, storage_options=storage_options, dtype_backend=dtype_backend, filesystem=filesystem, **kwargs) # File: pandas-main/pandas/io/parsers/arrow_parser_wrapper.py from __future__ import annotations from typing import TYPE_CHECKING import warnings from pandas._config import using_string_dtype from pandas._libs import lib from pandas.compat._optional import import_optional_dependency from pandas.errors import ParserError, ParserWarning from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import pandas_dtype from pandas.core.dtypes.inference import is_integer import pandas as pd from pandas import DataFrame from pandas.io._util import _arrow_dtype_mapping, arrow_string_types_mapper from pandas.io.parsers.base_parser import ParserBase if TYPE_CHECKING: from pandas._typing import ReadBuffer class ArrowParserWrapper(ParserBase): def __init__(self, src: ReadBuffer[bytes], **kwds) -> None: super().__init__(kwds) self.kwds = kwds self.src = src self._parse_kwds() def _parse_kwds(self) -> None: encoding: str | None = self.kwds.get('encoding') self.encoding = 'utf-8' if encoding is None else encoding na_values = self.kwds['na_values'] if isinstance(na_values, dict): raise ValueError("The pyarrow engine doesn't support passing a dict for na_values") self.na_values = list(self.kwds['na_values']) def _get_pyarrow_options(self) -> None: mapping = {'usecols': 'include_columns', 'na_values': 'null_values', 'escapechar': 'escape_char', 'skip_blank_lines': 'ignore_empty_lines', 'decimal': 'decimal_point', 'quotechar': 'quote_char'} for (pandas_name, pyarrow_name) in mapping.items(): if pandas_name in self.kwds and self.kwds.get(pandas_name) is not None: self.kwds[pyarrow_name] = self.kwds.pop(pandas_name) date_format = self.date_format if isinstance(date_format, str): date_format = [date_format] else: date_format = None self.kwds['timestamp_parsers'] = date_format self.parse_options = {option_name: option_value for (option_name, option_value) in self.kwds.items() if option_value is not None and option_name in ('delimiter', 'quote_char', 'escape_char', 'ignore_empty_lines')} on_bad_lines = self.kwds.get('on_bad_lines') if on_bad_lines is not None: if callable(on_bad_lines): self.parse_options['invalid_row_handler'] = on_bad_lines elif on_bad_lines == ParserBase.BadLineHandleMethod.ERROR: self.parse_options['invalid_row_handler'] = None elif on_bad_lines == ParserBase.BadLineHandleMethod.WARN: def handle_warning(invalid_row) -> str: warnings.warn(f'Expected {invalid_row.expected_columns} columns, but found {invalid_row.actual_columns}: {invalid_row.text}', ParserWarning, stacklevel=find_stack_level()) return 'skip' self.parse_options['invalid_row_handler'] = handle_warning elif on_bad_lines == ParserBase.BadLineHandleMethod.SKIP: self.parse_options['invalid_row_handler'] = lambda _: 'skip' self.convert_options = {option_name: option_value for (option_name, option_value) in self.kwds.items() if option_value is not None and option_name in ('include_columns', 'null_values', 'true_values', 'false_values', 'decimal_point', 'timestamp_parsers')} self.convert_options['strings_can_be_null'] = '' in self.kwds['null_values'] if self.header is None 
and 'include_columns' in self.convert_options: self.convert_options['include_columns'] = [f'f{n}' for n in self.convert_options['include_columns']] self.read_options = {'autogenerate_column_names': self.header is None, 'skip_rows': self.header if self.header is not None else self.kwds['skiprows'], 'encoding': self.encoding} def _finalize_pandas_output(self, frame: DataFrame) -> DataFrame: num_cols = len(frame.columns) multi_index_named = True if self.header is None: if self.names is None: if self.header is None: self.names = range(num_cols) if len(self.names) != num_cols: self.names = list(range(num_cols - len(self.names))) + self.names multi_index_named = False frame.columns = self.names frame = self._do_date_conversions(frame.columns, frame) if self.index_col is not None: index_to_set = self.index_col.copy() for (i, item) in enumerate(self.index_col): if is_integer(item): index_to_set[i] = frame.columns[item] elif item not in frame.columns: raise ValueError(f'Index {item} invalid') if self.dtype is not None: (key, new_dtype) = (item, self.dtype.get(item)) if self.dtype.get(item) is not None else (frame.columns[item], self.dtype.get(frame.columns[item])) if new_dtype is not None: frame[key] = frame[key].astype(new_dtype) del self.dtype[key] frame.set_index(index_to_set, drop=True, inplace=True) if self.header is None and (not multi_index_named): frame.index.names = [None] * len(frame.index.names) if self.dtype is not None: if isinstance(self.dtype, dict): self.dtype = {k: pandas_dtype(v) for (k, v) in self.dtype.items() if k in frame.columns} else: self.dtype = pandas_dtype(self.dtype) try: frame = frame.astype(self.dtype) except TypeError as err: raise ValueError(str(err)) from err return frame def _validate_usecols(self, usecols) -> None: if lib.is_list_like(usecols) and (not all((isinstance(x, str) for x in usecols))): raise ValueError("The pyarrow engine does not allow 'usecols' to be integer column positions. 
Pass a list of string column names instead.") elif callable(usecols): raise ValueError("The pyarrow engine does not allow 'usecols' to be a callable.") def read(self) -> DataFrame: pa = import_optional_dependency('pyarrow') pyarrow_csv = import_optional_dependency('pyarrow.csv') self._get_pyarrow_options() try: convert_options = pyarrow_csv.ConvertOptions(**self.convert_options) except TypeError as err: include = self.convert_options.get('include_columns', None) if include is not None: self._validate_usecols(include) nulls = self.convert_options.get('null_values', set()) if not lib.is_list_like(nulls) or not all((isinstance(x, str) for x in nulls)): raise TypeError("The 'pyarrow' engine requires all na_values to be strings") from err raise try: table = pyarrow_csv.read_csv(self.src, read_options=pyarrow_csv.ReadOptions(**self.read_options), parse_options=pyarrow_csv.ParseOptions(**self.parse_options), convert_options=convert_options) except pa.ArrowInvalid as e: raise ParserError(e) from e dtype_backend = self.kwds['dtype_backend'] if dtype_backend is lib.no_default: new_schema = table.schema new_type = pa.float64() for (i, arrow_type) in enumerate(table.schema.types): if pa.types.is_null(arrow_type): new_schema = new_schema.set(i, new_schema.field(i).with_type(new_type)) table = table.cast(new_schema) with warnings.catch_warnings(): warnings.filterwarnings('ignore', 'make_block is deprecated', DeprecationWarning) if dtype_backend == 'pyarrow': frame = table.to_pandas(types_mapper=pd.ArrowDtype) elif dtype_backend == 'numpy_nullable': dtype_mapping = _arrow_dtype_mapping() dtype_mapping[pa.null()] = pd.Int64Dtype() frame = table.to_pandas(types_mapper=dtype_mapping.get) elif using_string_dtype(): frame = table.to_pandas(types_mapper=arrow_string_types_mapper()) else: frame = table.to_pandas() return self._finalize_pandas_output(frame) # File: pandas-main/pandas/io/parsers/base_parser.py from __future__ import annotations from collections import defaultdict from copy import copy import csv from enum import Enum import itertools from typing import TYPE_CHECKING, Any, cast, final, overload import warnings import numpy as np from pandas._libs import lib, parsers import pandas._libs.ops as libops from pandas._libs.parsers import STR_NA_VALUES from pandas.compat._optional import import_optional_dependency from pandas.errors import ParserError, ParserWarning from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_bool_dtype, is_dict_like, is_float_dtype, is_integer, is_integer_dtype, is_list_like, is_object_dtype, is_string_dtype from pandas.core.dtypes.missing import isna from pandas import DataFrame, DatetimeIndex, StringDtype from pandas.core import algorithms from pandas.core.arrays import ArrowExtensionArray, BaseMaskedArray, BooleanArray, FloatingArray, IntegerArray from pandas.core.indexes.api import Index, MultiIndex, default_index, ensure_index_from_sequences from pandas.core.series import Series from pandas.core.tools import datetimes as tools from pandas.io.common import is_potential_multi_index if TYPE_CHECKING: from collections.abc import Callable, Iterable, Mapping, Sequence from pandas._typing import ArrayLike, DtypeArg, Hashable, HashableT, Scalar, SequenceT class ParserBase: class BadLineHandleMethod(Enum): ERROR = 0 WARN = 1 SKIP = 2 _implicit_index: bool _first_chunk: bool keep_default_na: bool dayfirst: bool cache_dates: bool usecols_dtype: str | None def __init__(self, kwds) -> None: self._implicit_index = False self.names = 
kwds.get('names') self.orig_names: Sequence[Hashable] | None = None self.index_col = kwds.get('index_col', None) self.unnamed_cols: set = set() self.index_names: Sequence[Hashable] | None = None self.col_names: Sequence[Hashable] | None = None parse_dates = kwds.pop('parse_dates', False) if parse_dates is None or lib.is_bool(parse_dates): parse_dates = bool(parse_dates) elif not isinstance(parse_dates, list): raise TypeError("Only booleans and lists are accepted for the 'parse_dates' parameter") self.parse_dates: bool | list = parse_dates self.date_parser = kwds.pop('date_parser', lib.no_default) self.date_format = kwds.pop('date_format', None) self.dayfirst = kwds.pop('dayfirst', False) self.na_values = kwds.get('na_values') self.na_fvalues = kwds.get('na_fvalues') self.na_filter = kwds.get('na_filter', False) self.keep_default_na = kwds.get('keep_default_na', True) self.dtype = copy(kwds.get('dtype', None)) self.converters = kwds.get('converters') self.dtype_backend = kwds.get('dtype_backend') self.true_values = kwds.get('true_values') self.false_values = kwds.get('false_values') self.cache_dates = kwds.pop('cache_dates', True) self.header = kwds.get('header') if is_list_like(self.header, allow_sets=False): if kwds.get('usecols'): raise ValueError('cannot specify usecols when specifying a multi-index header') if kwds.get('names'): raise ValueError('cannot specify names when specifying a multi-index header') if self.index_col is not None: if is_integer(self.index_col): self.index_col = [self.index_col] elif not (is_list_like(self.index_col, allow_sets=False) and all(map(is_integer, self.index_col))): raise ValueError('index_col must only contain integers of column positions when specifying a multi-index header') else: self.index_col = list(self.index_col) self._first_chunk = True (self.usecols, self.usecols_dtype) = _validate_usecols_arg(kwds['usecols']) self.on_bad_lines = kwds.get('on_bad_lines', self.BadLineHandleMethod.ERROR) def close(self) -> None: pass @final def _should_parse_dates(self, i: int) -> bool: if isinstance(self.parse_dates, bool): return self.parse_dates else: if self.index_names is not None: name = self.index_names[i] else: name = None j = i if self.index_col is None else self.index_col[i] return j in self.parse_dates or (name is not None and name in self.parse_dates) @final def _extract_multi_indexer_columns(self, header, index_names: Sequence[Hashable] | None, passed_names: bool=False) -> tuple[Sequence[Hashable], Sequence[Hashable] | None, Sequence[Hashable] | None, bool]: if len(header) < 2: return (header[0], index_names, None, passed_names) ic = self.index_col if ic is None: ic = [] if not isinstance(ic, (list, tuple, np.ndarray)): ic = [ic] sic = set(ic) index_names = header.pop(-1) (index_names, _, _) = self._clean_index_names(index_names, self.index_col) field_count = len(header[0]) if not all((len(header_iter) == field_count for header_iter in header[1:])): raise ParserError('Header rows must have an equal number of columns.') def extract(r): return tuple((r[i] for i in range(field_count) if i not in sic)) columns = list(zip(*(extract(r) for r in header))) names = columns.copy() for single_ic in sorted(ic): names.insert(single_ic, single_ic) if len(ic): col_names = [r[ic[0]] if r[ic[0]] is not None and r[ic[0]] not in self.unnamed_cols else None for r in header] else: col_names = [None] * len(header) passed_names = True return (names, index_names, col_names, passed_names) @final def _maybe_make_multi_index_columns(self, columns: SequenceT, col_names: 
Sequence[Hashable] | None=None) -> SequenceT | MultiIndex: if is_potential_multi_index(columns): columns_mi = cast('Sequence[tuple[Hashable, ...]]', columns) return MultiIndex.from_tuples(columns_mi, names=col_names) return columns @final def _make_index(self, alldata, columns, indexnamerow: list[Scalar] | None=None) -> tuple[Index | None, Sequence[Hashable] | MultiIndex]: index: Index | None if isinstance(self.index_col, list) and len(self.index_col): to_remove = [] indexes = [] for idx in self.index_col: if isinstance(idx, str): raise ValueError(f'Index {idx} invalid') to_remove.append(idx) indexes.append(alldata[idx]) for i in sorted(to_remove, reverse=True): alldata.pop(i) if not self._implicit_index: columns.pop(i) index = self._agg_index(indexes) if indexnamerow: coffset = len(indexnamerow) - len(columns) index = index.set_names(indexnamerow[:coffset]) else: index = None columns = self._maybe_make_multi_index_columns(columns, self.col_names) return (index, columns) @final def _clean_mapping(self, mapping): if not isinstance(mapping, dict): return mapping clean = {} assert self.orig_names is not None for (col, v) in mapping.items(): if isinstance(col, int) and col not in self.orig_names: col = self.orig_names[col] clean[col] = v if isinstance(mapping, defaultdict): remaining_cols = set(self.orig_names) - set(clean.keys()) clean.update({col: mapping[col] for col in remaining_cols}) return clean @final def _agg_index(self, index) -> Index: arrays = [] converters = self._clean_mapping(self.converters) clean_dtypes = self._clean_mapping(self.dtype) if self.index_names is not None: names: Iterable = self.index_names else: names = itertools.cycle([None]) for (i, (arr, name)) in enumerate(zip(index, names)): if self._should_parse_dates(i): arr = date_converter(arr, col=self.index_names[i] if self.index_names is not None else None, dayfirst=self.dayfirst, cache_dates=self.cache_dates, date_format=self.date_format) if self.na_filter: col_na_values = self.na_values col_na_fvalues = self.na_fvalues else: col_na_values = set() col_na_fvalues = set() if isinstance(self.na_values, dict): assert self.index_names is not None col_name = self.index_names[i] if col_name is not None: (col_na_values, col_na_fvalues) = get_na_values(col_name, self.na_values, self.na_fvalues, self.keep_default_na) else: (col_na_values, col_na_fvalues) = (set(), set()) cast_type = None index_converter = False if self.index_names is not None: if isinstance(clean_dtypes, dict): cast_type = clean_dtypes.get(self.index_names[i], None) if isinstance(converters, dict): index_converter = converters.get(self.index_names[i]) is not None try_num_bool = not (cast_type and is_string_dtype(cast_type) or index_converter) (arr, _) = self._infer_types(arr, col_na_values | col_na_fvalues, cast_type is None, try_num_bool) if cast_type is not None: idx = Index(arr, name=name, dtype=cast_type) else: idx = ensure_index_from_sequences([arr], [name]) arrays.append(idx) if len(arrays) == 1: return arrays[0] else: return MultiIndex.from_arrays(arrays) @final def _set_noconvert_dtype_columns(self, col_indices: list[int], names: Sequence[Hashable]) -> set[int]: usecols: list[int] | list[str] | None noconvert_columns = set() if self.usecols_dtype == 'integer': usecols = sorted(self.usecols) elif callable(self.usecols) or self.usecols_dtype not in ('empty', None): usecols = col_indices else: usecols = None def _set(x) -> int: if usecols is not None and is_integer(x): x = usecols[x] if not is_integer(x): x = col_indices[names.index(x)] return x if 
isinstance(self.parse_dates, list): validate_parse_dates_presence(self.parse_dates, names) for val in self.parse_dates: noconvert_columns.add(_set(val)) elif self.parse_dates: if isinstance(self.index_col, list): for k in self.index_col: noconvert_columns.add(_set(k)) elif self.index_col is not None: noconvert_columns.add(_set(self.index_col)) return noconvert_columns @final def _infer_types(self, values, na_values, no_dtype_specified, try_num_bool: bool=True) -> tuple[ArrayLike, int]: na_count = 0 if issubclass(values.dtype.type, (np.number, np.bool_)): na_values = np.array([val for val in na_values if not isinstance(val, str)]) mask = algorithms.isin(values, na_values) na_count = mask.astype('uint8', copy=False).sum() if na_count > 0: if is_integer_dtype(values): values = values.astype(np.float64) np.putmask(values, mask, np.nan) return (values, na_count) dtype_backend = self.dtype_backend non_default_dtype_backend = no_dtype_specified and dtype_backend is not lib.no_default result: ArrayLike if try_num_bool and is_object_dtype(values.dtype): try: (result, result_mask) = lib.maybe_convert_numeric(values, na_values, False, convert_to_masked_nullable=non_default_dtype_backend) except (ValueError, TypeError): na_count = parsers.sanitize_objects(values, na_values) result = values else: if non_default_dtype_backend: if result_mask is None: result_mask = np.zeros(result.shape, dtype=np.bool_) if result_mask.all(): result = IntegerArray(np.ones(result_mask.shape, dtype=np.int64), result_mask) elif is_integer_dtype(result): result = IntegerArray(result, result_mask) elif is_bool_dtype(result): result = BooleanArray(result, result_mask) elif is_float_dtype(result): result = FloatingArray(result, result_mask) na_count = result_mask.sum() else: na_count = isna(result).sum() else: result = values if values.dtype == np.object_: na_count = parsers.sanitize_objects(values, na_values) if result.dtype == np.object_ and try_num_bool: (result, bool_mask) = libops.maybe_convert_bool(np.asarray(values), true_values=self.true_values, false_values=self.false_values, convert_to_masked_nullable=non_default_dtype_backend) if result.dtype == np.bool_ and non_default_dtype_backend: if bool_mask is None: bool_mask = np.zeros(result.shape, dtype=np.bool_) result = BooleanArray(result, bool_mask) elif result.dtype == np.object_ and non_default_dtype_backend: if not lib.is_datetime_array(result, skipna=True): dtype = StringDtype() cls = dtype.construct_array_type() result = cls._from_sequence(values, dtype=dtype) if dtype_backend == 'pyarrow': pa = import_optional_dependency('pyarrow') if isinstance(result, np.ndarray): result = ArrowExtensionArray(pa.array(result, from_pandas=True)) elif isinstance(result, BaseMaskedArray): if result._mask.all(): result = ArrowExtensionArray(pa.array([None] * len(result))) else: result = ArrowExtensionArray(pa.array(result._data, mask=result._mask)) else: result = ArrowExtensionArray(pa.array(result.to_numpy(), from_pandas=True)) return (result, na_count) @overload def _do_date_conversions(self, names: Index, data: DataFrame) -> DataFrame: ... @overload def _do_date_conversions(self, names: Sequence[Hashable], data: Mapping[Hashable, ArrayLike]) -> Mapping[Hashable, ArrayLike]: ... 
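# Illustrative sketch (not part of the pandas source; column names below are made up):
# the _do_date_conversions step is exercised whenever parse_dates lists columns, e.g.
#
#     import io
#     import pandas as pd
#     buf = io.StringIO("when,value\n2024-01-01,1\n2024-01-02,2\n")
#     df = pd.read_csv(buf, parse_dates=["when"])
#     # the "when" column is routed through date_converter, so
#     # df["when"].dtype == "datetime64[ns]"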
@final def _do_date_conversions(self, names: Sequence[Hashable] | Index, data: Mapping[Hashable, ArrayLike] | DataFrame) -> Mapping[Hashable, ArrayLike] | DataFrame: if not isinstance(self.parse_dates, list): return data for colspec in self.parse_dates: if isinstance(colspec, int) and colspec not in data: colspec = names[colspec] if isinstance(self.index_col, list) and colspec in self.index_col or (isinstance(self.index_names, list) and colspec in self.index_names): continue result = date_converter(data[colspec], col=colspec, dayfirst=self.dayfirst, cache_dates=self.cache_dates, date_format=self.date_format) data[colspec] = result return data @final def _check_data_length(self, columns: Sequence[Hashable], data: Sequence[ArrayLike]) -> None: if not self.index_col and len(columns) != len(data) and columns: empty_str = is_object_dtype(data[-1]) and data[-1] == '' empty_str_or_na = empty_str | isna(data[-1]) if len(columns) == len(data) - 1 and np.all(empty_str_or_na): return warnings.warn('Length of header or names does not match length of data. This leads to a loss of data with index_col=False.', ParserWarning, stacklevel=find_stack_level()) @final def _validate_usecols_names(self, usecols: SequenceT, names: Sequence) -> SequenceT: missing = [c for c in usecols if c not in names] if len(missing) > 0: raise ValueError(f'Usecols do not match columns, columns expected but not found: {missing}') return usecols @final def _clean_index_names(self, columns, index_col) -> tuple[list | None, list, list]: if not is_index_col(index_col): return (None, columns, index_col) columns = list(columns) if not columns: return ([None] * len(index_col), columns, index_col) cp_cols = list(columns) index_names: list[str | int | None] = [] index_col = list(index_col) for (i, c) in enumerate(index_col): if isinstance(c, str): index_names.append(c) for (j, name) in enumerate(cp_cols): if name == c: index_col[i] = j columns.remove(name) break else: name = cp_cols[c] columns.remove(name) index_names.append(name) for (i, name) in enumerate(index_names): if isinstance(name, str) and name in self.unnamed_cols: index_names[i] = None return (index_names, columns, index_col) @final def _get_empty_meta(self, columns: Sequence[HashableT], dtype: DtypeArg | None=None) -> tuple[Index, list[HashableT], dict[HashableT, Series]]: columns = list(columns) index_col = self.index_col index_names = self.index_names dtype_dict: defaultdict[Hashable, Any] if not is_dict_like(dtype): dtype_dict = defaultdict(lambda : dtype) else: dtype = cast(dict, dtype) dtype_dict = defaultdict(lambda : None, {columns[k] if is_integer(k) else k: v for (k, v) in dtype.items()}) index: Index if (index_col is None or index_col is False) or index_names is None: index = default_index(0) else: data = [Index([], name=name, dtype=dtype_dict[name]) for name in index_names] if len(data) == 1: index = data[0] else: index = MultiIndex.from_arrays(data) index_col.sort() for (i, n) in enumerate(index_col): columns.pop(n - i) col_dict = {col_name: Series([], dtype=dtype_dict[col_name]) for col_name in columns} return (index, columns, col_dict) def date_converter(date_col, col: Hashable, dayfirst: bool=False, cache_dates: bool=True, date_format: dict[Hashable, str] | str | None=None): if date_col.dtype.kind in 'Mm': return date_col date_fmt = date_format.get(col) if isinstance(date_format, dict) else date_format str_objs = lib.ensure_string_array(np.asarray(date_col)) try: result = tools.to_datetime(str_objs, format=date_fmt, utc=False, dayfirst=dayfirst, 
cache=cache_dates) except (ValueError, TypeError): return str_objs if isinstance(result, DatetimeIndex): arr = result.to_numpy() arr.flags.writeable = True return arr return result._values parser_defaults = {'delimiter': None, 'escapechar': None, 'quotechar': '"', 'quoting': csv.QUOTE_MINIMAL, 'doublequote': True, 'skipinitialspace': False, 'lineterminator': None, 'header': 'infer', 'index_col': None, 'names': None, 'skiprows': None, 'skipfooter': 0, 'nrows': None, 'na_values': None, 'keep_default_na': True, 'true_values': None, 'false_values': None, 'converters': None, 'dtype': None, 'cache_dates': True, 'thousands': None, 'comment': None, 'decimal': '.', 'parse_dates': False, 'dayfirst': False, 'date_format': None, 'usecols': None, 'chunksize': None, 'encoding': None, 'compression': None, 'skip_blank_lines': True, 'encoding_errors': 'strict', 'on_bad_lines': ParserBase.BadLineHandleMethod.ERROR, 'dtype_backend': lib.no_default} def get_na_values(col, na_values, na_fvalues, keep_default_na: bool): if isinstance(na_values, dict): if col in na_values: return (na_values[col], na_fvalues[col]) else: if keep_default_na: return (STR_NA_VALUES, set()) return (set(), set()) else: return (na_values, na_fvalues) def is_index_col(col) -> bool: return col is not None and col is not False def validate_parse_dates_presence(parse_dates: bool | list, columns: Sequence[Hashable]) -> set: if not isinstance(parse_dates, list): return set() missing = set() unique_cols = set() for col in parse_dates: if isinstance(col, str): if col not in columns: missing.add(col) else: unique_cols.add(col) elif col in columns: unique_cols.add(col) else: unique_cols.add(columns[col]) if missing: missing_cols = ', '.join(sorted(missing)) raise ValueError(f"Missing column provided to 'parse_dates': '{missing_cols}'") return unique_cols def _validate_usecols_arg(usecols): msg = "'usecols' must either be list-like of all strings, all unicode, all integers or a callable." if usecols is not None: if callable(usecols): return (usecols, None) if not is_list_like(usecols): raise ValueError(msg) usecols_dtype = lib.infer_dtype(usecols, skipna=False) if usecols_dtype not in ('empty', 'integer', 'string'): raise ValueError(msg) usecols = set(usecols) return (usecols, usecols_dtype) return (usecols, None) @overload def evaluate_callable_usecols(usecols: Callable[[Hashable], object], names: Iterable[Hashable]) -> set[int]: ... @overload def evaluate_callable_usecols(usecols: SequenceT, names: Iterable[Hashable]) -> SequenceT: ... 
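# Illustrative sketch (not part of the pandas source): a callable usecols is resolved
# to the set of column positions whose name it accepts; anything non-callable is
# returned unchanged, as the implementation below shows.
#
#     names = ["a", "b", "c"]
#     evaluate_callable_usecols(lambda col: col != "b", names)  # -> {0, 2}
#     evaluate_callable_usecols(["a", "c"], names)              # -> ["a", "c"]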
def evaluate_callable_usecols(usecols: Callable[[Hashable], object] | SequenceT, names: Iterable[Hashable]) -> SequenceT | set[int]: if callable(usecols): return {i for (i, name) in enumerate(names) if usecols(name)} return usecols # File: pandas-main/pandas/io/parsers/c_parser_wrapper.py from __future__ import annotations from collections import defaultdict from typing import TYPE_CHECKING import warnings import numpy as np from pandas._libs import lib, parsers from pandas.compat._optional import import_optional_dependency from pandas.errors import DtypeWarning from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import pandas_dtype from pandas.core.dtypes.concat import concat_compat, union_categoricals from pandas.core.dtypes.dtypes import CategoricalDtype from pandas.core.indexes.api import ensure_index_from_sequences from pandas.io.common import dedup_names, is_potential_multi_index from pandas.io.parsers.base_parser import ParserBase, ParserError, date_converter, evaluate_callable_usecols, is_index_col, validate_parse_dates_presence if TYPE_CHECKING: from collections.abc import Hashable, Mapping, Sequence from pandas._typing import AnyArrayLike, ArrayLike, DtypeArg, DtypeObj, ReadCsvBuffer, SequenceT from pandas import Index, MultiIndex class CParserWrapper(ParserBase): low_memory: bool _reader: parsers.TextReader def __init__(self, src: ReadCsvBuffer[str], **kwds) -> None: super().__init__(kwds) self.kwds = kwds kwds = kwds.copy() self.low_memory = kwds.pop('low_memory', False) kwds['allow_leading_cols'] = self.index_col is not False kwds['usecols'] = self.usecols kwds['on_bad_lines'] = self.on_bad_lines.value for key in ('storage_options', 'encoding', 'memory_map', 'compression'): kwds.pop(key, None) kwds['dtype'] = ensure_dtype_objs(kwds.get('dtype', None)) if 'dtype_backend' not in kwds or kwds['dtype_backend'] is lib.no_default: kwds['dtype_backend'] = 'numpy' if kwds['dtype_backend'] == 'pyarrow': import_optional_dependency('pyarrow') self._reader = parsers.TextReader(src, **kwds) self.unnamed_cols = self._reader.unnamed_cols passed_names = self.names is None if self._reader.header is None: self.names = None else: (self.names, self.index_names, self.col_names, passed_names) = self._extract_multi_indexer_columns(self._reader.header, self.index_names, passed_names) if self.names is None: self.names = list(range(self._reader.table_width)) self.orig_names = self.names[:] if self.usecols: usecols = evaluate_callable_usecols(self.usecols, self.orig_names) assert self.orig_names is not None if self.usecols_dtype == 'string' and (not set(usecols).issubset(self.orig_names)): self._validate_usecols_names(usecols, self.orig_names) if len(self.names) > len(usecols): self.names = [n for (i, n) in enumerate(self.names) if i in usecols or n in usecols] if len(self.names) < len(usecols): self._validate_usecols_names(usecols, self.names) validate_parse_dates_presence(self.parse_dates, self.names) self._set_noconvert_columns() self.orig_names = self.names if self._reader.leading_cols == 0 and is_index_col(self.index_col): (index_names, self.names, self.index_col) = self._clean_index_names(self.names, self.index_col) if self.index_names is None: self.index_names = index_names if self._reader.header is None and (not passed_names): assert self.index_names is not None self.index_names = [None] * len(self.index_names) self._implicit_index = self._reader.leading_cols > 0 def close(self) -> None: try: self._reader.close() except ValueError: pass def 
_set_noconvert_columns(self) -> None: assert self.orig_names is not None names_dict = {x: i for (i, x) in enumerate(self.orig_names)} col_indices = [names_dict[x] for x in self.names] noconvert_columns = self._set_noconvert_dtype_columns(col_indices, self.names) for col in noconvert_columns: self._reader.set_noconvert(col) def read(self, nrows: int | None=None) -> tuple[Index | MultiIndex | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, AnyArrayLike]]: index: Index | MultiIndex | None column_names: Sequence[Hashable] | MultiIndex try: if self.low_memory: chunks = self._reader.read_low_memory(nrows) data = _concatenate_chunks(chunks, self.names) else: data = self._reader.read(nrows) except StopIteration: if self._first_chunk: self._first_chunk = False names = dedup_names(self.orig_names, is_potential_multi_index(self.orig_names, self.index_col)) (index, columns, col_dict) = self._get_empty_meta(names, dtype=self.dtype) columns = self._maybe_make_multi_index_columns(columns, self.col_names) columns = _filter_usecols(self.usecols, columns) col_dict = {k: v for (k, v) in col_dict.items() if k in columns} return (index, columns, col_dict) else: self.close() raise self._first_chunk = False names = self.names if self._reader.leading_cols: arrays = [] if self.index_col and self._reader.leading_cols != len(self.index_col): raise ParserError(f'Could not construct index. Requested to use {len(self.index_col)} number of columns, but {self._reader.leading_cols} left to parse.') for i in range(self._reader.leading_cols): if self.index_col is None: values = data.pop(i) else: values = data.pop(self.index_col[i]) if self._should_parse_dates(i): values = date_converter(values, col=self.index_names[i] if self.index_names is not None else None, dayfirst=self.dayfirst, cache_dates=self.cache_dates, date_format=self.date_format) arrays.append(values) index = ensure_index_from_sequences(arrays) names = _filter_usecols(self.usecols, names) names = dedup_names(names, is_potential_multi_index(names, self.index_col)) data_tups = sorted(data.items()) data = {k: v for (k, (i, v)) in zip(names, data_tups)} date_data = self._do_date_conversions(names, data) column_names = self._maybe_make_multi_index_columns(names, self.col_names) else: data_tups = sorted(data.items()) assert self.orig_names is not None names = list(self.orig_names) names = dedup_names(names, is_potential_multi_index(names, self.index_col)) names = _filter_usecols(self.usecols, names) alldata = [x[1] for x in data_tups] if self.usecols is None: self._check_data_length(names, alldata) data = {k: v for (k, (i, v)) in zip(names, data_tups)} date_data = self._do_date_conversions(names, data) (index, column_names) = self._make_index(alldata, names) return (index, column_names, date_data) def _filter_usecols(usecols, names: SequenceT) -> SequenceT | list[Hashable]: usecols = evaluate_callable_usecols(usecols, names) if usecols is not None and len(names) != len(usecols): return [name for (i, name) in enumerate(names) if i in usecols or name in usecols] return names def _concatenate_chunks(chunks: list[dict[int, ArrayLike]], column_names: list[str]) -> dict: names = list(chunks[0].keys()) warning_columns = [] result: dict = {} for name in names: arrs = [chunk.pop(name) for chunk in chunks] dtypes = {a.dtype for a in arrs} non_cat_dtypes = {x for x in dtypes if not isinstance(x, CategoricalDtype)} dtype = dtypes.pop() if isinstance(dtype, CategoricalDtype): result[name] = union_categoricals(arrs, sort_categories=False) else: result[name] = 
concat_compat(arrs) if len(non_cat_dtypes) > 1 and result[name].dtype == np.dtype(object): warning_columns.append(column_names[name]) if warning_columns: warning_names = ', '.join([f'{index}: {name}' for (index, name) in enumerate(warning_columns)]) warning_message = ' '.join([f'Columns ({warning_names}) have mixed types. Specify dtype option on import or set low_memory=False.']) warnings.warn(warning_message, DtypeWarning, stacklevel=find_stack_level()) return result def ensure_dtype_objs(dtype: DtypeArg | dict[Hashable, DtypeArg] | None) -> DtypeObj | dict[Hashable, DtypeObj] | None: if isinstance(dtype, defaultdict): default_dtype = pandas_dtype(dtype.default_factory()) dtype_converted: defaultdict = defaultdict(lambda : default_dtype) for key in dtype.keys(): dtype_converted[key] = pandas_dtype(dtype[key]) return dtype_converted elif isinstance(dtype, dict): return {k: pandas_dtype(dtype[k]) for k in dtype} elif dtype is not None: return pandas_dtype(dtype) return dtype # File: pandas-main/pandas/io/parsers/python_parser.py from __future__ import annotations from collections import abc, defaultdict import csv from io import StringIO import re from typing import IO, TYPE_CHECKING, Any, DefaultDict, Literal, cast, final import warnings import numpy as np from pandas._libs import lib from pandas.errors import EmptyDataError, ParserError, ParserWarning from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.astype import astype_array from pandas.core.dtypes.common import is_bool_dtype, is_extension_array_dtype, is_integer, is_numeric_dtype, is_object_dtype, is_string_dtype, pandas_dtype from pandas.core.dtypes.dtypes import CategoricalDtype, ExtensionDtype from pandas.core.dtypes.inference import is_dict_like from pandas.core import algorithms from pandas.core.arrays import Categorical, ExtensionArray from pandas.core.arrays.boolean import BooleanDtype from pandas.core.indexes.api import Index from pandas.io.common import dedup_names, is_potential_multi_index from pandas.io.parsers.base_parser import ParserBase, evaluate_callable_usecols, get_na_values, parser_defaults, validate_parse_dates_presence if TYPE_CHECKING: from collections.abc import Hashable, Iterator, Mapping, Sequence from pandas._typing import ArrayLike, DtypeObj, ReadCsvBuffer, Scalar, T from pandas import MultiIndex, Series _BOM = '\ufeff' class PythonParser(ParserBase): _no_thousands_columns: set[int] def __init__(self, f: ReadCsvBuffer[str] | list, **kwds) -> None: super().__init__(kwds) self.data: Iterator[list[str]] | list[list[Scalar]] = [] self.buf: list = [] self.pos = 0 self.line_pos = 0 self.skiprows = kwds['skiprows'] if callable(self.skiprows): self.skipfunc = self.skiprows else: self.skipfunc = lambda x: x in self.skiprows self.skipfooter = _validate_skipfooter_arg(kwds['skipfooter']) self.delimiter = kwds['delimiter'] self.quotechar = kwds['quotechar'] if isinstance(self.quotechar, str): self.quotechar = str(self.quotechar) self.escapechar = kwds['escapechar'] self.doublequote = kwds['doublequote'] self.skipinitialspace = kwds['skipinitialspace'] self.lineterminator = kwds['lineterminator'] self.quoting = kwds['quoting'] self.skip_blank_lines = kwds['skip_blank_lines'] self.has_index_names = kwds.get('has_index_names', False) self.thousands = kwds['thousands'] self.decimal = kwds['decimal'] self.comment = kwds['comment'] if isinstance(f, list): self.data = f else: assert hasattr(f, 'readline') self.data = self._make_reader(f) 
self._col_indices: list[int] | None = None columns: list[list[Scalar | None]] (columns, self.num_original_columns, self.unnamed_cols) = self._infer_columns() (self.columns, self.index_names, self.col_names, _) = self._extract_multi_indexer_columns(columns, self.index_names) self.orig_names: list[Hashable] = list(self.columns) (index_names, self.orig_names, self.columns) = self._get_index_name() if self.index_names is None: self.index_names = index_names if self._col_indices is None: self._col_indices = list(range(len(self.columns))) self._no_thousands_columns = self._set_no_thousand_columns() if len(self.decimal) != 1: raise ValueError('Only length-1 decimal markers supported') @cache_readonly def num(self) -> re.Pattern: decimal = re.escape(self.decimal) if self.thousands is None: regex = f'^[\\-\\+]?[0-9]*({decimal}[0-9]*)?([0-9]?(E|e)\\-?[0-9]+)?$' else: thousands = re.escape(self.thousands) regex = f'^[\\-\\+]?([0-9]+{thousands}|[0-9])*({decimal}[0-9]*)?([0-9]?(E|e)\\-?[0-9]+)?$' return re.compile(regex) def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]) -> Iterator[list[str]]: sep = self.delimiter if sep is None or len(sep) == 1: if self.lineterminator: raise ValueError('Custom line terminators not supported in python parser (yet)') class MyDialect(csv.Dialect): delimiter = self.delimiter quotechar = self.quotechar escapechar = self.escapechar doublequote = self.doublequote skipinitialspace = self.skipinitialspace quoting = self.quoting lineterminator = '\n' dia = MyDialect if sep is not None: dia.delimiter = sep else: line = f.readline() lines = self._check_comments([[line]])[0] while self.skipfunc(self.pos) or not lines: self.pos += 1 line = f.readline() lines = self._check_comments([[line]])[0] lines_str = cast(list[str], lines) line = lines_str[0] self.pos += 1 self.line_pos += 1 sniffed = csv.Sniffer().sniff(line) dia.delimiter = sniffed.delimiter line_rdr = csv.reader(StringIO(line), dialect=dia) self.buf.extend(list(line_rdr)) reader = csv.reader(f, dialect=dia, strict=True) else: def _read(): line = f.readline() pat = re.compile(sep) yield pat.split(line.strip()) for line in f: yield pat.split(line.strip()) reader = _read() return reader def read(self, rows: int | None=None) -> tuple[Index | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, ArrayLike | Series]]: try: content = self._get_lines(rows) except StopIteration: if self._first_chunk: content = [] else: self.close() raise self._first_chunk = False index: Index | None columns: Sequence[Hashable] = list(self.orig_names) if not len(content): names = dedup_names(self.orig_names, is_potential_multi_index(self.orig_names, self.index_col)) (index, columns, col_dict) = self._get_empty_meta(names, self.dtype) conv_columns = self._maybe_make_multi_index_columns(columns, self.col_names) return (index, conv_columns, col_dict) indexnamerow = None if self.has_index_names and sum((int(v == '' or v is None) for v in content[0])) == len(columns): indexnamerow = content[0] content = content[1:] alldata = self._rows_to_cols(content) (data, columns) = self._exclude_implicit_index(alldata) conv_data = self._convert_data(data) conv_data = self._do_date_conversions(columns, conv_data) (index, result_columns) = self._make_index(alldata, columns, indexnamerow) return (index, result_columns, conv_data) def _exclude_implicit_index(self, alldata: list[np.ndarray]) -> tuple[Mapping[Hashable, np.ndarray], Sequence[Hashable]]: names = dedup_names(self.orig_names, is_potential_multi_index(self.orig_names, self.index_col)) offset = 0 if 
self._implicit_index: offset = len(self.index_col) len_alldata = len(alldata) self._check_data_length(names, alldata) return ({name: alldata[i + offset] for (i, name) in enumerate(names) if i < len_alldata}, names) def get_chunk(self, size: int | None=None) -> tuple[Index | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, ArrayLike | Series]]: if size is None: size = self.chunksize return self.read(rows=size) def _convert_data(self, data: Mapping[Hashable, np.ndarray]) -> Mapping[Hashable, ArrayLike]: clean_conv = self._clean_mapping(self.converters) clean_dtypes = self._clean_mapping(self.dtype) clean_na_values = {} clean_na_fvalues = {} if isinstance(self.na_values, dict): for col in self.na_values: if col is not None: na_value = self.na_values[col] na_fvalue = self.na_fvalues[col] if isinstance(col, int) and col not in self.orig_names: col = self.orig_names[col] clean_na_values[col] = na_value clean_na_fvalues[col] = na_fvalue else: clean_na_values = self.na_values clean_na_fvalues = self.na_fvalues return self._convert_to_ndarrays(data, clean_na_values, clean_na_fvalues, clean_conv, clean_dtypes) @final def _convert_to_ndarrays(self, dct: Mapping, na_values, na_fvalues, converters=None, dtypes=None) -> dict[Any, np.ndarray]: result = {} parse_date_cols = validate_parse_dates_presence(self.parse_dates, self.columns) for (c, values) in dct.items(): conv_f = None if converters is None else converters.get(c, None) if isinstance(dtypes, dict): cast_type = dtypes.get(c, None) else: cast_type = dtypes if self.na_filter: (col_na_values, col_na_fvalues) = get_na_values(c, na_values, na_fvalues, self.keep_default_na) else: (col_na_values, col_na_fvalues) = (set(), set()) if c in parse_date_cols: mask = algorithms.isin(values, set(col_na_values) | col_na_fvalues) np.putmask(values, mask, np.nan) result[c] = values continue if conv_f is not None: if cast_type is not None: warnings.warn(f'Both a converter and dtype were specified for column {c} - only the converter will be used.', ParserWarning, stacklevel=find_stack_level()) try: values = lib.map_infer(values, conv_f) except ValueError: mask = algorithms.isin(values, list(na_values)).view(np.uint8) values = lib.map_infer_mask(values, conv_f, mask) (cvals, na_count) = self._infer_types(values, set(col_na_values) | col_na_fvalues, cast_type is None, try_num_bool=False) else: is_ea = is_extension_array_dtype(cast_type) is_str_or_ea_dtype = is_ea or is_string_dtype(cast_type) try_num_bool = not (cast_type and is_str_or_ea_dtype) (cvals, na_count) = self._infer_types(values, set(col_na_values) | col_na_fvalues, cast_type is None, try_num_bool) if cast_type is not None: cast_type = pandas_dtype(cast_type) if cast_type and (cvals.dtype != cast_type or is_ea): if not is_ea and na_count > 0: if is_bool_dtype(cast_type): raise ValueError(f'Bool column has NA values in column {c}') cvals = self._cast_types(cvals, cast_type, c) result[c] = cvals return result @final def _cast_types(self, values: ArrayLike, cast_type: DtypeObj, column) -> ArrayLike: if isinstance(cast_type, CategoricalDtype): known_cats = cast_type.categories is not None if not is_object_dtype(values.dtype) and (not known_cats): values = lib.ensure_string_array(values, skipna=False, convert_na_value=False) cats = Index(values).unique().dropna() values = Categorical._from_inferred_categories(cats, cats.get_indexer(values), cast_type, true_values=self.true_values) elif isinstance(cast_type, ExtensionDtype): array_type = cast_type.construct_array_type() try: if isinstance(cast_type, 
BooleanDtype): values_str = [str(val) for val in values] return array_type._from_sequence_of_strings(values_str, dtype=cast_type, true_values=self.true_values, false_values=self.false_values, none_values=self.na_values) else: return array_type._from_sequence_of_strings(values, dtype=cast_type) except NotImplementedError as err: raise NotImplementedError(f'Extension Array: {array_type} must implement _from_sequence_of_strings in order to be used in parser methods') from err elif isinstance(values, ExtensionArray): values = values.astype(cast_type, copy=False) elif issubclass(cast_type.type, str): values = lib.ensure_string_array(values, skipna=True, convert_na_value=False) else: try: values = astype_array(values, cast_type, copy=True) except ValueError as err: raise ValueError(f'Unable to convert column {column} to type {cast_type}') from err return values @cache_readonly def _have_mi_columns(self) -> bool: if self.header is None: return False header = self.header if isinstance(header, (list, tuple, np.ndarray)): return len(header) > 1 else: return False def _infer_columns(self) -> tuple[list[list[Scalar | None]], int, set[Scalar | None]]: names = self.names num_original_columns = 0 clear_buffer = True unnamed_cols: set[Scalar | None] = set() if self.header is not None: header = self.header have_mi_columns = self._have_mi_columns if isinstance(header, (list, tuple, np.ndarray)): if have_mi_columns: header = list(header) + [header[-1] + 1] else: header = [header] columns: list[list[Scalar | None]] = [] for (level, hr) in enumerate(header): try: line = self._buffered_line() while self.line_pos <= hr: line = self._next_line() except StopIteration as err: if 0 < self.line_pos <= hr and (not have_mi_columns or hr != header[-1]): joi = list(map(str, header[:-1] if have_mi_columns else header)) msg = f"[{','.join(joi)}], len of {len(joi)}, " raise ValueError(f'Passed header={msg}but only {self.line_pos} lines in file') from err if have_mi_columns and hr > 0: if clear_buffer: self.buf.clear() columns.append([None] * len(columns[-1])) return (columns, num_original_columns, unnamed_cols) if not self.names: raise EmptyDataError('No columns to parse from file') from err line = self.names[:] this_columns: list[Scalar | None] = [] this_unnamed_cols = [] for (i, c) in enumerate(line): if c == '': if have_mi_columns: col_name = f'Unnamed: {i}_level_{level}' else: col_name = f'Unnamed: {i}' this_unnamed_cols.append(i) this_columns.append(col_name) else: this_columns.append(c) if not have_mi_columns: counts: DefaultDict = defaultdict(int) col_loop_order = [i for i in range(len(this_columns)) if i not in this_unnamed_cols] + this_unnamed_cols for i in col_loop_order: col = this_columns[i] old_col = col cur_count = counts[col] if cur_count > 0: while cur_count > 0: counts[old_col] = cur_count + 1 col = f'{old_col}.{cur_count}' if col in this_columns: cur_count += 1 else: cur_count = counts[col] if self.dtype is not None and is_dict_like(self.dtype) and (self.dtype.get(old_col) is not None) and (self.dtype.get(col) is None): self.dtype.update({col: self.dtype.get(old_col)}) this_columns[i] = col counts[col] = cur_count + 1 elif have_mi_columns: if hr == header[-1]: lc = len(this_columns) sic = self.index_col ic = len(sic) if sic is not None else 0 unnamed_count = len(this_unnamed_cols) if lc != unnamed_count and lc - ic > unnamed_count or ic == 0: clear_buffer = False this_columns = [None] * lc self.buf = [self.buf[-1]] columns.append(this_columns) unnamed_cols.update({this_columns[i] for i in 
this_unnamed_cols}) if len(columns) == 1: num_original_columns = len(this_columns) if clear_buffer: self.buf.clear() first_line: list[Scalar] | None if names is not None: try: first_line = self._next_line() except StopIteration: first_line = None len_first_data_row = 0 if first_line is None else len(first_line) if len(names) > len(columns[0]) and len(names) > len_first_data_row: raise ValueError('Number of passed names did not match number of header fields in the file') if len(columns) > 1: raise TypeError('Cannot pass names with multi-index columns') if self.usecols is not None: self._handle_usecols(columns, names, num_original_columns) else: num_original_columns = len(names) if self._col_indices is not None and len(names) != len(self._col_indices): columns = [[names[i] for i in sorted(self._col_indices)]] else: columns = [names] else: columns = self._handle_usecols(columns, columns[0], num_original_columns) else: ncols = len(self._header_line) num_original_columns = ncols if not names: columns = [list(range(ncols))] columns = self._handle_usecols(columns, columns[0], ncols) elif self.usecols is None or len(names) >= ncols: columns = self._handle_usecols([names], names, ncols) num_original_columns = len(names) elif not callable(self.usecols) and len(names) != len(self.usecols): raise ValueError('Number of passed names did not match number of header fields in the file') else: columns = [names] self._handle_usecols(columns, columns[0], ncols) return (columns, num_original_columns, unnamed_cols) @cache_readonly def _header_line(self): if self.header is not None: return None try: line = self._buffered_line() except StopIteration as err: if not self.names: raise EmptyDataError('No columns to parse from file') from err line = self.names[:] return line def _handle_usecols(self, columns: list[list[Scalar | None]], usecols_key: list[Scalar | None], num_original_columns: int) -> list[list[Scalar | None]]: col_indices: set[int] | list[int] if self.usecols is not None: if callable(self.usecols): col_indices = evaluate_callable_usecols(self.usecols, usecols_key) elif any((isinstance(u, str) for u in self.usecols)): if len(columns) > 1: raise ValueError('If using multiple headers, usecols must be integers.') col_indices = [] for col in self.usecols: if isinstance(col, str): try: col_indices.append(usecols_key.index(col)) except ValueError: self._validate_usecols_names(self.usecols, usecols_key) else: col_indices.append(col) else: missing_usecols = [col for col in self.usecols if col >= num_original_columns] if missing_usecols: raise ParserError(f'Defining usecols with out-of-bounds indices is not allowed. 
{missing_usecols} are out-of-bounds.') col_indices = self.usecols columns = [[n for (i, n) in enumerate(column) if i in col_indices] for column in columns] self._col_indices = sorted(col_indices) return columns def _buffered_line(self) -> list[Scalar]: if len(self.buf) > 0: return self.buf[0] else: return self._next_line() def _check_for_bom(self, first_row: list[Scalar]) -> list[Scalar]: if not first_row: return first_row if not isinstance(first_row[0], str): return first_row if not first_row[0]: return first_row first_elt = first_row[0][0] if first_elt != _BOM: return first_row first_row_bom = first_row[0] new_row: str if len(first_row_bom) > 1 and first_row_bom[1] == self.quotechar: start = 2 quote = first_row_bom[1] end = first_row_bom[2:].index(quote) + 2 new_row = first_row_bom[start:end] if len(first_row_bom) > end + 1: new_row += first_row_bom[end + 1:] else: new_row = first_row_bom[1:] new_row_list: list[Scalar] = [new_row] return new_row_list + first_row[1:] def _is_line_empty(self, line: Sequence[Scalar]) -> bool: return not line or all((not x for x in line)) def _next_line(self) -> list[Scalar]: if isinstance(self.data, list): while self.skipfunc(self.pos): if self.pos >= len(self.data): break self.pos += 1 while True: try: line = self._check_comments([self.data[self.pos]])[0] self.pos += 1 if not self.skip_blank_lines and (self._is_line_empty(self.data[self.pos - 1]) or line): break if self.skip_blank_lines: ret = self._remove_empty_lines([line]) if ret: line = ret[0] break except IndexError as err: raise StopIteration from err else: while self.skipfunc(self.pos): self.pos += 1 next(self.data) while True: orig_line = self._next_iter_line(row_num=self.pos + 1) self.pos += 1 if orig_line is not None: line = self._check_comments([orig_line])[0] if self.skip_blank_lines: ret = self._remove_empty_lines([line]) if ret: line = ret[0] break elif self._is_line_empty(orig_line) or line: break if self.pos == 1: line = self._check_for_bom(line) self.line_pos += 1 self.buf.append(line) return line def _alert_malformed(self, msg: str, row_num: int) -> None: if self.on_bad_lines == self.BadLineHandleMethod.ERROR: raise ParserError(msg) if self.on_bad_lines == self.BadLineHandleMethod.WARN: warnings.warn(f'Skipping line {row_num}: {msg}\n', ParserWarning, stacklevel=find_stack_level()) def _next_iter_line(self, row_num: int) -> list[Scalar] | None: try: assert not isinstance(self.data, list) line = next(self.data) return line except csv.Error as e: if self.on_bad_lines in (self.BadLineHandleMethod.ERROR, self.BadLineHandleMethod.WARN): msg = str(e) if 'NULL byte' in msg or 'line contains NUL' in msg: msg = "NULL byte detected. This byte cannot be processed in Python's native csv library at the moment, so please pass in engine='c' instead" if self.skipfooter > 0: reason = "Error could possibly be due to parsing errors in the skipped footer rows (the skipfooter keyword is only applied after Python's csv library has parsed all rows)." msg += '. 
' + reason self._alert_malformed(msg, row_num) return None def _check_comments(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: if self.comment is None: return lines ret = [] for line in lines: rl = [] for x in line: if not isinstance(x, str) or self.comment not in x or x in self.na_values: rl.append(x) else: x = x[:x.find(self.comment)] if len(x) > 0: rl.append(x) break ret.append(rl) return ret def _remove_empty_lines(self, lines: list[list[T]]) -> list[list[T]]: ret = [line for line in lines if len(line) > 1 or (len(line) == 1 and (not isinstance(line[0], str) or line[0].strip()))] return ret def _check_thousands(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: if self.thousands is None: return lines return self._search_replace_num_columns(lines=lines, search=self.thousands, replace='') def _search_replace_num_columns(self, lines: list[list[Scalar]], search: str, replace: str) -> list[list[Scalar]]: ret = [] for line in lines: rl = [] for (i, x) in enumerate(line): if not isinstance(x, str) or search not in x or i in self._no_thousands_columns or (not self.num.search(x.strip())): rl.append(x) else: rl.append(x.replace(search, replace)) ret.append(rl) return ret def _check_decimal(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: if self.decimal == parser_defaults['decimal']: return lines return self._search_replace_num_columns(lines=lines, search=self.decimal, replace='.') def _get_index_name(self) -> tuple[Sequence[Hashable] | None, list[Hashable], list[Hashable]]: columns: Sequence[Hashable] = self.orig_names orig_names = list(columns) columns = list(columns) line: list[Scalar] | None if self._header_line is not None: line = self._header_line else: try: line = self._next_line() except StopIteration: line = None next_line: list[Scalar] | None try: next_line = self._next_line() except StopIteration: next_line = None implicit_first_cols = 0 if line is not None: index_col = self.index_col if index_col is not False: implicit_first_cols = len(line) - self.num_original_columns if next_line is not None and self.header is not None and (index_col is not False): if len(next_line) == len(line) + self.num_original_columns: self.index_col = list(range(len(line))) self.buf = self.buf[1:] for c in reversed(line): columns.insert(0, c) orig_names = list(columns) self.num_original_columns = len(columns) return (line, orig_names, columns) if implicit_first_cols > 0: self._implicit_index = True if self.index_col is None: self.index_col = list(range(implicit_first_cols)) index_name = None else: (index_name, _, self.index_col) = self._clean_index_names(columns, self.index_col) return (index_name, orig_names, columns) def _rows_to_cols(self, content: list[list[Scalar]]) -> list[np.ndarray]: col_len = self.num_original_columns if self._implicit_index: col_len += len(self.index_col) max_len = max((len(row) for row in content)) if max_len > col_len and self.index_col is not False and (self.usecols is None): footers = self.skipfooter if self.skipfooter else 0 bad_lines = [] iter_content = enumerate(content) content_len = len(content) content = [] for (i, _content) in iter_content: actual_len = len(_content) if actual_len > col_len: if callable(self.on_bad_lines): new_l = self.on_bad_lines(_content) if new_l is not None: content.append(new_l) elif self.on_bad_lines in (self.BadLineHandleMethod.ERROR, self.BadLineHandleMethod.WARN): row_num = self.pos - (content_len - i + footers) bad_lines.append((row_num, actual_len)) if self.on_bad_lines == self.BadLineHandleMethod.ERROR: break else: 
content.append(_content) for (row_num, actual_len) in bad_lines: msg = f'Expected {col_len} fields in line {row_num + 1}, saw {actual_len}' if self.delimiter and len(self.delimiter) > 1 and (self.quoting != csv.QUOTE_NONE): reason = 'Error could possibly be due to quotes being ignored when a multi-char delimiter is used.' msg += '. ' + reason self._alert_malformed(msg, row_num + 1) zipped_content = list(lib.to_object_array(content, min_width=col_len).T) if self.usecols: assert self._col_indices is not None col_indices = self._col_indices if self._implicit_index: zipped_content = [a for (i, a) in enumerate(zipped_content) if i < len(self.index_col) or i - len(self.index_col) in col_indices] else: zipped_content = [a for (i, a) in enumerate(zipped_content) if i in col_indices] return zipped_content def _get_lines(self, rows: int | None=None) -> list[list[Scalar]]: lines = self.buf new_rows = None if rows is not None: if len(self.buf) >= rows: (new_rows, self.buf) = (self.buf[:rows], self.buf[rows:]) else: rows -= len(self.buf) if new_rows is None: if isinstance(self.data, list): if self.pos > len(self.data): raise StopIteration if rows is None: new_rows = self.data[self.pos:] new_pos = len(self.data) else: new_rows = self.data[self.pos:self.pos + rows] new_pos = self.pos + rows new_rows = self._remove_skipped_rows(new_rows) lines.extend(new_rows) self.pos = new_pos else: new_rows = [] try: if rows is not None: row_index = 0 row_ct = 0 offset = self.pos if self.pos is not None else 0 while row_ct < rows: new_row = next(self.data) if not self.skipfunc(offset + row_index): row_ct += 1 row_index += 1 new_rows.append(new_row) len_new_rows = len(new_rows) new_rows = self._remove_skipped_rows(new_rows) lines.extend(new_rows) else: rows = 0 while True: next_row = self._next_iter_line(row_num=self.pos + rows + 1) rows += 1 if next_row is not None: new_rows.append(next_row) len_new_rows = len(new_rows) except StopIteration: len_new_rows = len(new_rows) new_rows = self._remove_skipped_rows(new_rows) lines.extend(new_rows) if len(lines) == 0: raise self.pos += len_new_rows self.buf = [] else: lines = new_rows if self.skipfooter: lines = lines[:-self.skipfooter] lines = self._check_comments(lines) if self.skip_blank_lines: lines = self._remove_empty_lines(lines) lines = self._check_thousands(lines) return self._check_decimal(lines) def _remove_skipped_rows(self, new_rows: list[list[Scalar]]) -> list[list[Scalar]]: if self.skiprows: return [row for (i, row) in enumerate(new_rows) if not self.skipfunc(i + self.pos)] return new_rows def _set_no_thousand_columns(self) -> set[int]: no_thousands_columns: set[int] = set() if self.columns and self.parse_dates: assert self._col_indices is not None no_thousands_columns = self._set_noconvert_dtype_columns(self._col_indices, self.columns) if self.columns and self.dtype: assert self._col_indices is not None for (i, col) in zip(self._col_indices, self.columns): if not isinstance(self.dtype, dict) and (not is_numeric_dtype(self.dtype)): no_thousands_columns.add(i) if isinstance(self.dtype, dict) and col in self.dtype and (not is_numeric_dtype(self.dtype[col]) or is_bool_dtype(self.dtype[col])): no_thousands_columns.add(i) return no_thousands_columns class FixedWidthReader(abc.Iterator): def __init__(self, f: IO[str] | ReadCsvBuffer[str], colspecs: list[tuple[int, int]] | Literal['infer'], delimiter: str | None, comment: str | None, skiprows: set[int] | None=None, infer_nrows: int=100) -> None: self.f = f self.buffer: Iterator | None = None self.delimiter = '\r\n' + 
delimiter if delimiter else '\n\r\t ' self.comment = comment if colspecs == 'infer': self.colspecs = self.detect_colspecs(infer_nrows=infer_nrows, skiprows=skiprows) else: self.colspecs = colspecs if not isinstance(self.colspecs, (tuple, list)): raise TypeError(f'column specifications must be a list or tuple, input was a {type(colspecs).__name__}') for colspec in self.colspecs: if not (isinstance(colspec, (tuple, list)) and len(colspec) == 2 and isinstance(colspec[0], (int, np.integer, type(None))) and isinstance(colspec[1], (int, np.integer, type(None)))): raise TypeError('Each column specification must be 2 element tuple or list of integers') def get_rows(self, infer_nrows: int, skiprows: set[int] | None=None) -> list[str]: if skiprows is None: skiprows = set() buffer_rows = [] detect_rows = [] for (i, row) in enumerate(self.f): if i not in skiprows: detect_rows.append(row) buffer_rows.append(row) if len(detect_rows) >= infer_nrows: break self.buffer = iter(buffer_rows) return detect_rows def detect_colspecs(self, infer_nrows: int=100, skiprows: set[int] | None=None) -> list[tuple[int, int]]: delimiters = ''.join([f'\\{x}' for x in self.delimiter]) pattern = re.compile(f'([^{delimiters}]+)') rows = self.get_rows(infer_nrows, skiprows) if not rows: raise EmptyDataError('No rows from which to infer column width') max_len = max(map(len, rows)) mask = np.zeros(max_len + 1, dtype=int) if self.comment is not None: rows = [row.partition(self.comment)[0] for row in rows] for row in rows: for m in pattern.finditer(row): mask[m.start():m.end()] = 1 shifted = np.roll(mask, 1) shifted[0] = 0 edges = np.where(mask ^ shifted == 1)[0] edge_pairs = list(zip(edges[::2], edges[1::2])) return edge_pairs def __next__(self) -> list[str]: if self.buffer is not None: try: line = next(self.buffer) except StopIteration: self.buffer = None line = next(self.f) else: line = next(self.f) return [line[from_:to].strip(self.delimiter) for (from_, to) in self.colspecs] class FixedWidthFieldParser(PythonParser): def __init__(self, f: ReadCsvBuffer[str], **kwds) -> None: self.colspecs = kwds.pop('colspecs') self.infer_nrows = kwds.pop('infer_nrows') PythonParser.__init__(self, f, **kwds) def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]) -> FixedWidthReader: return FixedWidthReader(f, self.colspecs, self.delimiter, self.comment, self.skiprows, self.infer_nrows) def _remove_empty_lines(self, lines: list[list[T]]) -> list[list[T]]: return [line for line in lines if any((not isinstance(e, str) or e.strip() for e in line))] def _validate_skipfooter_arg(skipfooter: int) -> int: if not is_integer(skipfooter): raise ValueError('skipfooter must be an integer') if skipfooter < 0: raise ValueError('skipfooter cannot be negative') return skipfooter # File: pandas-main/pandas/io/parsers/readers.py """""" from __future__ import annotations from collections import abc, defaultdict import csv import sys from textwrap import fill from typing import IO, TYPE_CHECKING, Any, Generic, Literal, TypedDict, overload import warnings import numpy as np from pandas._libs import lib from pandas._libs.parsers import STR_NA_VALUES from pandas.errors import AbstractMethodError, ParserWarning from pandas.util._decorators import Appender from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import is_file_like, is_float, is_integer, is_list_like, pandas_dtype from pandas import Series from pandas.core.frame import DataFrame from pandas.core.indexes.api import 
RangeIndex from pandas.core.shared_docs import _shared_docs from pandas.io.common import IOHandles, get_handle, stringify_path, validate_header_arg from pandas.io.parsers.arrow_parser_wrapper import ArrowParserWrapper from pandas.io.parsers.base_parser import ParserBase, is_index_col, parser_defaults from pandas.io.parsers.c_parser_wrapper import CParserWrapper from pandas.io.parsers.python_parser import FixedWidthFieldParser, PythonParser if TYPE_CHECKING: from collections.abc import Callable, Hashable, Iterable, Mapping, Sequence from types import TracebackType from pandas._typing import CompressionOptions, CSVEngine, DtypeArg, DtypeBackend, FilePath, HashableT, IndexLabel, ReadCsvBuffer, Self, StorageOptions, Unpack, UsecolsArgType class _read_shared(TypedDict, Generic[HashableT], total=False): sep: str | None | lib.NoDefault delimiter: str | None | lib.NoDefault header: int | Sequence[int] | None | Literal['infer'] names: Sequence[Hashable] | None | lib.NoDefault index_col: IndexLabel | Literal[False] | None usecols: UsecolsArgType dtype: DtypeArg | None engine: CSVEngine | None converters: Mapping[HashableT, Callable] | None true_values: list | None false_values: list | None skipinitialspace: bool skiprows: list[int] | int | Callable[[Hashable], bool] | None skipfooter: int nrows: int | None na_values: Hashable | Iterable[Hashable] | Mapping[Hashable, Iterable[Hashable]] | None keep_default_na: bool na_filter: bool skip_blank_lines: bool parse_dates: bool | Sequence[Hashable] | None date_format: str | dict[Hashable, str] | None dayfirst: bool cache_dates: bool compression: CompressionOptions thousands: str | None decimal: str lineterminator: str | None quotechar: str quoting: int doublequote: bool escapechar: str | None comment: str | None encoding: str | None encoding_errors: str | None dialect: str | csv.Dialect | None on_bad_lines: str low_memory: bool memory_map: bool float_precision: Literal['high', 'legacy', 'round_trip'] | None storage_options: StorageOptions | None dtype_backend: DtypeBackend | lib.NoDefault else: _read_shared = dict _doc_read_csv_and_table = '\n{summary}\n\nAlso supports optionally iterating or breaking of the file\ninto chunks.\n\nAdditional help can be found in the online docs for\n`IO Tools `_.\n\nParameters\n----------\nfilepath_or_buffer : str, path object or file-like object\n Any valid string path is acceptable. The string could be a URL. Valid\n URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is\n expected. A local file could be: file://localhost/path/to/table.csv.\n\n If you want to pass in a path object, pandas accepts any ``os.PathLike``.\n\n By file-like object, we refer to objects with a ``read()`` method, such as\n a file handle (e.g. via builtin ``open`` function) or ``StringIO``.\nsep : str, default {_default_sep}\n Character or regex pattern to treat as the delimiter. If ``sep=None``, the\n C engine cannot automatically detect\n the separator, but the Python parsing engine can, meaning the latter will\n be used and automatically detect the separator from only the first valid\n row of the file by Python\'s builtin sniffer tool, ``csv.Sniffer``.\n In addition, separators longer than 1 character and different from\n ``\'\\s+\'`` will be interpreted as regular expressions and will also force\n the use of the Python parsing engine. Note that regex delimiters are prone\n to ignoring quoted data. 
Regex example: ``\'\\r\\t\'``.\ndelimiter : str, optional\n Alias for ``sep``.\nheader : int, Sequence of int, \'infer\' or None, default \'infer\'\n Row number(s) containing column labels and marking the start of the\n data (zero-indexed). Default behavior is to infer the column names: if no ``names``\n are passed the behavior is identical to ``header=0`` and column\n names are inferred from the first line of the file, if column\n names are passed explicitly to ``names`` then the behavior is identical to\n ``header=None``. Explicitly pass ``header=0`` to be able to\n replace existing names. The header can be a list of integers that\n specify row locations for a :class:`~pandas.MultiIndex` on the columns\n e.g. ``[0, 1, 3]``. Intervening rows that are not specified will be\n skipped (e.g. 2 in this example is skipped). Note that this\n parameter ignores commented lines and empty lines if\n ``skip_blank_lines=True``, so ``header=0`` denotes the first line of\n data rather than the first line of the file.\n\n When inferred from the file contents, headers are kept distinct from\n each other by renaming duplicate names with a numeric suffix of the form\n ``".{{count}}"`` starting from 1, e.g. ``"foo"`` and ``"foo.1"``.\n Empty headers are named ``"Unnamed: {{i}}"`` or ``"Unnamed: {{i}}_level_{{level}}"``\n in the case of MultiIndex columns.\nnames : Sequence of Hashable, optional\n Sequence of column labels to apply. If the file contains a header row,\n then you should explicitly pass ``header=0`` to override the column names.\n Duplicates in this list are not allowed.\nindex_col : Hashable, Sequence of Hashable or False, optional\n Column(s) to use as row label(s), denoted either by column labels or column\n indices. If a sequence of labels or indices is given, :class:`~pandas.MultiIndex`\n will be formed for the row labels.\n\n Note: ``index_col=False`` can be used to force pandas to *not* use the first\n column as the index, e.g., when you have a malformed file with delimiters at\n the end of each line.\nusecols : Sequence of Hashable or Callable, optional\n Subset of columns to select, denoted either by column labels or column indices.\n If list-like, all elements must either\n be positional (i.e. integer indices into the document columns) or strings\n that correspond to column names provided either by the user in ``names`` or\n inferred from the document header row(s). If ``names`` are given, the document\n header row(s) are not taken into account. For example, a valid list-like\n ``usecols`` parameter would be ``[0, 1, 2]`` or ``[\'foo\', \'bar\', \'baz\']``.\n Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.\n To instantiate a :class:`~pandas.DataFrame` from ``data`` with element order\n preserved use ``pd.read_csv(data, usecols=[\'foo\', \'bar\'])[[\'foo\', \'bar\']]``\n for columns in ``[\'foo\', \'bar\']`` order or\n ``pd.read_csv(data, usecols=[\'foo\', \'bar\'])[[\'bar\', \'foo\']]``\n for ``[\'bar\', \'foo\']`` order.\n\n If callable, the callable function will be evaluated against the column\n names, returning names where the callable function evaluates to ``True``. An\n example of a valid callable argument would be ``lambda x: x.upper() in\n [\'AAA\', \'BBB\', \'DDD\']``. 
Using this parameter results in much faster\n parsing time and lower memory usage.\ndtype : dtype or dict of {{Hashable : dtype}}, optional\n Data type(s) to apply to either the whole dataset or individual columns.\n E.g., ``{{\'a\': np.float64, \'b\': np.int32, \'c\': \'Int64\'}}``\n Use ``str`` or ``object`` together with suitable ``na_values`` settings\n to preserve and not interpret ``dtype``.\n If ``converters`` are specified, they will be applied INSTEAD\n of ``dtype`` conversion.\n\n .. versionadded:: 1.5.0\n\n Support for ``defaultdict`` was added. Specify a ``defaultdict`` as input where\n the default determines the ``dtype`` of the columns which are not explicitly\n listed.\nengine : {{\'c\', \'python\', \'pyarrow\'}}, optional\n Parser engine to use. The C and pyarrow engines are faster, while the python engine\n is currently more feature-complete. Multithreading is currently only supported by\n the pyarrow engine.\n\n .. versionadded:: 1.4.0\n\n The \'pyarrow\' engine was added as an *experimental* engine, and some features\n are unsupported, or may not work correctly, with this engine.\nconverters : dict of {{Hashable : Callable}}, optional\n Functions for converting values in specified columns. Keys can either\n be column labels or column indices.\ntrue_values : list, optional\n Values to consider as ``True`` in addition to case-insensitive variants of \'True\'.\nfalse_values : list, optional\n Values to consider as ``False`` in addition to case-insensitive variants of \'False\'.\nskipinitialspace : bool, default False\n Skip spaces after delimiter.\nskiprows : int, list of int or Callable, optional\n Line numbers to skip (0-indexed) or number of lines to skip (``int``)\n at the start of the file.\n\n If callable, the callable function will be evaluated against the row\n indices, returning ``True`` if the row should be skipped and ``False`` otherwise.\n An example of a valid callable argument would be ``lambda x: x in [0, 2]``.\nskipfooter : int, default 0\n Number of lines at bottom of file to skip (Unsupported with ``engine=\'c\'``).\nnrows : int, optional\n Number of rows of file to read. Useful for reading pieces of large files.\n Refers to the number of data rows in the returned DataFrame, excluding:\n\n * The header row containing column names.\n * Rows before the header row, if ``header=1`` or larger.\n\n Example usage:\n\n * To read the first 999,999 (non-header) rows:\n ``read_csv(..., nrows=999999)``\n\n * To read rows 1,000,000 through 1,999,999:\n ``read_csv(..., skiprows=1000000, nrows=999999)``\nna_values : Hashable, Iterable of Hashable or dict of {{Hashable : Iterable}}, optional\n Additional strings to recognize as ``NA``/``NaN``. If ``dict`` passed, specific\n per-column ``NA`` values. 
By default the following values are interpreted as\n ``NaN``: "{na_values_str}".\nkeep_default_na : bool, default True\n Whether or not to include the default ``NaN`` values when parsing the data.\n Depending on whether ``na_values`` is passed in, the behavior is as follows:\n\n * If ``keep_default_na`` is ``True``, and ``na_values`` are specified, ``na_values``\n is appended to the default ``NaN`` values used for parsing.\n * If ``keep_default_na`` is ``True``, and ``na_values`` are not specified, only\n the default ``NaN`` values are used for parsing.\n * If ``keep_default_na`` is ``False``, and ``na_values`` are specified, only\n the ``NaN`` values specified ``na_values`` are used for parsing.\n * If ``keep_default_na`` is ``False``, and ``na_values`` are not specified, no\n strings will be parsed as ``NaN``.\n\n Note that if ``na_filter`` is passed in as ``False``, the ``keep_default_na`` and\n ``na_values`` parameters will be ignored.\nna_filter : bool, default True\n Detect missing value markers (empty strings and the value of ``na_values``). In\n data without any ``NA`` values, passing ``na_filter=False`` can improve the\n performance of reading a large file.\nskip_blank_lines : bool, default True\n If ``True``, skip over blank lines rather than interpreting as ``NaN`` values.\nparse_dates : bool, None, list of Hashable, default None\n The behavior is as follows:\n\n * ``bool``. If ``True`` -> try parsing the index.\n * ``None``. Behaves like ``True`` if ``date_format`` is specified.\n * ``list`` of ``int`` or names. e.g. If ``[1, 2, 3]`` -> try parsing columns 1, 2, 3\n each as a separate date column.\n\n If a column or index cannot be represented as an array of ``datetime``,\n say because of an unparsable value or a mixture of timezones, the column\n or index will be returned unaltered as an ``object`` data type. For\n non-standard ``datetime`` parsing, use :func:`~pandas.to_datetime` after\n :func:`~pandas.read_csv`.\n\n Note: A fast-path exists for iso8601-formatted dates.\ndate_format : str or dict of column -> format, optional\n Format to use for parsing dates and/or times when used in conjunction with ``parse_dates``.\n The strftime to parse time, e.g. :const:`"%d/%m/%Y"`. See\n `strftime documentation\n `_ for more information on choices, though\n note that :const:`"%f"` will parse all the way up to nanoseconds.\n You can also pass:\n\n - "ISO8601", to parse any `ISO8601 `_\n time string (not necessarily in exactly the same format);\n - "mixed", to infer the format for each element individually. This is risky,\n and you should probably use it along with `dayfirst`.\n\n .. versionadded:: 2.0.0\ndayfirst : bool, default False\n DD/MM format dates, international and European format.\ncache_dates : bool, default True\n If ``True``, use a cache of unique, converted dates to apply the ``datetime``\n conversion. May produce significant speed-up when parsing duplicate\n date strings, especially ones with timezone offsets.\n\niterator : bool, default False\n Return ``TextFileReader`` object for iteration or getting chunks with\n ``get_chunk()``.\nchunksize : int, optional\n Number of lines to read from the file per chunk. Passing a value will cause the\n function to return a ``TextFileReader`` object for iteration.\n See the `IO Tools docs\n `_\n for more information on ``iterator`` and ``chunksize``.\n\n{decompression_options}\n\n .. 
versionchanged:: 1.4.0 Zstandard support.\n\nthousands : str (length 1), optional\n Character acting as the thousands separator in numerical values.\ndecimal : str (length 1), default \'.\'\n Character to recognize as decimal point (e.g., use \',\' for European data).\nlineterminator : str (length 1), optional\n Character used to denote a line break. Only valid with C parser.\nquotechar : str (length 1), optional\n Character used to denote the start and end of a quoted item. Quoted\n items can include the ``delimiter`` and it will be ignored.\nquoting : {{0 or csv.QUOTE_MINIMAL, 1 or csv.QUOTE_ALL, 2 or csv.QUOTE_NONNUMERIC, 3 or csv.QUOTE_NONE}}, default csv.QUOTE_MINIMAL\n Control field quoting behavior per ``csv.QUOTE_*`` constants. Default is\n ``csv.QUOTE_MINIMAL`` (i.e., 0) which implies that only fields containing special\n characters are quoted (e.g., characters defined in ``quotechar``, ``delimiter``,\n or ``lineterminator``.\ndoublequote : bool, default True\n When ``quotechar`` is specified and ``quoting`` is not ``QUOTE_NONE``, indicate\n whether or not to interpret two consecutive ``quotechar`` elements INSIDE a\n field as a single ``quotechar`` element.\nescapechar : str (length 1), optional\n Character used to escape other characters.\ncomment : str (length 1), optional\n Character indicating that the remainder of line should not be parsed.\n If found at the beginning\n of a line, the line will be ignored altogether. This parameter must be a\n single character. Like empty lines (as long as ``skip_blank_lines=True``),\n fully commented lines are ignored by the parameter ``header`` but not by\n ``skiprows``. For example, if ``comment=\'#\'``, parsing\n ``#empty\\\\na,b,c\\\\n1,2,3`` with ``header=0`` will result in ``\'a,b,c\'`` being\n treated as the header.\nencoding : str, optional, default \'utf-8\'\n Encoding to use for UTF when reading/writing (ex. ``\'utf-8\'``). `List of Python\n standard encodings\n `_ .\n\nencoding_errors : str, optional, default \'strict\'\n How encoding errors are treated. `List of possible values\n `_ .\n\n .. versionadded:: 1.3.0\n\ndialect : str or csv.Dialect, optional\n If provided, this parameter will override values (default or not) for the\n following parameters: ``delimiter``, ``doublequote``, ``escapechar``,\n ``skipinitialspace``, ``quotechar``, and ``quoting``. If it is necessary to\n override values, a ``ParserWarning`` will be issued. See ``csv.Dialect``\n documentation for more details.\non_bad_lines : {{\'error\', \'warn\', \'skip\'}} or Callable, default \'error\'\n Specifies what to do upon encountering a bad line (a line with too many fields).\n Allowed values are:\n\n - ``\'error\'``, raise an Exception when a bad line is encountered.\n - ``\'warn\'``, raise a warning when a bad line is encountered and skip that line.\n - ``\'skip\'``, skip bad lines without raising or warning when they are encountered.\n - Callable, function that will process a single bad line.\n - With ``engine=\'python\'``, function with signature\n ``(bad_line: list[str]) -> list[str] | None``.\n ``bad_line`` is a list of strings split by the ``sep``.\n If the function returns ``None``, the bad line will be ignored.\n If the function returns a new ``list`` of strings with more elements than\n expected, a ``ParserWarning`` will be emitted while dropping extra elements.\n - With ``engine=\'pyarrow\'``, function with signature\n as described in pyarrow documentation: `invalid_row_handler\n `_.\n\n .. versionadded:: 1.3.0\n\n .. 
versionadded:: 1.4.0\n\n Callable\n\n .. versionchanged:: 2.2.0\n\n Callable for ``engine=\'pyarrow\'``\n\nlow_memory : bool, default True\n Internally process the file in chunks, resulting in lower memory use\n while parsing, but possibly mixed type inference. To ensure no mixed\n types either set ``False``, or specify the type with the ``dtype`` parameter.\n Note that the entire file is read into a single :class:`~pandas.DataFrame`\n regardless, use the ``chunksize`` or ``iterator`` parameter to return the data in\n chunks. (Only valid with C parser).\nmemory_map : bool, default False\n If a filepath is provided for ``filepath_or_buffer``, map the file object\n directly onto memory and access the data directly from there. Using this\n option can improve performance because there is no longer any I/O overhead.\nfloat_precision : {{\'high\', \'legacy\', \'round_trip\'}}, optional\n Specifies which converter the C engine should use for floating-point\n values. The options are ``None`` or ``\'high\'`` for the ordinary converter,\n ``\'legacy\'`` for the original lower precision pandas converter, and\n ``\'round_trip\'`` for the round-trip converter.\n\n{storage_options}\n\ndtype_backend : {{\'numpy_nullable\', \'pyarrow\'}}\n Back-end data type applied to the resultant :class:`DataFrame`\n (still experimental). If not specified, the default behavior\n is to not use nullable data types. If specified, the behavior\n is as follows:\n\n * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`\n * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` :class:`DataFrame`\n\n .. versionadded:: 2.0\n\nReturns\n-------\nDataFrame or TextFileReader\n A comma-separated values (csv) file is returned as two-dimensional\n data structure with labeled axes.\n\nSee Also\n--------\nDataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.\n{see_also_func_name} : {see_also_func_summary}\nread_fwf : Read a table of fixed-width formatted lines into DataFrame.\n\nExamples\n--------\n>>> pd.{func_name}(\'data.csv\') # doctest: +SKIP\n Name Value\n0 foo 1\n1 bar 2\n2 #baz 3\n\nIndex and header can be specified via the `index_col` and `header` arguments.\n\n>>> pd.{func_name}(\'data.csv\', header=None) # doctest: +SKIP\n 0 1\n0 Name Value\n1 foo 1\n2 bar 2\n3 #baz 3\n\n>>> pd.{func_name}(\'data.csv\', index_col=\'Value\') # doctest: +SKIP\n Name\nValue\n1 foo\n2 bar\n3 #baz\n\nColumn types are inferred but can be explicitly specified using the dtype argument.\n\n>>> pd.{func_name}(\'data.csv\', dtype={{\'Value\': float}}) # doctest: +SKIP\n Name Value\n0 foo 1.0\n1 bar 2.0\n2 #baz 3.0\n\nTrue, False, and NA values, and thousands separators have defaults,\nbut can be explicitly specified, too. 
Supply the values you would like\nas strings or lists of strings!\n\n>>> pd.{func_name}(\'data.csv\', na_values=[\'foo\', \'bar\']) # doctest: +SKIP\n Name Value\n0 NaN 1\n1 NaN 2\n2 #baz 3\n\nComment lines in the input file can be skipped using the `comment` argument.\n\n>>> pd.{func_name}(\'data.csv\', comment=\'#\') # doctest: +SKIP\n Name Value\n0 foo 1\n1 bar 2\n\nBy default, columns with dates will be read as ``object`` rather than ``datetime``.\n\n>>> df = pd.{func_name}(\'tmp.csv\') # doctest: +SKIP\n\n>>> df # doctest: +SKIP\n col 1 col 2 col 3\n0 10 10/04/2018 Sun 15 Jan 2023\n1 20 15/04/2018 Fri 12 May 2023\n\n>>> df.dtypes # doctest: +SKIP\ncol 1 int64\ncol 2 object\ncol 3 object\ndtype: object\n\nSpecific columns can be parsed as dates by using the `parse_dates` and\n`date_format` arguments.\n\n>>> df = pd.{func_name}(\n... \'tmp.csv\',\n... parse_dates=[1, 2],\n... date_format={{\'col 2\': \'%d/%m/%Y\', \'col 3\': \'%a %d %b %Y\'}},\n... ) # doctest: +SKIP\n\n>>> df.dtypes # doctest: +SKIP\ncol 1 int64\ncol 2 datetime64[ns]\ncol 3 datetime64[ns]\ndtype: object\n' class _C_Parser_Defaults(TypedDict): na_filter: Literal[True] low_memory: Literal[True] memory_map: Literal[False] float_precision: None _c_parser_defaults: _C_Parser_Defaults = {'na_filter': True, 'low_memory': True, 'memory_map': False, 'float_precision': None} class _Fwf_Defaults(TypedDict): colspecs: Literal['infer'] infer_nrows: Literal[100] widths: None _fwf_defaults: _Fwf_Defaults = {'colspecs': 'infer', 'infer_nrows': 100, 'widths': None} _c_unsupported = {'skipfooter'} _python_unsupported = {'low_memory', 'float_precision'} _pyarrow_unsupported = {'skipfooter', 'float_precision', 'chunksize', 'comment', 'nrows', 'thousands', 'memory_map', 'dialect', 'quoting', 'lineterminator', 'converters', 'iterator', 'dayfirst', 'skipinitialspace', 'low_memory'} @overload def validate_integer(name: str, val: None, min_val: int=...) -> None: ... @overload def validate_integer(name: str, val: float, min_val: int=...) -> int: ... @overload def validate_integer(name: str, val: int | None, min_val: int=...) -> int | None: ... 
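# --- Illustrative sketch (not part of the pandas source) ---------------------------------
# The shared docstring above describes how ``header``, ``dtype``, ``na_values`` and
# ``parse_dates`` interact in read_csv/read_table. The snippet below is only a hedged,
# standalone illustration of those options against a made-up in-memory CSV; the column
# names and values are invented for the example and would normally be run outside this module.
from io import StringIO

import pandas as pd

_example_csv = StringIO(
    "name,value,when\n"
    "foo,1,2023-01-15\n"
    "bar,n/a,2023-05-12\n"
)

_df_example = pd.read_csv(
    _example_csv,
    header=0,                  # first line holds the column labels
    dtype={"name": "string"},  # per-column dtype, as documented above
    na_values=["n/a"],         # extra strings treated as NaN in addition to the defaults
    parse_dates=["when"],      # parse this column as datetime64
)
# _df_example.dtypes shows 'value' as float64 (because of the NaN) and 'when' as datetime64[ns].
# ------------------------------------------------------------------------------------------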
def validate_integer(name: str, val: int | float | None, min_val: int=0) -> int | None: if val is None: return val msg = f"'{name:s}' must be an integer >={min_val:d}" if is_float(val): if int(val) != val: raise ValueError(msg) val = int(val) elif not (is_integer(val) and val >= min_val): raise ValueError(msg) return int(val) def _validate_names(names: Sequence[Hashable] | None) -> None: if names is not None: if len(names) != len(set(names)): raise ValueError('Duplicate names are not allowed.') if not (is_list_like(names, allow_sets=False) or isinstance(names, abc.KeysView)): raise ValueError('Names should be an ordered collection.') def _read(filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], kwds) -> DataFrame | TextFileReader: if kwds.get('parse_dates', None) is None: if kwds.get('date_format', None) is None: kwds['parse_dates'] = False else: kwds['parse_dates'] = True iterator = kwds.get('iterator', False) chunksize = kwds.get('chunksize', None) errors = kwds.get('encoding_errors', 'strict') if not isinstance(errors, str): raise ValueError(f'encoding_errors must be a string, got {type(errors).__name__}') if kwds.get('engine') == 'pyarrow': if iterator: raise ValueError("The 'iterator' option is not supported with the 'pyarrow' engine") if chunksize is not None: raise ValueError("The 'chunksize' option is not supported with the 'pyarrow' engine") else: chunksize = validate_integer('chunksize', chunksize, 1) nrows = kwds.get('nrows', None) _validate_names(kwds.get('names', None)) parser = TextFileReader(filepath_or_buffer, **kwds) if chunksize or iterator: return parser with parser: return parser.read(nrows) @overload def read_csv(filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, iterator: Literal[True], chunksize: int | None=..., **kwds: Unpack[_read_shared[HashableT]]) -> TextFileReader: ... @overload def read_csv(filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, iterator: bool=..., chunksize: int, **kwds: Unpack[_read_shared[HashableT]]) -> TextFileReader: ... @overload def read_csv(filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, iterator: Literal[False]=..., chunksize: None=..., **kwds: Unpack[_read_shared[HashableT]]) -> DataFrame: ... @overload def read_csv(filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, iterator: bool=..., chunksize: int | None=..., **kwds: Unpack[_read_shared[HashableT]]) -> DataFrame | TextFileReader: ... 
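# --- Illustrative sketch (not part of the pandas source) ---------------------------------
# Hedged sketch of the two validators defined above, intended to be run standalone outside
# this module: validate_integer accepts ints and integral floats at or above ``min_val`` and
# returns an int, while _validate_names (a module-private helper) rejects duplicate names.
# The argument values below are invented for the example.
from pandas.io.parsers.readers import _validate_names, validate_integer

assert validate_integer("nrows", 3.0) == 3          # integral float is coerced to int
assert validate_integer("chunksize", None) is None  # None passes through untouched

try:
    validate_integer("chunksize", 0, min_val=1)     # below min_val -> ValueError
except ValueError as exc:
    print(exc)                                      # "'chunksize' must be an integer >=1"

try:
    _validate_names(["a", "a"])                     # duplicates -> ValueError
except ValueError as exc:
    print(exc)                                      # "Duplicate names are not allowed."
# ------------------------------------------------------------------------------------------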
@Appender(_doc_read_csv_and_table.format(func_name='read_csv', summary='Read a comma-separated values (csv) file into DataFrame.', see_also_func_name='read_table', see_also_func_summary='Read general delimited file into DataFrame.', na_values_str=fill('", "'.join(sorted(STR_NA_VALUES)), 70, subsequent_indent=' '), _default_sep="','", storage_options=_shared_docs['storage_options'], decompression_options=_shared_docs['decompression_options'] % 'filepath_or_buffer')) def read_csv(filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, sep: str | None | lib.NoDefault=lib.no_default, delimiter: str | None | lib.NoDefault=None, header: int | Sequence[int] | None | Literal['infer']='infer', names: Sequence[Hashable] | None | lib.NoDefault=lib.no_default, index_col: IndexLabel | Literal[False] | None=None, usecols: UsecolsArgType=None, dtype: DtypeArg | None=None, engine: CSVEngine | None=None, converters: Mapping[HashableT, Callable] | None=None, true_values: list | None=None, false_values: list | None=None, skipinitialspace: bool=False, skiprows: list[int] | int | Callable[[Hashable], bool] | None=None, skipfooter: int=0, nrows: int | None=None, na_values: Hashable | Iterable[Hashable] | Mapping[Hashable, Iterable[Hashable]] | None=None, keep_default_na: bool=True, na_filter: bool=True, skip_blank_lines: bool=True, parse_dates: bool | Sequence[Hashable] | None=None, date_format: str | dict[Hashable, str] | None=None, dayfirst: bool=False, cache_dates: bool=True, iterator: bool=False, chunksize: int | None=None, compression: CompressionOptions='infer', thousands: str | None=None, decimal: str='.', lineterminator: str | None=None, quotechar: str='"', quoting: int=csv.QUOTE_MINIMAL, doublequote: bool=True, escapechar: str | None=None, comment: str | None=None, encoding: str | None=None, encoding_errors: str | None='strict', dialect: str | csv.Dialect | None=None, on_bad_lines: str='error', low_memory: bool=_c_parser_defaults['low_memory'], memory_map: bool=False, float_precision: Literal['high', 'legacy', 'round_trip'] | None=None, storage_options: StorageOptions | None=None, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default) -> DataFrame | TextFileReader: kwds = locals().copy() del kwds['filepath_or_buffer'] del kwds['sep'] kwds_defaults = _refine_defaults_read(dialect, delimiter, engine, sep, on_bad_lines, names, defaults={'delimiter': ','}, dtype_backend=dtype_backend) kwds.update(kwds_defaults) return _read(filepath_or_buffer, kwds) @overload def read_table(filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, iterator: Literal[True], chunksize: int | None=..., **kwds: Unpack[_read_shared[HashableT]]) -> TextFileReader: ... @overload def read_table(filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, iterator: bool=..., chunksize: int, **kwds: Unpack[_read_shared[HashableT]]) -> TextFileReader: ... @overload def read_table(filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, iterator: Literal[False]=..., chunksize: None=..., **kwds: Unpack[_read_shared[HashableT]]) -> DataFrame: ... @overload def read_table(filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, iterator: bool=..., chunksize: int | None=..., **kwds: Unpack[_read_shared[HashableT]]) -> DataFrame | TextFileReader: ... 
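# --- Illustrative sketch (not part of the pandas source) ---------------------------------
# Hedged sketch of the iterator/chunksize paths typed by the overloads above: passing
# ``chunksize`` (or ``iterator=True``) makes read_csv return a TextFileReader instead of a
# DataFrame, which can be iterated or used as a context manager. The inline data are made up
# and the snippet is meant to be run standalone.
from io import StringIO

import pandas as pd

_chunked_src = StringIO("a,b\n1,2\n3,4\n5,6\n")

with pd.read_csv(_chunked_src, chunksize=2) as reader:  # TextFileReader, not DataFrame
    for chunk in reader:                                # each chunk is a DataFrame of <= 2 rows
        print(chunk.shape)
# ------------------------------------------------------------------------------------------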
@Appender(_doc_read_csv_and_table.format(func_name='read_table', summary='Read general delimited file into DataFrame.', see_also_func_name='read_csv', see_also_func_summary='Read a comma-separated values (csv) file into DataFrame.', na_values_str=fill('", "'.join(sorted(STR_NA_VALUES)), 70, subsequent_indent=' '), _default_sep="'\\\\t' (tab-stop)", storage_options=_shared_docs['storage_options'], decompression_options=_shared_docs['decompression_options'] % 'filepath_or_buffer')) def read_table(filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, sep: str | None | lib.NoDefault=lib.no_default, delimiter: str | None | lib.NoDefault=None, header: int | Sequence[int] | None | Literal['infer']='infer', names: Sequence[Hashable] | None | lib.NoDefault=lib.no_default, index_col: IndexLabel | Literal[False] | None=None, usecols: UsecolsArgType=None, dtype: DtypeArg | None=None, engine: CSVEngine | None=None, converters: Mapping[HashableT, Callable] | None=None, true_values: list | None=None, false_values: list | None=None, skipinitialspace: bool=False, skiprows: list[int] | int | Callable[[Hashable], bool] | None=None, skipfooter: int=0, nrows: int | None=None, na_values: Hashable | Iterable[Hashable] | Mapping[Hashable, Iterable[Hashable]] | None=None, keep_default_na: bool=True, na_filter: bool=True, skip_blank_lines: bool=True, parse_dates: bool | Sequence[Hashable] | None=None, date_format: str | dict[Hashable, str] | None=None, dayfirst: bool=False, cache_dates: bool=True, iterator: bool=False, chunksize: int | None=None, compression: CompressionOptions='infer', thousands: str | None=None, decimal: str='.', lineterminator: str | None=None, quotechar: str='"', quoting: int=csv.QUOTE_MINIMAL, doublequote: bool=True, escapechar: str | None=None, comment: str | None=None, encoding: str | None=None, encoding_errors: str | None='strict', dialect: str | csv.Dialect | None=None, on_bad_lines: str='error', low_memory: bool=_c_parser_defaults['low_memory'], memory_map: bool=False, float_precision: Literal['high', 'legacy', 'round_trip'] | None=None, storage_options: StorageOptions | None=None, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default) -> DataFrame | TextFileReader: kwds = locals().copy() del kwds['filepath_or_buffer'] del kwds['sep'] kwds_defaults = _refine_defaults_read(dialect, delimiter, engine, sep, on_bad_lines, names, defaults={'delimiter': '\t'}, dtype_backend=dtype_backend) kwds.update(kwds_defaults) return _read(filepath_or_buffer, kwds) @overload def read_fwf(filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, colspecs: Sequence[tuple[int, int]] | str | None=..., widths: Sequence[int] | None=..., infer_nrows: int=..., iterator: Literal[True], chunksize: int | None=..., **kwds: Unpack[_read_shared[HashableT]]) -> TextFileReader: ... @overload def read_fwf(filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, colspecs: Sequence[tuple[int, int]] | str | None=..., widths: Sequence[int] | None=..., infer_nrows: int=..., iterator: bool=..., chunksize: int, **kwds: Unpack[_read_shared[HashableT]]) -> TextFileReader: ... @overload def read_fwf(filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, colspecs: Sequence[tuple[int, int]] | str | None=..., widths: Sequence[int] | None=..., infer_nrows: int=..., iterator: Literal[False]=..., chunksize: None=..., **kwds: Unpack[_read_shared[HashableT]]) -> DataFrame: ... 
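# --- Illustrative sketch (not part of the pandas source) ---------------------------------
# Hedged sketch contrasting read_table with read_csv: as wired above through
# _refine_defaults_read, read_table defaults the delimiter to a tab ('\t') while read_csv
# defaults it to a comma; everything else is shared. The sample data are invented and the
# snippet is meant to be run standalone.
from io import StringIO

import pandas as pd

_tsv_src = "x\ty\n1\t2\n3\t4\n"

df_tab = pd.read_table(StringIO(_tsv_src))          # tab-separated by default
df_csv = pd.read_csv(StringIO(_tsv_src), sep="\t")  # equivalent call spelled via read_csv
assert df_tab.equals(df_csv)
# ------------------------------------------------------------------------------------------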
def read_fwf(filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], *, colspecs: Sequence[tuple[int, int]] | str | None='infer', widths: Sequence[int] | None=None, infer_nrows: int=100, iterator: bool=False, chunksize: int | None=None, **kwds: Unpack[_read_shared[HashableT]]) -> DataFrame | TextFileReader: if colspecs is None and widths is None: raise ValueError('Must specify either colspecs or widths') if colspecs not in (None, 'infer') and widths is not None: raise ValueError("You must specify only one of 'widths' and 'colspecs'") if widths is not None: (colspecs, col) = ([], 0) for w in widths: colspecs.append((col, col + w)) col += w assert colspecs is not None names = kwds.get('names') if names is not None and names is not lib.no_default: if len(names) != len(colspecs) and colspecs != 'infer': len_index = 0 if kwds.get('index_col') is not None: index_col: Any = kwds.get('index_col') if index_col is not False: if not is_list_like(index_col): len_index = 1 else: assert index_col is not lib.no_default len_index = len(index_col) if kwds.get('usecols') is None and len(names) + len_index != len(colspecs): raise ValueError('Length of colspecs must match length of names') check_dtype_backend(kwds.setdefault('dtype_backend', lib.no_default)) return _read(filepath_or_buffer, kwds | {'colspecs': colspecs, 'infer_nrows': infer_nrows, 'engine': 'python-fwf', 'iterator': iterator, 'chunksize': chunksize}) class TextFileReader(abc.Iterator): def __init__(self, f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list, engine: CSVEngine | None=None, **kwds) -> None: if engine is not None: engine_specified = True else: engine = 'python' engine_specified = False self.engine = engine self._engine_specified = kwds.get('engine_specified', engine_specified) _validate_skipfooter(kwds) dialect = _extract_dialect(kwds) if dialect is not None: if engine == 'pyarrow': raise ValueError("The 'dialect' option is not supported with the 'pyarrow' engine") kwds = _merge_with_dialect_properties(dialect, kwds) if kwds.get('header', 'infer') == 'infer': kwds['header'] = 0 if kwds.get('names') is None else None self.orig_options = kwds self._currow = 0 options = self._get_options_with_defaults(engine) options['storage_options'] = kwds.get('storage_options', None) self.chunksize = options.pop('chunksize', None) self.nrows = options.pop('nrows', None) self._check_file_or_buffer(f, engine) (self.options, self.engine) = self._clean_options(options, engine) if 'has_index_names' in kwds: self.options['has_index_names'] = kwds['has_index_names'] self.handles: IOHandles | None = None self._engine = self._make_engine(f, self.engine) def close(self) -> None: if self.handles is not None: self.handles.close() self._engine.close() def _get_options_with_defaults(self, engine: CSVEngine) -> dict[str, Any]: kwds = self.orig_options options = {} default: object | None for (argname, default) in parser_defaults.items(): value = kwds.get(argname, default) if engine == 'pyarrow' and argname in _pyarrow_unsupported and (value != default) and (value != getattr(value, 'value', default)): raise ValueError(f"The {argname!r} option is not supported with the 'pyarrow' engine") options[argname] = value for (argname, default) in _c_parser_defaults.items(): if argname in kwds: value = kwds[argname] if engine != 'c' and value != default: if 'python' in engine and argname not in _python_unsupported: pass elif 'pyarrow' in engine and argname not in _pyarrow_unsupported: pass else: raise ValueError(f'The {argname!r} option is not 
supported with the {engine!r} engine') else: value = default options[argname] = value if engine == 'python-fwf': for (argname, default) in _fwf_defaults.items(): options[argname] = kwds.get(argname, default) return options def _check_file_or_buffer(self, f, engine: CSVEngine) -> None: if is_file_like(f) and engine != 'c' and (not hasattr(f, '__iter__')): raise ValueError("The 'python' engine cannot iterate through this file buffer.") if hasattr(f, 'encoding'): file_encoding = f.encoding orig_reader_enc = self.orig_options.get('encoding', None) any_none = file_encoding is None or orig_reader_enc is None if file_encoding != orig_reader_enc and (not any_none): file_path = getattr(f, 'name', None) raise ValueError(f'The specified reader encoding {orig_reader_enc} is different from the encoding {file_encoding} of file {file_path}.') def _clean_options(self, options: dict[str, Any], engine: CSVEngine) -> tuple[dict[str, Any], CSVEngine]: result = options.copy() fallback_reason = None if engine == 'c': if options['skipfooter'] > 0: fallback_reason = "the 'c' engine does not support skipfooter" engine = 'python' sep = options['delimiter'] if sep is not None and len(sep) > 1: if engine == 'c' and sep == '\\s+': result['delim_whitespace'] = True del result['delimiter'] elif engine not in ('python', 'python-fwf'): fallback_reason = f"the '{engine}' engine does not support regex separators (separators > 1 char and different from '\\s+' are interpreted as regex)" engine = 'python' elif sep is not None: encodeable = True encoding = sys.getfilesystemencoding() or 'utf-8' try: if len(sep.encode(encoding)) > 1: encodeable = False except UnicodeDecodeError: encodeable = False if not encodeable and engine not in ('python', 'python-fwf'): fallback_reason = f"the separator encoded in {encoding} is > 1 char long, and the '{engine}' engine does not support such separators" engine = 'python' quotechar = options['quotechar'] if quotechar is not None and isinstance(quotechar, (str, bytes)): if len(quotechar) == 1 and ord(quotechar) > 127 and (engine not in ('python', 'python-fwf')): fallback_reason = f"ord(quotechar) > 127, meaning the quotechar is larger than one byte, and the '{engine}' engine does not support such quotechars" engine = 'python' if fallback_reason and self._engine_specified: raise ValueError(fallback_reason) if engine == 'c': for arg in _c_unsupported: del result[arg] if 'python' in engine: for arg in _python_unsupported: if fallback_reason and result[arg] != _c_parser_defaults.get(arg): raise ValueError(f"Falling back to the 'python' engine because {fallback_reason}, but this causes {arg!r} to be ignored as it is not supported by the 'python' engine.") del result[arg] if fallback_reason: warnings.warn(f"Falling back to the 'python' engine because {fallback_reason}; you can avoid this warning by specifying engine='python'.", ParserWarning, stacklevel=find_stack_level()) index_col = options['index_col'] names = options['names'] converters = options['converters'] na_values = options['na_values'] skiprows = options['skiprows'] validate_header_arg(options['header']) if index_col is True: raise ValueError("The value of index_col couldn't be 'True'") if is_index_col(index_col): if not isinstance(index_col, (list, tuple, np.ndarray)): index_col = [index_col] result['index_col'] = index_col names = list(names) if names is not None else names if converters is not None: if not isinstance(converters, dict): raise TypeError(f'Type converters must be a dict or subclass, input was a 
{type(converters).__name__}') else: converters = {} keep_default_na = options['keep_default_na'] floatify = engine != 'pyarrow' (na_values, na_fvalues) = _clean_na_values(na_values, keep_default_na, floatify=floatify) if engine == 'pyarrow': if not is_integer(skiprows) and skiprows is not None: raise ValueError("skiprows argument must be an integer when using engine='pyarrow'") else: if is_integer(skiprows): skiprows = range(skiprows) if skiprows is None: skiprows = set() elif not callable(skiprows): skiprows = set(skiprows) result['names'] = names result['converters'] = converters result['na_values'] = na_values result['na_fvalues'] = na_fvalues result['skiprows'] = skiprows return (result, engine) def __next__(self) -> DataFrame: try: return self.get_chunk() except StopIteration: self.close() raise def _make_engine(self, f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list | IO, engine: CSVEngine='c') -> ParserBase: mapping: dict[str, type[ParserBase]] = {'c': CParserWrapper, 'python': PythonParser, 'pyarrow': ArrowParserWrapper, 'python-fwf': FixedWidthFieldParser} if engine not in mapping: raise ValueError(f'Unknown engine: {engine} (valid options are {mapping.keys()})') if not isinstance(f, list): is_text = True mode = 'r' if engine == 'pyarrow': is_text = False mode = 'rb' elif engine == 'c' and self.options.get('encoding', 'utf-8') == 'utf-8' and isinstance(stringify_path(f), str): is_text = False if 'b' not in mode: mode += 'b' self.handles = get_handle(f, mode, encoding=self.options.get('encoding', None), compression=self.options.get('compression', None), memory_map=self.options.get('memory_map', False), is_text=is_text, errors=self.options.get('encoding_errors', 'strict'), storage_options=self.options.get('storage_options', None)) assert self.handles is not None f = self.handles.handle elif engine != 'python': msg = f'Invalid file path or buffer object type: {type(f)}' raise ValueError(msg) try: return mapping[engine](f, **self.options) except Exception: if self.handles is not None: self.handles.close() raise def _failover_to_python(self) -> None: raise AbstractMethodError(self) def read(self, nrows: int | None=None) -> DataFrame: if self.engine == 'pyarrow': try: df = self._engine.read() except Exception: self.close() raise else: nrows = validate_integer('nrows', nrows) try: (index, columns, col_dict) = self._engine.read(nrows) except Exception: self.close() raise if index is None: if col_dict: new_rows = len(next(iter(col_dict.values()))) index = RangeIndex(self._currow, self._currow + new_rows) else: new_rows = 0 else: new_rows = len(index) if hasattr(self, 'orig_options'): dtype_arg = self.orig_options.get('dtype', None) else: dtype_arg = None if isinstance(dtype_arg, dict): dtype = defaultdict(lambda : None) dtype.update(dtype_arg) elif dtype_arg is not None and pandas_dtype(dtype_arg) in (np.str_, np.object_): dtype = defaultdict(lambda : dtype_arg) else: dtype = None if dtype is not None: new_col_dict = {} for (k, v) in col_dict.items(): d = dtype[k] if pandas_dtype(dtype[k]) in (np.str_, np.object_) else None new_col_dict[k] = Series(v, index=index, dtype=d, copy=False) else: new_col_dict = col_dict df = DataFrame(new_col_dict, columns=columns, index=index, copy=False) self._currow += new_rows return df def get_chunk(self, size: int | None=None) -> DataFrame: if size is None: size = self.chunksize if self.nrows is not None: if self._currow >= self.nrows: raise StopIteration if size is None: size = self.nrows - self._currow else: size = min(size, self.nrows - 
self._currow) return self.read(nrows=size) def __enter__(self) -> Self: return self def __exit__(self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None) -> None: self.close() def TextParser(*args, **kwds) -> TextFileReader: kwds['engine'] = 'python' return TextFileReader(*args, **kwds) def _clean_na_values(na_values, keep_default_na: bool=True, floatify: bool=True): na_fvalues: set | dict if na_values is None: if keep_default_na: na_values = STR_NA_VALUES else: na_values = set() na_fvalues = set() elif isinstance(na_values, dict): old_na_values = na_values.copy() na_values = {} for (k, v) in old_na_values.items(): if not is_list_like(v): v = [v] if keep_default_na: v = set(v) | STR_NA_VALUES na_values[k] = _stringify_na_values(v, floatify) na_fvalues = {k: _floatify_na_values(v) for (k, v) in na_values.items()} else: if not is_list_like(na_values): na_values = [na_values] na_values = _stringify_na_values(na_values, floatify) if keep_default_na: na_values = na_values | STR_NA_VALUES na_fvalues = _floatify_na_values(na_values) return (na_values, na_fvalues) def _floatify_na_values(na_values): result = set() for v in na_values: try: v = float(v) if not np.isnan(v): result.add(v) except (TypeError, ValueError, OverflowError): pass return result def _stringify_na_values(na_values, floatify: bool) -> set[str | float]: result: list[str | float] = [] for x in na_values: result.append(str(x)) result.append(x) try: v = float(x) if v == int(v): v = int(v) result.append(f'{v}.0') result.append(str(v)) if floatify: result.append(v) except (TypeError, ValueError, OverflowError): pass if floatify: try: result.append(int(x)) except (TypeError, ValueError, OverflowError): pass return set(result) def _refine_defaults_read(dialect: str | csv.Dialect | None, delimiter: str | None | lib.NoDefault, engine: CSVEngine | None, sep: str | None | lib.NoDefault, on_bad_lines: str | Callable, names: Sequence[Hashable] | None | lib.NoDefault, defaults: dict[str, Any], dtype_backend: DtypeBackend | lib.NoDefault): delim_default = defaults['delimiter'] kwds: dict[str, Any] = {} if dialect is not None: kwds['sep_override'] = delimiter is None and (sep is lib.no_default or sep == delim_default) if delimiter and sep is not lib.no_default: raise ValueError('Specified a sep and a delimiter; you can only specify one.') kwds['names'] = None if names is lib.no_default else names if delimiter is None: delimiter = sep if delimiter == '\n': raise ValueError('Specified \\n as separator or delimiter. This forces the python engine which does not accept a line terminator. 
Hence it is not allowed to use the line terminator as separator.') if delimiter is lib.no_default: kwds['delimiter'] = delim_default else: kwds['delimiter'] = delimiter if engine is not None: kwds['engine_specified'] = True else: kwds['engine'] = 'c' kwds['engine_specified'] = False if on_bad_lines == 'error': kwds['on_bad_lines'] = ParserBase.BadLineHandleMethod.ERROR elif on_bad_lines == 'warn': kwds['on_bad_lines'] = ParserBase.BadLineHandleMethod.WARN elif on_bad_lines == 'skip': kwds['on_bad_lines'] = ParserBase.BadLineHandleMethod.SKIP elif callable(on_bad_lines): if engine not in ['python', 'pyarrow']: raise ValueError("on_bad_line can only be a callable function if engine='python' or 'pyarrow'") kwds['on_bad_lines'] = on_bad_lines else: raise ValueError(f'Argument {on_bad_lines} is invalid for on_bad_lines') check_dtype_backend(dtype_backend) kwds['dtype_backend'] = dtype_backend return kwds def _extract_dialect(kwds: dict[str, Any]) -> csv.Dialect | None: if kwds.get('dialect') is None: return None dialect = kwds['dialect'] if dialect in csv.list_dialects(): dialect = csv.get_dialect(dialect) _validate_dialect(dialect) return dialect MANDATORY_DIALECT_ATTRS = ('delimiter', 'doublequote', 'escapechar', 'skipinitialspace', 'quotechar', 'quoting') def _validate_dialect(dialect: csv.Dialect) -> None: for param in MANDATORY_DIALECT_ATTRS: if not hasattr(dialect, param): raise ValueError(f'Invalid dialect {dialect} provided') def _merge_with_dialect_properties(dialect: csv.Dialect, defaults: dict[str, Any]) -> dict[str, Any]: kwds = defaults.copy() for param in MANDATORY_DIALECT_ATTRS: dialect_val = getattr(dialect, param) parser_default = parser_defaults[param] provided = kwds.get(param, parser_default) conflict_msgs = [] if provided not in (parser_default, dialect_val): msg = f"Conflicting values for '{param}': '{provided}' was provided, but the dialect specifies '{dialect_val}'. Using the dialect-specified value." 
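# At this point `msg` describes a clash between a user-supplied parser argument and the
# corresponding csv.Dialect attribute; the dialect value always wins below. The message is
# only queued when the conflict is not the implicit sep/delimiter default case tracked by
# `sep_override`. Illustrative call: read_csv('data.csv', sep=';', dialect='excel') would
# warn here and parse with the excel dialect's ',' delimiter.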
if not (param == 'delimiter' and kwds.pop('sep_override', False)): conflict_msgs.append(msg) if conflict_msgs: warnings.warn('\n\n'.join(conflict_msgs), ParserWarning, stacklevel=find_stack_level()) kwds[param] = dialect_val return kwds def _validate_skipfooter(kwds: dict[str, Any]) -> None: if kwds.get('skipfooter'): if kwds.get('iterator') or kwds.get('chunksize'): raise ValueError("'skipfooter' not supported for iteration") if kwds.get('nrows'): raise ValueError("'skipfooter' not supported with 'nrows'") # File: pandas-main/pandas/io/pickle.py """""" from __future__ import annotations import pickle from typing import TYPE_CHECKING, Any import warnings from pandas.compat import pickle_compat from pandas.util._decorators import doc from pandas.core.shared_docs import _shared_docs from pandas.io.common import get_handle if TYPE_CHECKING: from pandas._typing import CompressionOptions, FilePath, ReadPickleBuffer, StorageOptions, WriteBuffer from pandas import DataFrame, Series @doc(storage_options=_shared_docs['storage_options'], compression_options=_shared_docs['compression_options'] % 'filepath_or_buffer') def to_pickle(obj: Any, filepath_or_buffer: FilePath | WriteBuffer[bytes], compression: CompressionOptions='infer', protocol: int=pickle.HIGHEST_PROTOCOL, storage_options: StorageOptions | None=None) -> None: if protocol < 0: protocol = pickle.HIGHEST_PROTOCOL with get_handle(filepath_or_buffer, 'wb', compression=compression, is_text=False, storage_options=storage_options) as handles: pickle.dump(obj, handles.handle, protocol=protocol) @doc(storage_options=_shared_docs['storage_options'], decompression_options=_shared_docs['decompression_options'] % 'filepath_or_buffer') def read_pickle(filepath_or_buffer: FilePath | ReadPickleBuffer, compression: CompressionOptions='infer', storage_options: StorageOptions | None=None) -> DataFrame | Series: excs_to_catch = (AttributeError, ImportError, ModuleNotFoundError, TypeError) with get_handle(filepath_or_buffer, 'rb', compression=compression, is_text=False, storage_options=storage_options) as handles: try: with warnings.catch_warnings(record=True): warnings.simplefilter('ignore', Warning) return pickle.load(handles.handle) except excs_to_catch: handles.handle.seek(0) return pickle_compat.Unpickler(handles.handle).load() # File: pandas-main/pandas/io/pytables.py """""" from __future__ import annotations from contextlib import suppress import copy from datetime import date, tzinfo import itertools import os import re from textwrap import dedent from typing import TYPE_CHECKING, Any, Final, Literal, cast, overload import warnings import numpy as np from pandas._config import config, get_option, using_string_dtype from pandas._libs import lib, writers as libwriters from pandas._libs.lib import is_string_array from pandas._libs.tslibs import timezones from pandas.compat._optional import import_optional_dependency from pandas.compat.pickle_compat import patch_pickle from pandas.errors import AttributeConflictWarning, ClosedFileError, IncompatibilityWarning, PerformanceWarning, PossibleDataLossError from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import ensure_object, is_bool_dtype, is_complex_dtype, is_list_like, is_string_dtype, needs_i8_conversion from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype, ExtensionDtype, PeriodDtype from pandas.core.dtypes.missing import array_equivalent from pandas import DataFrame, DatetimeIndex, Index, MultiIndex, 
PeriodIndex, RangeIndex, Series, StringDtype, TimedeltaIndex, concat, isna from pandas.core.arrays import Categorical, DatetimeArray, PeriodArray from pandas.core.arrays.datetimes import tz_to_dtype import pandas.core.common as com from pandas.core.computation.pytables import PyTablesExpr, maybe_expression from pandas.core.construction import extract_array from pandas.core.indexes.api import ensure_index from pandas.io.common import stringify_path from pandas.io.formats.printing import adjoin, pprint_thing if TYPE_CHECKING: from collections.abc import Callable, Hashable, Iterator, Sequence from types import TracebackType from tables import Col, File, Node from pandas._typing import AnyArrayLike, ArrayLike, AxisInt, DtypeArg, FilePath, Self, Shape, npt from pandas.core.internals.blocks import Block _version = '0.15.2' _default_encoding = 'UTF-8' def _ensure_encoding(encoding: str | None) -> str: if encoding is None: encoding = _default_encoding return encoding def _ensure_str(name): if isinstance(name, str): name = str(name) return name Term = PyTablesExpr def _ensure_term(where, scope_level: int): level = scope_level + 1 if isinstance(where, (list, tuple)): where = [Term(term, scope_level=level + 1) if maybe_expression(term) else term for term in where if term is not None] elif maybe_expression(where): where = Term(where, scope_level=level) return where if where is None or len(where) else None incompatibility_doc: Final = '\nwhere criteria is being ignored as this version [%s] is too old (or\nnot-defined), read the file in and write it out to a new file to upgrade (with\nthe copy_to method)\n' attribute_conflict_doc: Final = '\nthe [%s] attribute of the existing index is [%s] which conflicts with the new\n[%s], resetting the attribute to None\n' performance_doc: Final = '\nyour performance may suffer as PyTables will pickle object types that it cannot\nmap directly to c-types [inferred_type->%s,key->%s] [items->%s]\n' _FORMAT_MAP = {'f': 'fixed', 'fixed': 'fixed', 't': 'table', 'table': 'table'} _AXES_MAP = {DataFrame: [0]} dropna_doc: Final = '\n: boolean\n drop ALL nan rows when appending to a table\n' format_doc: Final = "\n: format\n default format writing format, if None, then\n put will default to 'fixed' and append will default to 'table'\n" with config.config_prefix('io.hdf'): config.register_option('dropna_table', False, dropna_doc, validator=config.is_bool) config.register_option('default_format', None, format_doc, validator=config.is_one_of_factory(['fixed', 'table', None])) _table_mod = None _table_file_open_policy_is_strict = False def _tables(): global _table_mod global _table_file_open_policy_is_strict if _table_mod is None: import tables _table_mod = tables with suppress(AttributeError): _table_file_open_policy_is_strict = tables.file._FILE_OPEN_POLICY == 'strict' return _table_mod def to_hdf(path_or_buf: FilePath | HDFStore, key: str, value: DataFrame | Series, mode: str='a', complevel: int | None=None, complib: str | None=None, append: bool=False, format: str | None=None, index: bool=True, min_itemsize: int | dict[str, int] | None=None, nan_rep=None, dropna: bool | None=None, data_columns: Literal[True] | list[str] | None=None, errors: str='strict', encoding: str='UTF-8') -> None: if append: f = lambda store: store.append(key, value, format=format, index=index, min_itemsize=min_itemsize, nan_rep=nan_rep, dropna=dropna, data_columns=data_columns, errors=errors, encoding=encoding) else: f = lambda store: store.put(key, value, format=format, index=index, 
min_itemsize=min_itemsize, nan_rep=nan_rep, data_columns=data_columns, errors=errors, encoding=encoding, dropna=dropna) if isinstance(path_or_buf, HDFStore): f(path_or_buf) else: path_or_buf = stringify_path(path_or_buf) with HDFStore(path_or_buf, mode=mode, complevel=complevel, complib=complib) as store: f(store) def read_hdf(path_or_buf: FilePath | HDFStore, key=None, mode: str='r', errors: str='strict', where: str | list | None=None, start: int | None=None, stop: int | None=None, columns: list[str] | None=None, iterator: bool=False, chunksize: int | None=None, **kwargs): if mode not in ['r', 'r+', 'a']: raise ValueError(f'mode {mode} is not allowed while performing a read. Allowed modes are r, r+ and a.') if where is not None: where = _ensure_term(where, scope_level=1) if isinstance(path_or_buf, HDFStore): if not path_or_buf.is_open: raise OSError('The HDFStore must be open for reading.') store = path_or_buf auto_close = False else: path_or_buf = stringify_path(path_or_buf) if not isinstance(path_or_buf, str): raise NotImplementedError('Support for generic buffers has not been implemented.') try: exists = os.path.exists(path_or_buf) except (TypeError, ValueError): exists = False if not exists: raise FileNotFoundError(f'File {path_or_buf} does not exist') store = HDFStore(path_or_buf, mode=mode, errors=errors, **kwargs) auto_close = True try: if key is None: groups = store.groups() if len(groups) == 0: raise ValueError('Dataset(s) incompatible with Pandas data types, not table, or no datasets found in HDF5 file.') candidate_only_group = groups[0] for group_to_check in groups[1:]: if not _is_metadata_of(group_to_check, candidate_only_group): raise ValueError('key must be provided when HDF5 file contains multiple datasets.') key = candidate_only_group._v_pathname return store.select(key, where=where, start=start, stop=stop, columns=columns, iterator=iterator, chunksize=chunksize, auto_close=auto_close) except (ValueError, TypeError, LookupError): if not isinstance(path_or_buf, HDFStore): with suppress(AttributeError): store.close() raise def _is_metadata_of(group: Node, parent_group: Node) -> bool: if group._v_depth <= parent_group._v_depth: return False current = group while current._v_depth > 1: parent = current._v_parent if parent == parent_group and current._v_name == 'meta': return True current = current._v_parent return False class HDFStore: _handle: File | None _mode: str def __init__(self, path, mode: str='a', complevel: int | None=None, complib=None, fletcher32: bool=False, **kwargs) -> None: if 'format' in kwargs: raise ValueError('format is not a defined argument for HDFStore') tables = import_optional_dependency('tables') if complib is not None and complib not in tables.filters.all_complibs: raise ValueError(f'complib only supports {tables.filters.all_complibs} compression.') if complib is None and complevel is not None: complib = tables.filters.default_complib self._path = stringify_path(path) if mode is None: mode = 'a' self._mode = mode self._handle = None self._complevel = complevel if complevel else 0 self._complib = complib self._fletcher32 = fletcher32 self._filters = None self.open(mode=mode, **kwargs) def __fspath__(self) -> str: return self._path @property def root(self): self._check_if_open() assert self._handle is not None return self._handle.root @property def filename(self) -> str: return self._path def __getitem__(self, key: str): return self.get(key) def __setitem__(self, key: str, value) -> None: self.put(key, value) def __delitem__(self, key: str) -> int | 
None: return self.remove(key) def __getattr__(self, name: str): try: return self.get(name) except (KeyError, ClosedFileError): pass raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'") def __contains__(self, key: str) -> bool: node = self.get_node(key) if node is not None: name = node._v_pathname if key in (name, name[1:]): return True return False def __len__(self) -> int: return len(self.groups()) def __repr__(self) -> str: pstr = pprint_thing(self._path) return f'{type(self)}\nFile path: {pstr}\n' def __enter__(self) -> Self: return self def __exit__(self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None) -> None: self.close() def keys(self, include: str='pandas') -> list[str]: if include == 'pandas': return [n._v_pathname for n in self.groups()] elif include == 'native': assert self._handle is not None return [n._v_pathname for n in self._handle.walk_nodes('/', classname='Table')] raise ValueError(f"`include` should be either 'pandas' or 'native' but is '{include}'") def __iter__(self) -> Iterator[str]: return iter(self.keys()) def items(self) -> Iterator[tuple[str, list]]: for g in self.groups(): yield (g._v_pathname, g) def open(self, mode: str='a', **kwargs) -> None: tables = _tables() if self._mode != mode: if self._mode in ['a', 'w'] and mode in ['r', 'r+']: pass elif mode in ['w']: if self.is_open: raise PossibleDataLossError(f'Re-opening the file [{self._path}] with mode [{self._mode}] will delete the current file!') self._mode = mode if self.is_open: self.close() if self._complevel and self._complevel > 0: self._filters = _tables().Filters(self._complevel, self._complib, fletcher32=self._fletcher32) if _table_file_open_policy_is_strict and self.is_open: msg = 'Cannot open HDF5 file, which is already opened, even in read-only mode.' 
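# Defensive guard: _tables() records whether PyTables was imported with a 'strict'
# _FILE_OPEN_POLICY, under which the same HDF5 file may not be opened a second time,
# even read-only. If a handle is somehow still open at this point, refuse explicitly
# rather than letting tables.open_file fail below.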
raise ValueError(msg) self._handle = tables.open_file(self._path, self._mode, **kwargs) def close(self) -> None: if self._handle is not None: self._handle.close() self._handle = None @property def is_open(self) -> bool: if self._handle is None: return False return bool(self._handle.isopen) def flush(self, fsync: bool=False) -> None: if self._handle is not None: self._handle.flush() if fsync: with suppress(OSError): os.fsync(self._handle.fileno()) def get(self, key: str): with patch_pickle(): group = self.get_node(key) if group is None: raise KeyError(f'No object named {key} in the file') return self._read_group(group) def select(self, key: str, where=None, start=None, stop=None, columns=None, iterator: bool=False, chunksize: int | None=None, auto_close: bool=False): group = self.get_node(key) if group is None: raise KeyError(f'No object named {key} in the file') where = _ensure_term(where, scope_level=1) s = self._create_storer(group) s.infer_axes() def func(_start, _stop, _where): return s.read(start=_start, stop=_stop, where=_where, columns=columns) it = TableIterator(self, s, func, where=where, nrows=s.nrows, start=start, stop=stop, iterator=iterator, chunksize=chunksize, auto_close=auto_close) return it.get_result() def select_as_coordinates(self, key: str, where=None, start: int | None=None, stop: int | None=None): where = _ensure_term(where, scope_level=1) tbl = self.get_storer(key) if not isinstance(tbl, Table): raise TypeError('can only read_coordinates with a table') return tbl.read_coordinates(where=where, start=start, stop=stop) def select_column(self, key: str, column: str, start: int | None=None, stop: int | None=None): tbl = self.get_storer(key) if not isinstance(tbl, Table): raise TypeError('can only read_column with a table') return tbl.read_column(column=column, start=start, stop=stop) def select_as_multiple(self, keys, where=None, selector=None, columns=None, start=None, stop=None, iterator: bool=False, chunksize: int | None=None, auto_close: bool=False): where = _ensure_term(where, scope_level=1) if isinstance(keys, (list, tuple)) and len(keys) == 1: keys = keys[0] if isinstance(keys, str): return self.select(key=keys, where=where, columns=columns, start=start, stop=stop, iterator=iterator, chunksize=chunksize, auto_close=auto_close) if not isinstance(keys, (list, tuple)): raise TypeError('keys must be a list/tuple') if not len(keys): raise ValueError('keys must have a non-zero length') if selector is None: selector = keys[0] tbls = [self.get_storer(k) for k in keys] s = self.get_storer(selector) nrows = None for (t, k) in itertools.chain([(s, selector)], zip(tbls, keys)): if t is None: raise KeyError(f'Invalid table [{k}]') if not t.is_table: raise TypeError(f'object [{t.pathname}] is not a table, and cannot be used in all select as multiple') if nrows is None: nrows = t.nrows elif t.nrows != nrows: raise ValueError('all tables must have exactly the same nrows!') _tbls = [x for x in tbls if isinstance(x, Table)] axis = {t.non_index_axes[0][0] for t in _tbls}.pop() def func(_start, _stop, _where): objs = [t.read(where=_where, columns=columns, start=_start, stop=_stop) for t in tbls] return concat(objs, axis=axis, verify_integrity=False)._consolidate() it = TableIterator(self, s, func, where=where, nrows=nrows, start=start, stop=stop, iterator=iterator, chunksize=chunksize, auto_close=auto_close) return it.get_result(coordinates=True) def put(self, key: str, value: DataFrame | Series, format=None, index: bool=True, append: bool=False, complib=None, complevel: int | 
None=None, min_itemsize: int | dict[str, int] | None=None, nan_rep=None, data_columns: Literal[True] | list[str] | None=None, encoding=None, errors: str='strict', track_times: bool=True, dropna: bool=False) -> None: if format is None: format = get_option('io.hdf.default_format') or 'fixed' format = self._validate_format(format) self._write_to_group(key, value, format=format, index=index, append=append, complib=complib, complevel=complevel, min_itemsize=min_itemsize, nan_rep=nan_rep, data_columns=data_columns, encoding=encoding, errors=errors, track_times=track_times, dropna=dropna) def remove(self, key: str, where=None, start=None, stop=None) -> int | None: where = _ensure_term(where, scope_level=1) try: s = self.get_storer(key) except KeyError: raise except AssertionError: raise except Exception as err: if where is not None: raise ValueError('trying to remove a node with a non-None where clause!') from err node = self.get_node(key) if node is not None: node._f_remove(recursive=True) return None if com.all_none(where, start, stop): s.group._f_remove(recursive=True) return None if not s.is_table: raise ValueError('can only remove with where on objects written as tables') return s.delete(where=where, start=start, stop=stop) def append(self, key: str, value: DataFrame | Series, format=None, axes=None, index: bool | list[str]=True, append: bool=True, complib=None, complevel: int | None=None, columns=None, min_itemsize: int | dict[str, int] | None=None, nan_rep=None, chunksize: int | None=None, expectedrows=None, dropna: bool | None=None, data_columns: Literal[True] | list[str] | None=None, encoding=None, errors: str='strict') -> None: if columns is not None: raise TypeError('columns is not a supported keyword in append, try data_columns') if dropna is None: dropna = get_option('io.hdf.dropna_table') if format is None: format = get_option('io.hdf.default_format') or 'table' format = self._validate_format(format) self._write_to_group(key, value, format=format, axes=axes, index=index, append=append, complib=complib, complevel=complevel, min_itemsize=min_itemsize, nan_rep=nan_rep, chunksize=chunksize, expectedrows=expectedrows, dropna=dropna, data_columns=data_columns, encoding=encoding, errors=errors) def append_to_multiple(self, d: dict, value, selector, data_columns=None, axes=None, dropna: bool=False, **kwargs) -> None: if axes is not None: raise TypeError('axes is currently not accepted as a parameter to append_to_multiple; you can create the tables independently instead') if not isinstance(d, dict): raise ValueError('append_to_multiple must have a dictionary specified as the way to split the value') if selector not in d: raise ValueError('append_to_multiple requires a selector that is in passed dict') axis = next(iter(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))) remain_key = None remain_values: list = [] for (k, v) in d.items(): if v is None: if remain_key is not None: raise ValueError('append_to_multiple can only have one value in d that is None') remain_key = k else: remain_values.extend(v) if remain_key is not None: ordered = value.axes[axis] ordd = ordered.difference(Index(remain_values)) ordd = sorted(ordered.get_indexer(ordd)) d[remain_key] = ordered.take(ordd) if data_columns is None: data_columns = d[selector] if dropna: idxs = (value[cols].dropna(how='all').index for cols in d.values()) valid_index = next(idxs) for index in idxs: valid_index = valid_index.intersection(index) value = value.loc[valid_index] min_itemsize = kwargs.pop('min_itemsize', None) for (k, v) in 
d.items(): dc = data_columns if k == selector else None val = value.reindex(v, axis=axis) filtered = {key: value for (key, value) in min_itemsize.items() if key in v} if min_itemsize is not None else None self.append(k, val, data_columns=dc, min_itemsize=filtered, **kwargs) def create_table_index(self, key: str, columns=None, optlevel: int | None=None, kind: str | None=None) -> None: _tables() s = self.get_storer(key) if s is None: return if not isinstance(s, Table): raise TypeError('cannot create table index on a Fixed format store') s.create_index(columns=columns, optlevel=optlevel, kind=kind) def groups(self) -> list: _tables() self._check_if_open() assert self._handle is not None assert _table_mod is not None return [g for g in self._handle.walk_groups() if not isinstance(g, _table_mod.link.Link) and (getattr(g._v_attrs, 'pandas_type', None) or getattr(g, 'table', None) or (isinstance(g, _table_mod.table.Table) and g._v_name != 'table'))] def walk(self, where: str='/') -> Iterator[tuple[str, list[str], list[str]]]: _tables() self._check_if_open() assert self._handle is not None assert _table_mod is not None for g in self._handle.walk_groups(where): if getattr(g._v_attrs, 'pandas_type', None) is not None: continue groups = [] leaves = [] for child in g._v_children.values(): pandas_type = getattr(child._v_attrs, 'pandas_type', None) if pandas_type is None: if isinstance(child, _table_mod.group.Group): groups.append(child._v_name) else: leaves.append(child._v_name) yield (g._v_pathname.rstrip('/'), groups, leaves) def get_node(self, key: str) -> Node | None: self._check_if_open() if not key.startswith('/'): key = '/' + key assert self._handle is not None assert _table_mod is not None try: node = self._handle.get_node(self.root, key) except _table_mod.exceptions.NoSuchNodeError: return None assert isinstance(node, _table_mod.Node), type(node) return node def get_storer(self, key: str) -> GenericFixed | Table: group = self.get_node(key) if group is None: raise KeyError(f'No object named {key} in the file') s = self._create_storer(group) s.infer_axes() return s def copy(self, file, mode: str='w', propindexes: bool=True, keys=None, complib=None, complevel: int | None=None, fletcher32: bool=False, overwrite: bool=True) -> HDFStore: new_store = HDFStore(file, mode=mode, complib=complib, complevel=complevel, fletcher32=fletcher32) if keys is None: keys = list(self.keys()) if not isinstance(keys, (tuple, list)): keys = [keys] for k in keys: s = self.get_storer(k) if s is not None: if k in new_store: if overwrite: new_store.remove(k) data = self.select(k) if isinstance(s, Table): index: bool | list[str] = False if propindexes: index = [a.name for a in s.axes if a.is_indexed] new_store.append(k, data, index=index, data_columns=getattr(s, 'data_columns', None), encoding=s.encoding) else: new_store.put(k, data, encoding=s.encoding) return new_store def info(self) -> str: path = pprint_thing(self._path) output = f'{type(self)}\nFile path: {path}\n' if self.is_open: lkeys = sorted(self.keys()) if len(lkeys): keys = [] values = [] for k in lkeys: try: s = self.get_storer(k) if s is not None: keys.append(pprint_thing(s.pathname or k)) values.append(pprint_thing(s or 'invalid_HDFStore node')) except AssertionError: raise except Exception as detail: keys.append(k) dstr = pprint_thing(detail) values.append(f'[invalid_HDFStore node: {dstr}]') output += adjoin(12, keys, values) else: output += 'Empty' else: output += 'File is CLOSED' return output def _check_if_open(self) -> None: if not self.is_open: raise 
ClosedFileError(f'{self._path} file is not open!') def _validate_format(self, format: str) -> str: try: format = _FORMAT_MAP[format.lower()] except KeyError as err: raise TypeError(f'invalid HDFStore format specified [{format}]') from err return format def _create_storer(self, group, format=None, value: DataFrame | Series | None=None, encoding: str='UTF-8', errors: str='strict') -> GenericFixed | Table: cls: type[GenericFixed | Table] if value is not None and (not isinstance(value, (Series, DataFrame))): raise TypeError('value must be None, Series, or DataFrame') pt = getattr(group._v_attrs, 'pandas_type', None) tt = getattr(group._v_attrs, 'table_type', None) if pt is None: if value is None: _tables() assert _table_mod is not None if getattr(group, 'table', None) or isinstance(group, _table_mod.table.Table): pt = 'frame_table' tt = 'generic_table' else: raise TypeError('cannot create a storer if the object is not existing nor a value are passed') else: if isinstance(value, Series): pt = 'series' else: pt = 'frame' if format == 'table': pt += '_table' if 'table' not in pt: _STORER_MAP = {'series': SeriesFixed, 'frame': FrameFixed} try: cls = _STORER_MAP[pt] except KeyError as err: raise TypeError(f'cannot properly create the storer for: [_STORER_MAP] [group->{group},value->{type(value)},format->{format}') from err return cls(self, group, encoding=encoding, errors=errors) if tt is None: if value is not None: if pt == 'series_table': index = getattr(value, 'index', None) if index is not None: if index.nlevels == 1: tt = 'appendable_series' elif index.nlevels > 1: tt = 'appendable_multiseries' elif pt == 'frame_table': index = getattr(value, 'index', None) if index is not None: if index.nlevels == 1: tt = 'appendable_frame' elif index.nlevels > 1: tt = 'appendable_multiframe' _TABLE_MAP = {'generic_table': GenericTable, 'appendable_series': AppendableSeriesTable, 'appendable_multiseries': AppendableMultiSeriesTable, 'appendable_frame': AppendableFrameTable, 'appendable_multiframe': AppendableMultiFrameTable, 'worm': WORMTable} try: cls = _TABLE_MAP[tt] except KeyError as err: raise TypeError(f'cannot properly create the storer for: [_TABLE_MAP] [group->{group},value->{type(value)},format->{format}') from err return cls(self, group, encoding=encoding, errors=errors) def _write_to_group(self, key: str, value: DataFrame | Series, format, axes=None, index: bool | list[str]=True, append: bool=False, complib=None, complevel: int | None=None, fletcher32=None, min_itemsize: int | dict[str, int] | None=None, chunksize: int | None=None, expectedrows=None, dropna: bool=False, nan_rep=None, data_columns=None, encoding=None, errors: str='strict', track_times: bool=True) -> None: if getattr(value, 'empty', None) and (format == 'table' or append): return group = self._identify_group(key, append) s = self._create_storer(group, format, value, encoding=encoding, errors=errors) if append: if not s.is_table or (s.is_table and format == 'fixed' and s.is_exists): raise ValueError('Can only append to Tables') if not s.is_exists: s.set_object_info() else: s.set_object_info() if not s.is_table and complib: raise ValueError('Compression not supported on Fixed format stores') s.write(obj=value, axes=axes, append=append, complib=complib, complevel=complevel, fletcher32=fletcher32, min_itemsize=min_itemsize, chunksize=chunksize, expectedrows=expectedrows, dropna=dropna, nan_rep=nan_rep, data_columns=data_columns, track_times=track_times) if isinstance(s, Table) and index: s.create_index(columns=index) def 
_read_group(self, group: Node): s = self._create_storer(group) s.infer_axes() return s.read() def _identify_group(self, key: str, append: bool) -> Node: group = self.get_node(key) assert self._handle is not None if group is not None and (not append): self._handle.remove_node(group, recursive=True) group = None if group is None: group = self._create_nodes_and_group(key) return group def _create_nodes_and_group(self, key: str) -> Node: assert self._handle is not None paths = key.split('/') path = '/' for p in paths: if not len(p): continue new_path = path if not path.endswith('/'): new_path += '/' new_path += p group = self.get_node(new_path) if group is None: group = self._handle.create_group(path, p) path = new_path return group class TableIterator: chunksize: int | None store: HDFStore s: GenericFixed | Table def __init__(self, store: HDFStore, s: GenericFixed | Table, func, where, nrows, start=None, stop=None, iterator: bool=False, chunksize: int | None=None, auto_close: bool=False) -> None: self.store = store self.s = s self.func = func self.where = where if self.s.is_table: if nrows is None: nrows = 0 if start is None: start = 0 if stop is None: stop = nrows stop = min(nrows, stop) self.nrows = nrows self.start = start self.stop = stop self.coordinates = None if iterator or chunksize is not None: if chunksize is None: chunksize = 100000 self.chunksize = int(chunksize) else: self.chunksize = None self.auto_close = auto_close def __iter__(self) -> Iterator: current = self.start if self.coordinates is None: raise ValueError('Cannot iterate until get_result is called.') while current < self.stop: stop = min(current + self.chunksize, self.stop) value = self.func(None, None, self.coordinates[current:stop]) current = stop if value is None or not len(value): continue yield value self.close() def close(self) -> None: if self.auto_close: self.store.close() def get_result(self, coordinates: bool=False): if self.chunksize is not None: if not isinstance(self.s, Table): raise TypeError('can only use an iterator or chunksize on a table') self.coordinates = self.s.read_coordinates(where=self.where) return self if coordinates: if not isinstance(self.s, Table): raise TypeError('can only read_coordinates on a table') where = self.s.read_coordinates(where=self.where, start=self.start, stop=self.stop) else: where = self.where results = self.func(self.start, self.stop, where) self.close() return results class IndexCol: is_an_indexable: bool = True is_data_indexable: bool = True _info_fields = ['freq', 'tz', 'index_name'] def __init__(self, name: str, values=None, kind=None, typ=None, cname: str | None=None, axis=None, pos=None, freq=None, tz=None, index_name=None, ordered=None, table=None, meta=None, metadata=None) -> None: if not isinstance(name, str): raise ValueError('`name` must be a str.') self.values = values self.kind = kind self.typ = typ self.name = name self.cname = cname or name self.axis = axis self.pos = pos self.freq = freq self.tz = tz self.index_name = index_name self.ordered = ordered self.table = table self.meta = meta self.metadata = metadata if pos is not None: self.set_pos(pos) assert isinstance(self.name, str) assert isinstance(self.cname, str) @property def itemsize(self) -> int: return self.typ.itemsize @property def kind_attr(self) -> str: return f'{self.name}_kind' def set_pos(self, pos: int) -> None: self.pos = pos if pos is not None and self.typ is not None: self.typ._v_pos = pos def __repr__(self) -> str: temp = tuple(map(pprint_thing, (self.name, self.cname, self.axis, 
self.pos, self.kind))) return ','.join([f'{key}->{value}' for (key, value) in zip(['name', 'cname', 'axis', 'pos', 'kind'], temp)]) def __eq__(self, other: object) -> bool: return all((getattr(self, a, None) == getattr(other, a, None) for a in ['name', 'cname', 'axis', 'pos'])) def __ne__(self, other) -> bool: return not self.__eq__(other) @property def is_indexed(self) -> bool: if not hasattr(self.table, 'cols'): return False return getattr(self.table.cols, self.cname).is_indexed def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str) -> tuple[np.ndarray, np.ndarray] | tuple[Index, Index]: assert isinstance(values, np.ndarray), type(values) if values.dtype.fields is not None: values = values[self.cname].copy() val_kind = self.kind values = _maybe_convert(values, val_kind, encoding, errors) kwargs = {} kwargs['name'] = self.index_name if self.freq is not None: kwargs['freq'] = self.freq factory: type[Index | DatetimeIndex] = Index if lib.is_np_dtype(values.dtype, 'M') or isinstance(values.dtype, DatetimeTZDtype): factory = DatetimeIndex elif values.dtype == 'i8' and 'freq' in kwargs: factory = lambda x, **kwds: PeriodIndex.from_ordinals(x, freq=kwds.get('freq', None))._rename(kwds['name']) try: new_pd_index = factory(values, **kwargs) except ValueError: if 'freq' in kwargs: kwargs['freq'] = None new_pd_index = factory(values, **kwargs) final_pd_index: Index if self.tz is not None and isinstance(new_pd_index, DatetimeIndex): final_pd_index = new_pd_index.tz_localize('UTC').tz_convert(self.tz) else: final_pd_index = new_pd_index return (final_pd_index, final_pd_index) def take_data(self): return self.values @property def attrs(self): return self.table._v_attrs @property def description(self): return self.table.description @property def col(self): return getattr(self.description, self.cname, None) @property def cvalues(self): return self.values def __iter__(self) -> Iterator: return iter(self.values) def maybe_set_size(self, min_itemsize=None) -> None: if self.kind == 'string': if isinstance(min_itemsize, dict): min_itemsize = min_itemsize.get(self.name) if min_itemsize is not None and self.typ.itemsize < min_itemsize: self.typ = _tables().StringCol(itemsize=min_itemsize, pos=self.pos) def validate_names(self) -> None: pass def validate_and_set(self, handler: AppendableTable, append: bool) -> None: self.table = handler.table self.validate_col() self.validate_attr(append) self.validate_metadata(handler) self.write_metadata(handler) self.set_attr() def validate_col(self, itemsize=None): if self.kind == 'string': c = self.col if c is not None: if itemsize is None: itemsize = self.itemsize if c.itemsize < itemsize: raise ValueError(f'Trying to store a string with len [{itemsize}] in [{self.cname}] column but\nthis column has a limit of [{c.itemsize}]!\nConsider using min_itemsize to preset the sizes on these columns') return c.itemsize return None def validate_attr(self, append: bool) -> None: if append: existing_kind = getattr(self.attrs, self.kind_attr, None) if existing_kind is not None and existing_kind != self.kind: raise TypeError(f'incompatible kind in col [{existing_kind} - {self.kind}]') def update_info(self, info) -> None: for key in self._info_fields: value = getattr(self, key, None) idx = info.setdefault(self.name, {}) existing_value = idx.get(key) if key in idx and value is not None and (existing_value != value): if key in ['freq', 'index_name']: ws = attribute_conflict_doc % (key, existing_value, value) warnings.warn(ws, AttributeConflictWarning, 
stacklevel=find_stack_level()) idx[key] = None setattr(self, key, None) else: raise ValueError(f'invalid info for [{self.name}] for [{key}], existing_value [{existing_value}] conflicts with new value [{value}]') elif value is not None or existing_value is not None: idx[key] = value def set_info(self, info) -> None: idx = info.get(self.name) if idx is not None: self.__dict__.update(idx) def set_attr(self) -> None: setattr(self.attrs, self.kind_attr, self.kind) def validate_metadata(self, handler: AppendableTable) -> None: if self.meta == 'category': new_metadata = self.metadata cur_metadata = handler.read_metadata(self.cname) if new_metadata is not None and cur_metadata is not None and (not array_equivalent(new_metadata, cur_metadata, strict_nan=True, dtype_equal=True)): raise ValueError('cannot append a categorical with different categories to the existing') def write_metadata(self, handler: AppendableTable) -> None: if self.metadata is not None: handler.write_metadata(self.cname, self.metadata) class GenericIndexCol(IndexCol): @property def is_indexed(self) -> bool: return False def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str) -> tuple[Index, Index]: assert isinstance(values, np.ndarray), type(values) index = RangeIndex(len(values)) return (index, index) def set_attr(self) -> None: pass class DataCol(IndexCol): is_an_indexable = False is_data_indexable = False _info_fields = ['tz', 'ordered'] def __init__(self, name: str, values=None, kind=None, typ=None, cname: str | None=None, pos=None, tz=None, ordered=None, table=None, meta=None, metadata=None, dtype: DtypeArg | None=None, data=None) -> None: super().__init__(name=name, values=values, kind=kind, typ=typ, pos=pos, cname=cname, tz=tz, ordered=ordered, table=table, meta=meta, metadata=metadata) self.dtype = dtype self.data = data @property def dtype_attr(self) -> str: return f'{self.name}_dtype' @property def meta_attr(self) -> str: return f'{self.name}_meta' def __repr__(self) -> str: temp = tuple(map(pprint_thing, (self.name, self.cname, self.dtype, self.kind, self.shape))) return ','.join([f'{key}->{value}' for (key, value) in zip(['name', 'cname', 'dtype', 'kind', 'shape'], temp)]) def __eq__(self, other: object) -> bool: return all((getattr(self, a, None) == getattr(other, a, None) for a in ['name', 'cname', 'dtype', 'pos'])) def set_data(self, data: ArrayLike) -> None: assert data is not None assert self.dtype is None (data, dtype_name) = _get_data_and_dtype_name(data) self.data = data self.dtype = dtype_name self.kind = _dtype_to_kind(dtype_name) def take_data(self): return self.data @classmethod def _get_atom(cls, values: ArrayLike) -> Col: dtype = values.dtype itemsize = dtype.itemsize shape = values.shape if values.ndim == 1: shape = (1, values.size) if isinstance(values, Categorical): codes = values.codes atom = cls.get_atom_data(shape, kind=codes.dtype.name) elif lib.is_np_dtype(dtype, 'M') or isinstance(dtype, DatetimeTZDtype): atom = cls.get_atom_datetime64(shape) elif lib.is_np_dtype(dtype, 'm'): atom = cls.get_atom_timedelta64(shape) elif is_complex_dtype(dtype): atom = _tables().ComplexCol(itemsize=itemsize, shape=shape[0]) elif is_string_dtype(dtype): atom = cls.get_atom_string(shape, itemsize) else: atom = cls.get_atom_data(shape, kind=dtype.name) return atom @classmethod def get_atom_string(cls, shape, itemsize): return _tables().StringCol(itemsize=itemsize, shape=shape[0]) @classmethod def get_atom_coltype(cls, kind: str) -> type[Col]: if kind.startswith('uint'): k4 = kind[4:] col_name = 
f'UInt{k4}Col' elif kind.startswith('period'): col_name = 'Int64Col' else: kcap = kind.capitalize() col_name = f'{kcap}Col' return getattr(_tables(), col_name) @classmethod def get_atom_data(cls, shape, kind: str) -> Col: return cls.get_atom_coltype(kind=kind)(shape=shape[0]) @classmethod def get_atom_datetime64(cls, shape): return _tables().Int64Col(shape=shape[0]) @classmethod def get_atom_timedelta64(cls, shape): return _tables().Int64Col(shape=shape[0]) @property def shape(self): return getattr(self.data, 'shape', None) @property def cvalues(self): return self.data def validate_attr(self, append) -> None: if append: existing_fields = getattr(self.attrs, self.kind_attr, None) if existing_fields is not None and existing_fields != list(self.values): raise ValueError('appended items do not match existing items in table!') existing_dtype = getattr(self.attrs, self.dtype_attr, None) if existing_dtype is not None and existing_dtype != self.dtype: raise ValueError('appended items dtype do not match existing items dtype in table!') def convert(self, values: np.ndarray, nan_rep, encoding: str, errors: str): assert isinstance(values, np.ndarray), type(values) if values.dtype.fields is not None: values = values[self.cname] assert self.typ is not None if self.dtype is None: (converted, dtype_name) = _get_data_and_dtype_name(values) kind = _dtype_to_kind(dtype_name) else: converted = values dtype_name = self.dtype kind = self.kind assert isinstance(converted, np.ndarray) meta = self.meta metadata = self.metadata ordered = self.ordered tz = self.tz assert dtype_name is not None dtype = dtype_name if dtype.startswith('datetime64'): converted = _set_tz(converted, tz, dtype) elif dtype == 'timedelta64': converted = np.asarray(converted, dtype='m8[ns]') elif dtype == 'date': try: converted = np.asarray([date.fromordinal(v) for v in converted], dtype=object) except ValueError: converted = np.asarray([date.fromtimestamp(v) for v in converted], dtype=object) elif meta == 'category': categories = metadata codes = converted.ravel() if categories is None: categories = Index([], dtype=np.float64) else: mask = isna(categories) if mask.any(): categories = categories[~mask] codes[codes != -1] -= mask.astype(int).cumsum()._values converted = Categorical.from_codes(codes, categories=categories, ordered=ordered, validate=False) else: try: converted = converted.astype(dtype, copy=False) except TypeError: converted = converted.astype('O', copy=False) if kind == 'string': converted = _unconvert_string_array(converted, nan_rep=nan_rep, encoding=encoding, errors=errors) return (self.values, converted) def set_attr(self) -> None: setattr(self.attrs, self.kind_attr, self.values) setattr(self.attrs, self.meta_attr, self.meta) assert self.dtype is not None setattr(self.attrs, self.dtype_attr, self.dtype) class DataIndexableCol(DataCol): is_data_indexable = True def validate_names(self) -> None: if not is_string_dtype(Index(self.values).dtype): raise ValueError('cannot have non-object label DataIndexableCol') @classmethod def get_atom_string(cls, shape, itemsize): return _tables().StringCol(itemsize=itemsize) @classmethod def get_atom_data(cls, shape, kind: str) -> Col: return cls.get_atom_coltype(kind=kind)() @classmethod def get_atom_datetime64(cls, shape): return _tables().Int64Col() @classmethod def get_atom_timedelta64(cls, shape): return _tables().Int64Col() class GenericDataIndexableCol(DataIndexableCol): class Fixed: pandas_kind: str format_type: str = 'fixed' obj_type: type[DataFrame | Series] ndim: int parent: 
HDFStore is_table: bool = False def __init__(self, parent: HDFStore, group: Node, encoding: str | None='UTF-8', errors: str='strict') -> None: assert isinstance(parent, HDFStore), type(parent) assert _table_mod is not None assert isinstance(group, _table_mod.Node), type(group) self.parent = parent self.group = group self.encoding = _ensure_encoding(encoding) self.errors = errors @property def is_old_version(self) -> bool: return self.version[0] <= 0 and self.version[1] <= 10 and (self.version[2] < 1) @property def version(self) -> tuple[int, int, int]: version = getattr(self.group._v_attrs, 'pandas_version', None) if isinstance(version, str): version_tup = tuple((int(x) for x in version.split('.'))) if len(version_tup) == 2: version_tup = version_tup + (0,) assert len(version_tup) == 3 return version_tup else: return (0, 0, 0) @property def pandas_type(self): return getattr(self.group._v_attrs, 'pandas_type', None) def __repr__(self) -> str: self.infer_axes() s = self.shape if s is not None: if isinstance(s, (list, tuple)): jshape = ','.join([pprint_thing(x) for x in s]) s = f'[{jshape}]' return f'{self.pandas_type:12.12} (shape->{s})' return self.pandas_type def set_object_info(self) -> None: self.attrs.pandas_type = str(self.pandas_kind) self.attrs.pandas_version = str(_version) def copy(self) -> Fixed: new_self = copy.copy(self) return new_self @property def shape(self): return self.nrows @property def pathname(self): return self.group._v_pathname @property def _handle(self): return self.parent._handle @property def _filters(self): return self.parent._filters @property def _complevel(self) -> int: return self.parent._complevel @property def _fletcher32(self) -> bool: return self.parent._fletcher32 @property def attrs(self): return self.group._v_attrs def set_attrs(self) -> None: def get_attrs(self) -> None: @property def storable(self): return self.group @property def is_exists(self) -> bool: return False @property def nrows(self): return getattr(self.storable, 'nrows', None) def validate(self, other) -> Literal[True] | None: if other is None: return None return True def validate_version(self, where=None) -> None: def infer_axes(self) -> bool: s = self.storable if s is None: return False self.get_attrs() return True def read(self, where=None, columns=None, start: int | None=None, stop: int | None=None) -> Series | DataFrame: raise NotImplementedError('cannot read on an abstract storer: subclasses should implement') def write(self, obj, **kwargs) -> None: raise NotImplementedError('cannot write on an abstract storer: subclasses should implement') def delete(self, where=None, start: int | None=None, stop: int | None=None) -> int | None: if com.all_none(where, start, stop): self._handle.remove_node(self.group, recursive=True) return None raise TypeError('cannot delete on an abstract storer') class GenericFixed(Fixed): _index_type_map = {DatetimeIndex: 'datetime', PeriodIndex: 'period'} _reverse_index_map = {v: k for (k, v) in _index_type_map.items()} attributes: list[str] = [] def _class_to_alias(self, cls) -> str: return self._index_type_map.get(cls, '') def _alias_to_class(self, alias): if isinstance(alias, type): return alias return self._reverse_index_map.get(alias, Index) def _get_index_factory(self, attrs): index_class = self._alias_to_class(getattr(attrs, 'index_class', '')) factory: Callable if index_class == DatetimeIndex: def f(values, freq=None, tz=None): dta = DatetimeArray._simple_new(values.values, dtype=values.dtype, freq=freq) result = DatetimeIndex._simple_new(dta, 
name=None) if tz is not None: result = result.tz_localize('UTC').tz_convert(tz) return result factory = f elif index_class == PeriodIndex: def f(values, freq=None, tz=None): dtype = PeriodDtype(freq) parr = PeriodArray._simple_new(values, dtype=dtype) return PeriodIndex._simple_new(parr, name=None) factory = f else: factory = index_class kwargs = {} if 'freq' in attrs: kwargs['freq'] = attrs['freq'] if index_class is Index: factory = TimedeltaIndex if 'tz' in attrs: kwargs['tz'] = attrs['tz'] assert index_class is DatetimeIndex return (factory, kwargs) def validate_read(self, columns, where) -> None: if columns is not None: raise TypeError('cannot pass a column specification when reading a Fixed format store. this store must be selected in its entirety') if where is not None: raise TypeError('cannot pass a where specification when reading from a Fixed format store. this store must be selected in its entirety') @property def is_exists(self) -> bool: return True def set_attrs(self) -> None: self.attrs.encoding = self.encoding self.attrs.errors = self.errors def get_attrs(self) -> None: self.encoding = _ensure_encoding(getattr(self.attrs, 'encoding', None)) self.errors = getattr(self.attrs, 'errors', 'strict') for n in self.attributes: setattr(self, n, getattr(self.attrs, n, None)) def write(self, obj, **kwargs) -> None: self.set_attrs() def read_array(self, key: str, start: int | None=None, stop: int | None=None): import tables node = getattr(self.group, key) attrs = node._v_attrs transposed = getattr(attrs, 'transposed', False) if isinstance(node, tables.VLArray): ret = node[0][start:stop] else: dtype = getattr(attrs, 'value_type', None) shape = getattr(attrs, 'shape', None) if shape is not None: ret = np.empty(shape, dtype=dtype) else: ret = node[start:stop] if dtype and dtype.startswith('datetime64'): tz = getattr(attrs, 'tz', None) ret = _set_tz(ret, tz, dtype) elif dtype == 'timedelta64': ret = np.asarray(ret, dtype='m8[ns]') if transposed: return ret.T else: return ret def read_index(self, key: str, start: int | None=None, stop: int | None=None) -> Index: variety = getattr(self.attrs, f'{key}_variety') if variety == 'multi': return self.read_multi_index(key, start=start, stop=stop) elif variety == 'regular': node = getattr(self.group, key) index = self.read_index_node(node, start=start, stop=stop) return index else: raise TypeError(f'unrecognized index variety: {variety}') def write_index(self, key: str, index: Index) -> None: if isinstance(index, MultiIndex): setattr(self.attrs, f'{key}_variety', 'multi') self.write_multi_index(key, index) else: setattr(self.attrs, f'{key}_variety', 'regular') converted = _convert_index('index', index, self.encoding, self.errors) self.write_array(key, converted.values) node = getattr(self.group, key) node._v_attrs.kind = converted.kind node._v_attrs.name = index.name if isinstance(index, (DatetimeIndex, PeriodIndex)): node._v_attrs.index_class = self._class_to_alias(type(index)) if isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)): node._v_attrs.freq = index.freq if isinstance(index, DatetimeIndex) and index.tz is not None: node._v_attrs.tz = _get_tz(index.tz) def write_multi_index(self, key: str, index: MultiIndex) -> None: setattr(self.attrs, f'{key}_nlevels', index.nlevels) for (i, (lev, level_codes, name)) in enumerate(zip(index.levels, index.codes, index.names)): if isinstance(lev.dtype, ExtensionDtype): raise NotImplementedError('Saving a MultiIndex with an extension dtype is not supported.') level_key = f'{key}_level{i}' 
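# Each MultiIndex level is persisted as its own pair of nodes: the level values are written
# under '{key}_level{i}' (with kind and name saved as node attributes) and the integer codes
# for that level under '{key}_label{i}'. read_multi_index (below) reassembles the MultiIndex
# from these per-level arrays.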
conv_level = _convert_index(level_key, lev, self.encoding, self.errors) self.write_array(level_key, conv_level.values) node = getattr(self.group, level_key) node._v_attrs.kind = conv_level.kind node._v_attrs.name = name setattr(node._v_attrs, f'{key}_name{name}', name) label_key = f'{key}_label{i}' self.write_array(label_key, level_codes) def read_multi_index(self, key: str, start: int | None=None, stop: int | None=None) -> MultiIndex: nlevels = getattr(self.attrs, f'{key}_nlevels') levels = [] codes = [] names: list[Hashable] = [] for i in range(nlevels): level_key = f'{key}_level{i}' node = getattr(self.group, level_key) lev = self.read_index_node(node, start=start, stop=stop) levels.append(lev) names.append(lev.name) label_key = f'{key}_label{i}' level_codes = self.read_array(label_key, start=start, stop=stop) codes.append(level_codes) return MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=True) def read_index_node(self, node: Node, start: int | None=None, stop: int | None=None) -> Index: data = node[start:stop] if 'shape' in node._v_attrs and np.prod(node._v_attrs.shape) == 0: data = np.empty(node._v_attrs.shape, dtype=node._v_attrs.value_type) kind = node._v_attrs.kind name = None if 'name' in node._v_attrs: name = _ensure_str(node._v_attrs.name) attrs = node._v_attrs (factory, kwargs) = self._get_index_factory(attrs) if kind in ('date', 'object'): index = factory(_unconvert_index(data, kind, encoding=self.encoding, errors=self.errors), dtype=object, **kwargs) else: index = factory(_unconvert_index(data, kind, encoding=self.encoding, errors=self.errors), **kwargs) index.name = name return index def write_array_empty(self, key: str, value: ArrayLike) -> None: arr = np.empty((1,) * value.ndim) self._handle.create_array(self.group, key, arr) node = getattr(self.group, key) node._v_attrs.value_type = str(value.dtype) node._v_attrs.shape = value.shape def write_array(self, key: str, obj: AnyArrayLike, items: Index | None=None) -> None: value = extract_array(obj, extract_numpy=True) if key in self.group: self._handle.remove_node(self.group, key) empty_array = value.size == 0 transposed = False if isinstance(value.dtype, CategoricalDtype): raise NotImplementedError('Cannot store a category dtype in a HDF5 dataset that uses format="fixed". 
Use format="table".') if not empty_array: if hasattr(value, 'T'): value = value.T transposed = True atom = None if self._filters is not None: with suppress(ValueError): atom = _tables().Atom.from_dtype(value.dtype) if atom is not None: if not empty_array: ca = self._handle.create_carray(self.group, key, atom, value.shape, filters=self._filters) ca[:] = value else: self.write_array_empty(key, value) elif value.dtype.type == np.object_: inferred_type = lib.infer_dtype(value, skipna=False) if empty_array: pass elif inferred_type == 'string': pass elif get_option('performance_warnings'): ws = performance_doc % (inferred_type, key, items) warnings.warn(ws, PerformanceWarning, stacklevel=find_stack_level()) vlarr = self._handle.create_vlarray(self.group, key, _tables().ObjectAtom()) vlarr.append(value) elif lib.is_np_dtype(value.dtype, 'M'): self._handle.create_array(self.group, key, value.view('i8')) getattr(self.group, key)._v_attrs.value_type = str(value.dtype) elif isinstance(value.dtype, DatetimeTZDtype): self._handle.create_array(self.group, key, value.asi8) node = getattr(self.group, key) node._v_attrs.tz = _get_tz(value.tz) node._v_attrs.value_type = f'datetime64[{value.dtype.unit}]' elif lib.is_np_dtype(value.dtype, 'm'): self._handle.create_array(self.group, key, value.view('i8')) getattr(self.group, key)._v_attrs.value_type = 'timedelta64' elif empty_array: self.write_array_empty(key, value) else: self._handle.create_array(self.group, key, value) getattr(self.group, key)._v_attrs.transposed = transposed class SeriesFixed(GenericFixed): pandas_kind = 'series' attributes = ['name'] name: Hashable @property def shape(self) -> tuple[int] | None: try: return (len(self.group.values),) except (TypeError, AttributeError): return None def read(self, where=None, columns=None, start: int | None=None, stop: int | None=None) -> Series: self.validate_read(columns, where) index = self.read_index('index', start=start, stop=stop) values = self.read_array('values', start=start, stop=stop) result = Series(values, index=index, name=self.name, copy=False) if using_string_dtype() and is_string_array(values, skipna=True): result = result.astype(StringDtype(na_value=np.nan)) return result def write(self, obj, **kwargs) -> None: super().write(obj, **kwargs) self.write_index('index', obj.index) self.write_array('values', obj) self.attrs.name = obj.name class BlockManagerFixed(GenericFixed): attributes = ['ndim', 'nblocks'] nblocks: int @property def shape(self) -> Shape | None: try: ndim = self.ndim items = 0 for i in range(self.nblocks): node = getattr(self.group, f'block{i}_items') shape = getattr(node, 'shape', None) if shape is not None: items += shape[0] node = self.group.block0_values shape = getattr(node, 'shape', None) if shape is not None: shape = list(shape[0:ndim - 1]) else: shape = [] shape.append(items) return shape except AttributeError: return None def read(self, where=None, columns=None, start: int | None=None, stop: int | None=None) -> DataFrame: self.validate_read(columns, where) select_axis = self.obj_type()._get_block_manager_axis(0) axes = [] for i in range(self.ndim): (_start, _stop) = (start, stop) if i == select_axis else (None, None) ax = self.read_index(f'axis{i}', start=_start, stop=_stop) axes.append(ax) items = axes[0] dfs = [] for i in range(self.nblocks): blk_items = self.read_index(f'block{i}_items') values = self.read_array(f'block{i}_values', start=_start, stop=_stop) columns = items[items.get_indexer(blk_items)] df = DataFrame(values.T, columns=columns, index=axes[1], 
copy=False) if using_string_dtype() and is_string_array(values, skipna=True): df = df.astype(StringDtype(na_value=np.nan)) dfs.append(df) if len(dfs) > 0: out = concat(dfs, axis=1).copy() return out.reindex(columns=items) return DataFrame(columns=axes[0], index=axes[1]) def write(self, obj, **kwargs) -> None: super().write(obj, **kwargs) data = obj._mgr if not data.is_consolidated(): data = data.consolidate() self.attrs.ndim = data.ndim for (i, ax) in enumerate(data.axes): if i == 0 and (not ax.is_unique): raise ValueError('Columns index has to be unique for fixed format') self.write_index(f'axis{i}', ax) self.attrs.nblocks = len(data.blocks) for (i, blk) in enumerate(data.blocks): blk_items = data.items.take(blk.mgr_locs) self.write_array(f'block{i}_values', blk.values, items=blk_items) self.write_index(f'block{i}_items', blk_items) class FrameFixed(BlockManagerFixed): pandas_kind = 'frame' obj_type = DataFrame class Table(Fixed): pandas_kind = 'wide_table' format_type: str = 'table' table_type: str levels: int | list[Hashable] = 1 is_table = True metadata: list def __init__(self, parent: HDFStore, group: Node, encoding: str | None=None, errors: str='strict', index_axes: list[IndexCol] | None=None, non_index_axes: list[tuple[AxisInt, Any]] | None=None, values_axes: list[DataCol] | None=None, data_columns: list | None=None, info: dict | None=None, nan_rep=None) -> None: super().__init__(parent, group, encoding=encoding, errors=errors) self.index_axes = index_axes or [] self.non_index_axes = non_index_axes or [] self.values_axes = values_axes or [] self.data_columns = data_columns or [] self.info = info or {} self.nan_rep = nan_rep @property def table_type_short(self) -> str: return self.table_type.split('_')[0] def __repr__(self) -> str: self.infer_axes() jdc = ','.join(self.data_columns) if len(self.data_columns) else '' dc = f',dc->[{jdc}]' ver = '' if self.is_old_version: jver = '.'.join([str(x) for x in self.version]) ver = f'[{jver}]' jindex_axes = ','.join([a.name for a in self.index_axes]) return f'{self.pandas_type:12.12}{ver} (typ->{self.table_type_short},nrows->{self.nrows},ncols->{self.ncols},indexers->[{jindex_axes}]{dc})' def __getitem__(self, c: str): for a in self.axes: if c == a.name: return a return None def validate(self, other) -> None: if other is None: return if other.table_type != self.table_type: raise TypeError(f'incompatible table_type with existing [{other.table_type} - {self.table_type}]') for c in ['index_axes', 'non_index_axes', 'values_axes']: sv = getattr(self, c, None) ov = getattr(other, c, None) if sv != ov: for (i, sax) in enumerate(sv): oax = ov[i] if sax != oax: raise ValueError(f'invalid combination of [{c}] on appending data [{sax}] vs current table [{oax}]') raise Exception(f'invalid combination of [{c}] on appending data [{sv}] vs current table [{ov}]') @property def is_multi_index(self) -> bool: return isinstance(self.levels, list) def validate_multiindex(self, obj: DataFrame | Series) -> tuple[DataFrame, list[Hashable]]: levels = com.fill_missing_names(obj.index.names) try: reset_obj = obj.reset_index() except ValueError as err: raise ValueError('duplicate names/columns in the multi-index when storing as a table') from err assert isinstance(reset_obj, DataFrame) return (reset_obj, levels) @property def nrows_expected(self) -> int: return np.prod([i.cvalues.shape[0] for i in self.index_axes]) @property def is_exists(self) -> bool: return 'table' in self.group @property def storable(self): return getattr(self.group, 'table', None) @property def 
table(self): return self.storable @property def dtype(self): return self.table.dtype @property def description(self): return self.table.description @property def axes(self) -> itertools.chain[IndexCol]: return itertools.chain(self.index_axes, self.values_axes) @property def ncols(self) -> int: return sum((len(a.values) for a in self.values_axes)) @property def is_transposed(self) -> bool: return False @property def data_orientation(self) -> tuple[int, ...]: return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes], [int(a.axis) for a in self.index_axes])) def queryables(self) -> dict[str, Any]: axis_names = {0: 'index', 1: 'columns'} d1 = [(a.cname, a) for a in self.index_axes] d2 = [(axis_names[axis], None) for (axis, values) in self.non_index_axes] d3 = [(v.cname, v) for v in self.values_axes if v.name in set(self.data_columns)] return dict(d1 + d2 + d3) def index_cols(self) -> list[tuple[Any, Any]]: return [(i.axis, i.cname) for i in self.index_axes] def values_cols(self) -> list[str]: return [i.cname for i in self.values_axes] def _get_metadata_path(self, key: str) -> str: group = self.group._v_pathname return f'{group}/meta/{key}/meta' def write_metadata(self, key: str, values: np.ndarray) -> None: self.parent.put(self._get_metadata_path(key), Series(values, copy=False), format='table', encoding=self.encoding, errors=self.errors, nan_rep=self.nan_rep) def read_metadata(self, key: str): if getattr(getattr(self.group, 'meta', None), key, None) is not None: return self.parent.select(self._get_metadata_path(key)) return None def set_attrs(self) -> None: self.attrs.table_type = str(self.table_type) self.attrs.index_cols = self.index_cols() self.attrs.values_cols = self.values_cols() self.attrs.non_index_axes = self.non_index_axes self.attrs.data_columns = self.data_columns self.attrs.nan_rep = self.nan_rep self.attrs.encoding = self.encoding self.attrs.errors = self.errors self.attrs.levels = self.levels self.attrs.info = self.info def get_attrs(self) -> None: self.non_index_axes = getattr(self.attrs, 'non_index_axes', None) or [] self.data_columns = getattr(self.attrs, 'data_columns', None) or [] self.info = getattr(self.attrs, 'info', None) or {} self.nan_rep = getattr(self.attrs, 'nan_rep', None) self.encoding = _ensure_encoding(getattr(self.attrs, 'encoding', None)) self.errors = getattr(self.attrs, 'errors', 'strict') self.levels: list[Hashable] = getattr(self.attrs, 'levels', None) or [] self.index_axes = [a for a in self.indexables if a.is_an_indexable] self.values_axes = [a for a in self.indexables if not a.is_an_indexable] def validate_version(self, where=None) -> None: if where is not None: if self.is_old_version: ws = incompatibility_doc % '.'.join([str(x) for x in self.version]) warnings.warn(ws, IncompatibilityWarning, stacklevel=find_stack_level()) def validate_min_itemsize(self, min_itemsize) -> None: if min_itemsize is None: return if not isinstance(min_itemsize, dict): return q = self.queryables() for k in min_itemsize: if k == 'values': continue if k not in q: raise ValueError(f'min_itemsize has the key [{k}] which is not an axis or data_column') @cache_readonly def indexables(self): _indexables = [] desc = self.description table_attrs = self.table.attrs for (i, (axis, name)) in enumerate(self.attrs.index_cols): atom = getattr(desc, name) md = self.read_metadata(name) meta = 'category' if md is not None else None kind_attr = f'{name}_kind' kind = getattr(table_attrs, kind_attr, None) index_col = IndexCol(name=name, axis=axis, pos=i, kind=kind, typ=atom, 
table=self.table, meta=meta, metadata=md) _indexables.append(index_col) dc = set(self.data_columns) base_pos = len(_indexables) def f(i, c: str) -> DataCol: assert isinstance(c, str) klass = DataCol if c in dc: klass = DataIndexableCol atom = getattr(desc, c) adj_name = _maybe_adjust_name(c, self.version) values = getattr(table_attrs, f'{adj_name}_kind', None) dtype = getattr(table_attrs, f'{adj_name}_dtype', None) kind = _dtype_to_kind(dtype) md = self.read_metadata(c) meta = getattr(table_attrs, f'{adj_name}_meta', None) obj = klass(name=adj_name, cname=c, values=values, kind=kind, pos=base_pos + i, typ=atom, table=self.table, meta=meta, metadata=md, dtype=dtype) return obj _indexables.extend([f(i, c) for (i, c) in enumerate(self.attrs.values_cols)]) return _indexables def create_index(self, columns=None, optlevel=None, kind: str | None=None) -> None: if not self.infer_axes(): return if columns is False: return if columns is None or columns is True: columns = [a.cname for a in self.axes if a.is_data_indexable] if not isinstance(columns, (tuple, list)): columns = [columns] kw = {} if optlevel is not None: kw['optlevel'] = optlevel if kind is not None: kw['kind'] = kind table = self.table for c in columns: v = getattr(table.cols, c, None) if v is not None: if v.is_indexed: index = v.index cur_optlevel = index.optlevel cur_kind = index.kind if kind is not None and cur_kind != kind: v.remove_index() else: kw['kind'] = cur_kind if optlevel is not None and cur_optlevel != optlevel: v.remove_index() else: kw['optlevel'] = cur_optlevel if not v.is_indexed: if v.type.startswith('complex'): raise TypeError('Columns containing complex values can be stored but cannot be indexed when using table format. Either use fixed format, set index=False, or do not include the columns containing complex values to data_columns when initializing the table.') v.create_index(**kw) elif c in self.non_index_axes[0][1]: raise AttributeError(f'column {c} is not a data_column.\nIn order to read column {c} you must reload the dataframe \ninto HDFStore and include {c} with the data_columns argument.') def _read_axes(self, where, start: int | None=None, stop: int | None=None) -> list[tuple[np.ndarray, np.ndarray] | tuple[Index, Index]]: selection = Selection(self, where=where, start=start, stop=stop) values = selection.select() results = [] for a in self.axes: a.set_info(self.info) res = a.convert(values, nan_rep=self.nan_rep, encoding=self.encoding, errors=self.errors) results.append(res) return results @classmethod def get_object(cls, obj, transposed: bool): return obj def validate_data_columns(self, data_columns, min_itemsize, non_index_axes) -> list: if not len(non_index_axes): return [] (axis, axis_labels) = non_index_axes[0] info = self.info.get(axis, {}) if info.get('type') == 'MultiIndex' and data_columns: raise ValueError(f'cannot use a multi-index on axis [{axis}] with data_columns {data_columns}') if data_columns is True: data_columns = list(axis_labels) elif data_columns is None: data_columns = [] if isinstance(min_itemsize, dict): existing_data_columns = set(data_columns) data_columns = list(data_columns) data_columns.extend([k for k in min_itemsize.keys() if k != 'values' and k not in existing_data_columns]) return [c for c in data_columns if c in axis_labels] def _create_axes(self, axes, obj: DataFrame, validate: bool=True, nan_rep=None, data_columns=None, min_itemsize=None): if not isinstance(obj, DataFrame): group = self.group._v_name raise TypeError(f'cannot properly create the storer for: 
[group->{group},value->{type(obj)}]') if axes is None: axes = [0] axes = [obj._get_axis_number(a) for a in axes] if self.infer_axes(): table_exists = True axes = [a.axis for a in self.index_axes] data_columns = list(self.data_columns) nan_rep = self.nan_rep else: table_exists = False new_info = self.info assert self.ndim == 2 if len(axes) != self.ndim - 1: raise ValueError('currently only support ndim-1 indexers in an AppendableTable') new_non_index_axes: list = [] if nan_rep is None: nan_rep = 'nan' idx = next((x for x in [0, 1] if x not in axes)) a = obj.axes[idx] append_axis = list(a) if table_exists: indexer = len(new_non_index_axes) exist_axis = self.non_index_axes[indexer][1] if not array_equivalent(np.array(append_axis), np.array(exist_axis), strict_nan=True, dtype_equal=True): if array_equivalent(np.array(sorted(append_axis)), np.array(sorted(exist_axis)), strict_nan=True, dtype_equal=True): append_axis = exist_axis info = new_info.setdefault(idx, {}) info['names'] = list(a.names) info['type'] = type(a).__name__ new_non_index_axes.append((idx, append_axis)) idx = axes[0] a = obj.axes[idx] axis_name = obj._get_axis_name(idx) new_index = _convert_index(axis_name, a, self.encoding, self.errors) new_index.axis = idx new_index.set_pos(0) new_index.update_info(new_info) new_index.maybe_set_size(min_itemsize) new_index_axes = [new_index] j = len(new_index_axes) assert j == 1 assert len(new_non_index_axes) == 1 for a in new_non_index_axes: obj = _reindex_axis(obj, a[0], a[1]) transposed = new_index.axis == 1 data_columns = self.validate_data_columns(data_columns, min_itemsize, new_non_index_axes) frame = self.get_object(obj, transposed)._consolidate() (blocks, blk_items) = self._get_blocks_and_items(frame, table_exists, new_non_index_axes, self.values_axes, data_columns) vaxes = [] for (i, (blk, b_items)) in enumerate(zip(blocks, blk_items)): klass = DataCol name = None if data_columns and len(b_items) == 1 and (b_items[0] in data_columns): klass = DataIndexableCol name = b_items[0] if not (name is None or isinstance(name, str)): raise ValueError('cannot have non-object label DataIndexableCol') existing_col: DataCol | None if table_exists and validate: try: existing_col = self.values_axes[i] except (IndexError, KeyError) as err: raise ValueError(f'Incompatible appended table [{blocks}]with existing table [{self.values_axes}]') from err else: existing_col = None new_name = name or f'values_block_{i}' data_converted = _maybe_convert_for_string_atom(new_name, blk.values, existing_col=existing_col, min_itemsize=min_itemsize, nan_rep=nan_rep, encoding=self.encoding, errors=self.errors, columns=b_items) adj_name = _maybe_adjust_name(new_name, self.version) typ = klass._get_atom(data_converted) kind = _dtype_to_kind(data_converted.dtype.name) tz = None if getattr(data_converted, 'tz', None) is not None: tz = _get_tz(data_converted.tz) meta = metadata = ordered = None if isinstance(data_converted.dtype, CategoricalDtype): ordered = data_converted.ordered meta = 'category' metadata = np.asarray(data_converted.categories).ravel() (data, dtype_name) = _get_data_and_dtype_name(data_converted) col = klass(name=adj_name, cname=new_name, values=list(b_items), typ=typ, pos=j, kind=kind, tz=tz, ordered=ordered, meta=meta, metadata=metadata, dtype=dtype_name, data=data) col.update_info(new_info) vaxes.append(col) j += 1 dcs = [col.name for col in vaxes if col.is_data_indexable] new_table = type(self)(parent=self.parent, group=self.group, encoding=self.encoding, errors=self.errors, index_axes=new_index_axes, 
non_index_axes=new_non_index_axes, values_axes=vaxes, data_columns=dcs, info=new_info, nan_rep=nan_rep) if hasattr(self, 'levels'): new_table.levels = self.levels new_table.validate_min_itemsize(min_itemsize) if validate and table_exists: new_table.validate(self) return new_table @staticmethod def _get_blocks_and_items(frame: DataFrame, table_exists: bool, new_non_index_axes, values_axes, data_columns): def get_blk_items(mgr): return [mgr.items.take(blk.mgr_locs) for blk in mgr.blocks] mgr = frame._mgr blocks: list[Block] = list(mgr.blocks) blk_items: list[Index] = get_blk_items(mgr) if len(data_columns): (axis, axis_labels) = new_non_index_axes[0] new_labels = Index(axis_labels).difference(Index(data_columns)) mgr = frame.reindex(new_labels, axis=axis)._mgr blocks = list(mgr.blocks) blk_items = get_blk_items(mgr) for c in data_columns: mgr = frame.reindex([c], axis=axis)._mgr blocks.extend(mgr.blocks) blk_items.extend(get_blk_items(mgr)) if table_exists: by_items = {tuple(b_items.tolist()): (b, b_items) for (b, b_items) in zip(blocks, blk_items)} new_blocks: list[Block] = [] new_blk_items = [] for ea in values_axes: items = tuple(ea.values) try: (b, b_items) = by_items.pop(items) new_blocks.append(b) new_blk_items.append(b_items) except (IndexError, KeyError) as err: jitems = ','.join([pprint_thing(item) for item in items]) raise ValueError(f'cannot match existing table structure for [{jitems}] on appending data') from err blocks = new_blocks blk_items = new_blk_items return (blocks, blk_items) def process_axes(self, obj, selection: Selection, columns=None) -> DataFrame: if columns is not None: columns = list(columns) if columns is not None and self.is_multi_index: assert isinstance(self.levels, list) for n in self.levels: if n not in columns: columns.insert(0, n) for (axis, labels) in self.non_index_axes: obj = _reindex_axis(obj, axis, labels, columns) def process_filter(field, filt, op): for axis_name in obj._AXIS_ORDERS: axis_number = obj._get_axis_number(axis_name) axis_values = obj._get_axis(axis_name) assert axis_number is not None if field == axis_name: if self.is_multi_index: filt = filt.union(Index(self.levels)) takers = op(axis_values, filt) return obj.loc(axis=axis_number)[takers] elif field in axis_values: values = ensure_index(getattr(obj, field).values) filt = ensure_index(filt) if isinstance(obj, DataFrame): axis_number = 1 - axis_number takers = op(values, filt) return obj.loc(axis=axis_number)[takers] raise ValueError(f'cannot find the field [{field}] for filtering!') if selection.filter is not None: for (field, op, filt) in selection.filter.format(): obj = process_filter(field, filt, op) return obj def create_description(self, complib, complevel: int | None, fletcher32: bool, expectedrows: int | None) -> dict[str, Any]: if expectedrows is None: expectedrows = max(self.nrows_expected, 10000) d = {'name': 'table', 'expectedrows': expectedrows} d['description'] = {a.cname: a.typ for a in self.axes} if complib: if complevel is None: complevel = self._complevel or 9 filters = _tables().Filters(complevel=complevel, complib=complib, fletcher32=fletcher32 or self._fletcher32) d['filters'] = filters elif self._filters is not None: d['filters'] = self._filters return d def read_coordinates(self, where=None, start: int | None=None, stop: int | None=None): self.validate_version(where) if not self.infer_axes(): return False selection = Selection(self, where=where, start=start, stop=stop) coords = selection.select_coords() if selection.filter is not None: for (field, op, filt) in 
selection.filter.format(): data = self.read_column(field, start=coords.min(), stop=coords.max() + 1) coords = coords[op(data.iloc[coords - coords.min()], filt).values] return Index(coords) def read_column(self, column: str, where=None, start: int | None=None, stop: int | None=None): self.validate_version() if not self.infer_axes(): return False if where is not None: raise TypeError('read_column does not currently accept a where clause') for a in self.axes: if column == a.name: if not a.is_data_indexable: raise ValueError(f'column [{column}] can not be extracted individually; it is not data indexable') c = getattr(self.table.cols, column) a.set_info(self.info) col_values = a.convert(c[start:stop], nan_rep=self.nan_rep, encoding=self.encoding, errors=self.errors) cvs = col_values[1] return Series(cvs, name=column, copy=False) raise KeyError(f'column [{column}] not found in the table') class WORMTable(Table): table_type = 'worm' def read(self, where=None, columns=None, start: int | None=None, stop: int | None=None): raise NotImplementedError('WORMTable needs to implement read') def write(self, obj, **kwargs) -> None: raise NotImplementedError('WORMTable needs to implement write') class AppendableTable(Table): table_type = 'appendable' def write(self, obj, axes=None, append: bool=False, complib=None, complevel=None, fletcher32=None, min_itemsize=None, chunksize: int | None=None, expectedrows=None, dropna: bool=False, nan_rep=None, data_columns=None, track_times: bool=True) -> None: if not append and self.is_exists: self._handle.remove_node(self.group, 'table') table = self._create_axes(axes=axes, obj=obj, validate=append, min_itemsize=min_itemsize, nan_rep=nan_rep, data_columns=data_columns) for a in table.axes: a.validate_names() if not table.is_exists: options = table.create_description(complib=complib, complevel=complevel, fletcher32=fletcher32, expectedrows=expectedrows) table.set_attrs() options['track_times'] = track_times table._handle.create_table(table.group, **options) table.attrs.info = table.info for a in table.axes: a.validate_and_set(table, append) table.write_data(chunksize, dropna=dropna) def write_data(self, chunksize: int | None, dropna: bool=False) -> None: names = self.dtype.names nrows = self.nrows_expected masks = [] if dropna: for a in self.values_axes: mask = isna(a.data).all(axis=0) if isinstance(mask, np.ndarray): masks.append(mask.astype('u1', copy=False)) if len(masks): mask = masks[0] for m in masks[1:]: mask = mask & m mask = mask.ravel() else: mask = None indexes = [a.cvalues for a in self.index_axes] nindexes = len(indexes) assert nindexes == 1, nindexes values = [a.take_data() for a in self.values_axes] values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1)) for v in values] bvalues = [] for (i, v) in enumerate(values): new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape bvalues.append(v.reshape(new_shape)) if chunksize is None: chunksize = 100000 rows = np.empty(min(chunksize, nrows), dtype=self.dtype) chunks = nrows // chunksize + 1 for i in range(chunks): start_i = i * chunksize end_i = min((i + 1) * chunksize, nrows) if start_i >= end_i: break self.write_data_chunk(rows, indexes=[a[start_i:end_i] for a in indexes], mask=mask[start_i:end_i] if mask is not None else None, values=[v[start_i:end_i] for v in bvalues]) def write_data_chunk(self, rows: np.ndarray, indexes: list[np.ndarray], mask: npt.NDArray[np.bool_] | None, values: list[np.ndarray]) -> None: for v in values: if not np.prod(v.shape): return nrows = indexes[0].shape[0] if nrows != 
len(rows): rows = np.empty(nrows, dtype=self.dtype) names = self.dtype.names nindexes = len(indexes) for (i, idx) in enumerate(indexes): rows[names[i]] = idx for (i, v) in enumerate(values): rows[names[i + nindexes]] = v if mask is not None: m = ~mask.ravel().astype(bool, copy=False) if not m.all(): rows = rows[m] if len(rows): self.table.append(rows) self.table.flush() def delete(self, where=None, start: int | None=None, stop: int | None=None) -> int | None: if where is None or not len(where): if start is None and stop is None: nrows = self.nrows self._handle.remove_node(self.group, recursive=True) else: if stop is None: stop = self.nrows nrows = self.table.remove_rows(start=start, stop=stop) self.table.flush() return nrows if not self.infer_axes(): return None table = self.table selection = Selection(self, where, start=start, stop=stop) values = selection.select_coords() sorted_series = Series(values, copy=False).sort_values() ln = len(sorted_series) if ln: diff = sorted_series.diff() groups = list(diff[diff > 1].index) if not len(groups): groups = [0] if groups[-1] != ln: groups.append(ln) if groups[0] != 0: groups.insert(0, 0) pg = groups.pop() for g in reversed(groups): rows = sorted_series.take(range(g, pg)) table.remove_rows(start=rows[rows.index[0]], stop=rows[rows.index[-1]] + 1) pg = g self.table.flush() return ln class AppendableFrameTable(AppendableTable): pandas_kind = 'frame_table' table_type = 'appendable_frame' ndim = 2 obj_type: type[DataFrame | Series] = DataFrame @property def is_transposed(self) -> bool: return self.index_axes[0].axis == 1 @classmethod def get_object(cls, obj, transposed: bool): if transposed: obj = obj.T return obj def read(self, where=None, columns=None, start: int | None=None, stop: int | None=None): self.validate_version(where) if not self.infer_axes(): return None result = self._read_axes(where=where, start=start, stop=stop) info = self.info.get(self.non_index_axes[0][0], {}) if len(self.non_index_axes) else {} inds = [i for (i, ax) in enumerate(self.axes) if ax is self.index_axes[0]] assert len(inds) == 1 ind = inds[0] index = result[ind][0] frames = [] for (i, a) in enumerate(self.axes): if a not in self.values_axes: continue (index_vals, cvalues) = result[i] if info.get('type') != 'MultiIndex': cols = Index(index_vals) else: cols = MultiIndex.from_tuples(index_vals) names = info.get('names') if names is not None: cols.set_names(names, inplace=True) if self.is_transposed: values = cvalues index_ = cols cols_ = Index(index, name=getattr(index, 'name', None)) else: values = cvalues.T index_ = Index(index, name=getattr(index, 'name', None)) cols_ = cols if values.ndim == 1 and isinstance(values, np.ndarray): values = values.reshape((1, values.shape[0])) if isinstance(values, (np.ndarray, DatetimeArray)): df = DataFrame(values.T, columns=cols_, index=index_, copy=False) elif isinstance(values, Index): df = DataFrame(values, columns=cols_, index=index_) else: df = DataFrame._from_arrays([values], columns=cols_, index=index_) if not (using_string_dtype() and values.dtype.kind == 'O'): assert (df.dtypes == values.dtype).all(), (df.dtypes, values.dtype) if using_string_dtype() and is_string_array(values, skipna=True): df = df.astype(StringDtype(na_value=np.nan)) frames.append(df) if len(frames) == 1: df = frames[0] else: df = concat(frames, axis=1) selection = Selection(self, where=where, start=start, stop=stop) df = self.process_axes(df, selection=selection, columns=columns) return df class AppendableSeriesTable(AppendableFrameTable): pandas_kind = 
'series_table' table_type = 'appendable_series' ndim = 2 obj_type = Series @property def is_transposed(self) -> bool: return False @classmethod def get_object(cls, obj, transposed: bool): return obj def write(self, obj, data_columns=None, **kwargs) -> None: if not isinstance(obj, DataFrame): name = obj.name or 'values' obj = obj.to_frame(name) super().write(obj=obj, data_columns=obj.columns.tolist(), **kwargs) def read(self, where=None, columns=None, start: int | None=None, stop: int | None=None) -> Series: is_multi_index = self.is_multi_index if columns is not None and is_multi_index: assert isinstance(self.levels, list) for n in self.levels: if n not in columns: columns.insert(0, n) s = super().read(where=where, columns=columns, start=start, stop=stop) if is_multi_index: s.set_index(self.levels, inplace=True) s = s.iloc[:, 0] if s.name == 'values': s.name = None return s class AppendableMultiSeriesTable(AppendableSeriesTable): pandas_kind = 'series_table' table_type = 'appendable_multiseries' def write(self, obj, **kwargs) -> None: name = obj.name or 'values' (newobj, self.levels) = self.validate_multiindex(obj) assert isinstance(self.levels, list) cols = list(self.levels) cols.append(name) newobj.columns = Index(cols) super().write(obj=newobj, **kwargs) class GenericTable(AppendableFrameTable): pandas_kind = 'frame_table' table_type = 'generic_table' ndim = 2 obj_type = DataFrame levels: list[Hashable] @property def pandas_type(self) -> str: return self.pandas_kind @property def storable(self): return getattr(self.group, 'table', None) or self.group def get_attrs(self) -> None: self.non_index_axes = [] self.nan_rep = None self.levels = [] self.index_axes = [a for a in self.indexables if a.is_an_indexable] self.values_axes = [a for a in self.indexables if not a.is_an_indexable] self.data_columns = [a.name for a in self.values_axes] @cache_readonly def indexables(self): d = self.description md = self.read_metadata('index') meta = 'category' if md is not None else None index_col = GenericIndexCol(name='index', axis=0, table=self.table, meta=meta, metadata=md) _indexables: list[GenericIndexCol | GenericDataIndexableCol] = [index_col] for (i, n) in enumerate(d._v_names): assert isinstance(n, str) atom = getattr(d, n) md = self.read_metadata(n) meta = 'category' if md is not None else None dc = GenericDataIndexableCol(name=n, pos=i, values=[n], typ=atom, table=self.table, meta=meta, metadata=md) _indexables.append(dc) return _indexables def write(self, **kwargs) -> None: raise NotImplementedError('cannot write on an generic table') class AppendableMultiFrameTable(AppendableFrameTable): table_type = 'appendable_multiframe' obj_type = DataFrame ndim = 2 _re_levels = re.compile('^level_\\d+$') @property def table_type_short(self) -> str: return 'appendable_multi' def write(self, obj, data_columns=None, **kwargs) -> None: if data_columns is None: data_columns = [] elif data_columns is True: data_columns = obj.columns.tolist() (obj, self.levels) = self.validate_multiindex(obj) assert isinstance(self.levels, list) for n in self.levels: if n not in data_columns: data_columns.insert(0, n) super().write(obj=obj, data_columns=data_columns, **kwargs) def read(self, where=None, columns=None, start: int | None=None, stop: int | None=None) -> DataFrame: df = super().read(where=where, columns=columns, start=start, stop=stop) df = df.set_index(self.levels) df.index = df.index.set_names([None if self._re_levels.search(name) else name for name in df.index.names]) return df def _reindex_axis(obj: DataFrame, 
axis: AxisInt, labels: Index, other=None) -> DataFrame: ax = obj._get_axis(axis) labels = ensure_index(labels) if other is not None: other = ensure_index(other) if (other is None or labels.equals(other)) and labels.equals(ax): return obj labels = ensure_index(labels.unique()) if other is not None: labels = ensure_index(other.unique()).intersection(labels, sort=False) if not labels.equals(ax): slicer: list[slice | Index] = [slice(None, None)] * obj.ndim slicer[axis] = labels obj = obj.loc[tuple(slicer)] return obj def _get_tz(tz: tzinfo) -> str | tzinfo: zone = timezones.get_timezone(tz) return zone def _set_tz(values: npt.NDArray[np.int64], tz: str | tzinfo | None, datetime64_dtype: str) -> DatetimeArray: assert values.dtype == 'i8', values.dtype (unit, _) = np.datetime_data(datetime64_dtype) dtype = tz_to_dtype(tz=tz, unit=unit) dta = DatetimeArray._from_sequence(values, dtype=dtype) return dta def _convert_index(name: str, index: Index, encoding: str, errors: str) -> IndexCol: assert isinstance(name, str) index_name = index.name (converted, dtype_name) = _get_data_and_dtype_name(index) kind = _dtype_to_kind(dtype_name) atom = DataIndexableCol._get_atom(converted) if lib.is_np_dtype(index.dtype, 'iu') or needs_i8_conversion(index.dtype) or is_bool_dtype(index.dtype): return IndexCol(name, values=converted, kind=kind, typ=atom, freq=getattr(index, 'freq', None), tz=getattr(index, 'tz', None), index_name=index_name) if isinstance(index, MultiIndex): raise TypeError('MultiIndex not supported here!') inferred_type = lib.infer_dtype(index, skipna=False) values = np.asarray(index) if inferred_type == 'date': converted = np.asarray([v.toordinal() for v in values], dtype=np.int32) return IndexCol(name, converted, 'date', _tables().Time32Col(), index_name=index_name) elif inferred_type == 'string': converted = _convert_string_array(values, encoding, errors) itemsize = converted.dtype.itemsize return IndexCol(name, converted, 'string', _tables().StringCol(itemsize), index_name=index_name) elif inferred_type in ['integer', 'floating']: return IndexCol(name, values=converted, kind=kind, typ=atom, index_name=index_name) else: assert isinstance(converted, np.ndarray) and converted.dtype == object assert kind == 'object', kind atom = _tables().ObjectAtom() return IndexCol(name, converted, kind, atom, index_name=index_name) def _unconvert_index(data, kind: str, encoding: str, errors: str) -> np.ndarray | Index: index: Index | np.ndarray if kind.startswith('datetime64'): if kind == 'datetime64': index = DatetimeIndex(data) else: index = DatetimeIndex(data.view(kind)) elif kind == 'timedelta64': index = TimedeltaIndex(data) elif kind == 'date': try: index = np.asarray([date.fromordinal(v) for v in data], dtype=object) except ValueError: index = np.asarray([date.fromtimestamp(v) for v in data], dtype=object) elif kind in ('integer', 'float', 'bool'): index = np.asarray(data) elif kind in 'string': index = _unconvert_string_array(data, nan_rep=None, encoding=encoding, errors=errors) elif kind == 'object': index = np.asarray(data[0]) else: raise ValueError(f'unrecognized index type {kind}') return index def _maybe_convert_for_string_atom(name: str, bvalues: ArrayLike, existing_col, min_itemsize, nan_rep, encoding, errors, columns: list[str]): if bvalues.dtype != object: return bvalues bvalues = cast(np.ndarray, bvalues) dtype_name = bvalues.dtype.name inferred_type = lib.infer_dtype(bvalues, skipna=False) if inferred_type == 'date': raise TypeError('[date] is not implemented as a table column') if 
inferred_type == 'datetime': raise TypeError('too many timezones in this block, create separate data columns') if not (inferred_type == 'string' or dtype_name == 'object'): return bvalues mask = isna(bvalues) data = bvalues.copy() data[mask] = nan_rep inferred_type = lib.infer_dtype(data, skipna=False) if inferred_type != 'string': for i in range(data.shape[0]): col = data[i] inferred_type = lib.infer_dtype(col, skipna=False) if inferred_type != 'string': error_column_label = columns[i] if len(columns) > i else f'No.{i}' raise TypeError(f'Cannot serialize the column [{error_column_label}]\nbecause its data contents are not [string] but [{inferred_type}] object dtype') data_converted = _convert_string_array(data, encoding, errors).reshape(data.shape) itemsize = data_converted.itemsize if isinstance(min_itemsize, dict): min_itemsize = int(min_itemsize.get(name) or min_itemsize.get('values') or 0) itemsize = max(min_itemsize or 0, itemsize) if existing_col is not None: eci = existing_col.validate_col(itemsize) if eci is not None and eci > itemsize: itemsize = eci data_converted = data_converted.astype(f'|S{itemsize}', copy=False) return data_converted def _convert_string_array(data: np.ndarray, encoding: str, errors: str) -> np.ndarray: if len(data): data = Series(data.ravel(), copy=False).str.encode(encoding, errors)._values.reshape(data.shape) ensured = ensure_object(data.ravel()) itemsize = max(1, libwriters.max_len_string_array(ensured)) data = np.asarray(data, dtype=f'S{itemsize}') return data def _unconvert_string_array(data: np.ndarray, nan_rep, encoding: str, errors: str) -> np.ndarray: shape = data.shape data = np.asarray(data.ravel(), dtype=object) if len(data): itemsize = libwriters.max_len_string_array(ensure_object(data)) dtype = f'U{itemsize}' if isinstance(data[0], bytes): data = Series(data, copy=False).str.decode(encoding, errors=errors)._values else: data = data.astype(dtype, copy=False).astype(object, copy=False) if nan_rep is None: nan_rep = 'nan' libwriters.string_array_replace_from_nan_rep(data, nan_rep) return data.reshape(shape) def _maybe_convert(values: np.ndarray, val_kind: str, encoding: str, errors: str): assert isinstance(val_kind, str), type(val_kind) if _need_convert(val_kind): conv = _get_converter(val_kind, encoding, errors) values = conv(values) return values def _get_converter(kind: str, encoding: str, errors: str): if kind == 'datetime64': return lambda x: np.asarray(x, dtype='M8[ns]') elif 'datetime64' in kind: return lambda x: np.asarray(x, dtype=kind) elif kind == 'string': return lambda x: _unconvert_string_array(x, nan_rep=None, encoding=encoding, errors=errors) else: raise ValueError(f'invalid kind {kind}') def _need_convert(kind: str) -> bool: if kind in ('datetime64', 'string') or 'datetime64' in kind: return True return False def _maybe_adjust_name(name: str, version: Sequence[int]) -> str: if isinstance(version, str) or len(version) < 3: raise ValueError('Version is incorrect, expected sequence of 3 integers.') if version[0] == 0 and version[1] <= 10 and (version[2] == 0): m = re.search('values_block_(\\d+)', name) if m: grp = m.groups()[0] name = f'values_{grp}' return name def _dtype_to_kind(dtype_str: str) -> str: if dtype_str.startswith(('string', 'bytes')): kind = 'string' elif dtype_str.startswith('float'): kind = 'float' elif dtype_str.startswith('complex'): kind = 'complex' elif dtype_str.startswith(('int', 'uint')): kind = 'integer' elif dtype_str.startswith('datetime64'): kind = dtype_str elif dtype_str.startswith('timedelta'): kind = 
'timedelta64' elif dtype_str.startswith('bool'): kind = 'bool' elif dtype_str.startswith('category'): kind = 'category' elif dtype_str.startswith('period'): kind = 'integer' elif dtype_str == 'object': kind = 'object' else: raise ValueError(f'cannot interpret dtype of [{dtype_str}]') return kind def _get_data_and_dtype_name(data: ArrayLike): if isinstance(data, Categorical): data = data.codes if isinstance(data.dtype, DatetimeTZDtype): dtype_name = f'datetime64[{data.dtype.unit}]' else: dtype_name = data.dtype.name if data.dtype.kind in 'mM': data = np.asarray(data.view('i8')) elif isinstance(data, PeriodIndex): data = data.asi8 data = np.asarray(data) return (data, dtype_name) class Selection: def __init__(self, table: Table, where=None, start: int | None=None, stop: int | None=None) -> None: self.table = table self.where = where self.start = start self.stop = stop self.condition = None self.filter = None self.terms = None self.coordinates = None if is_list_like(where): with suppress(ValueError): inferred = lib.infer_dtype(where, skipna=False) if inferred in ('integer', 'boolean'): where = np.asarray(where) if where.dtype == np.bool_: (start, stop) = (self.start, self.stop) if start is None: start = 0 if stop is None: stop = self.table.nrows self.coordinates = np.arange(start, stop)[where] elif issubclass(where.dtype.type, np.integer): if self.start is not None and (where < self.start).any() or (self.stop is not None and (where >= self.stop).any()): raise ValueError('where must have index locations >= start and < stop') self.coordinates = where if self.coordinates is None: self.terms = self.generate(where) if self.terms is not None: (self.condition, self.filter) = self.terms.evaluate() @overload def generate(self, where: dict | list | tuple | str) -> PyTablesExpr: ... @overload def generate(self, where: None) -> None: ... def generate(self, where: dict | list | tuple | str | None) -> PyTablesExpr | None: if where is None: return None q = self.table.queryables() try: return PyTablesExpr(where, queryables=q, encoding=self.table.encoding) except NameError as err: qkeys = ','.join(q.keys()) msg = dedent(f" The passed where expression: {where}\n contains an invalid variable reference\n all of the variable references must be a reference to\n an axis (e.g. 
'index' or 'columns'), or a data_column\n The currently defined references are: {qkeys}\n ") raise ValueError(msg) from err def select(self): if self.condition is not None: return self.table.table.read_where(self.condition.format(), start=self.start, stop=self.stop) elif self.coordinates is not None: return self.table.table.read_coordinates(self.coordinates) return self.table.table.read(start=self.start, stop=self.stop) def select_coords(self): (start, stop) = (self.start, self.stop) nrows = self.table.nrows if start is None: start = 0 elif start < 0: start += nrows if stop is None: stop = nrows elif stop < 0: stop += nrows if self.condition is not None: return self.table.table.get_where_list(self.condition.format(), start=start, stop=stop, sort=True) elif self.coordinates is not None: return self.coordinates return np.arange(start, stop) # File: pandas-main/pandas/io/sas/sas7bdat.py """""" from __future__ import annotations from datetime import datetime import sys from typing import TYPE_CHECKING import numpy as np from pandas._libs.byteswap import read_double_with_byteswap, read_float_with_byteswap, read_uint16_with_byteswap, read_uint32_with_byteswap, read_uint64_with_byteswap from pandas._libs.sas import Parser, get_subheader_index from pandas._libs.tslibs.conversion import cast_from_unit_vectorized from pandas.errors import EmptyDataError import pandas as pd from pandas import DataFrame, Timestamp from pandas.io.common import get_handle import pandas.io.sas.sas_constants as const from pandas.io.sas.sasreader import SASReader if TYPE_CHECKING: from pandas._typing import CompressionOptions, FilePath, ReadBuffer _unix_origin = Timestamp('1970-01-01') _sas_origin = Timestamp('1960-01-01') def _convert_datetimes(sas_datetimes: pd.Series, unit: str) -> pd.Series: td = (_sas_origin - _unix_origin).as_unit('s') if unit == 's': millis = cast_from_unit_vectorized(sas_datetimes._values, unit='s', out_unit='ms') dt64ms = millis.view('M8[ms]') + td return pd.Series(dt64ms, index=sas_datetimes.index, copy=False) else: vals = np.array(sas_datetimes, dtype='M8[D]') + td return pd.Series(vals, dtype='M8[s]', index=sas_datetimes.index, copy=False) class _Column: col_id: int name: str | bytes label: str | bytes format: str | bytes ctype: bytes length: int def __init__(self, col_id: int, name: str | bytes, label: str | bytes, format: str | bytes, ctype: bytes, length: int) -> None: self.col_id = col_id self.name = name self.label = label self.format = format self.ctype = ctype self.length = length class SAS7BDATReader(SASReader): _int_length: int _cached_page: bytes | None def __init__(self, path_or_buf: FilePath | ReadBuffer[bytes], index=None, convert_dates: bool=True, blank_missing: bool=True, chunksize: int | None=None, encoding: str | None=None, convert_text: bool=True, convert_header_text: bool=True, compression: CompressionOptions='infer') -> None: self.index = index self.convert_dates = convert_dates self.blank_missing = blank_missing self.chunksize = chunksize self.encoding = encoding self.convert_text = convert_text self.convert_header_text = convert_header_text self.default_encoding = 'latin-1' self.compression = b'' self.column_names_raw: list[bytes] = [] self.column_names: list[str | bytes] = [] self.column_formats: list[str | bytes] = [] self.columns: list[_Column] = [] self._current_page_data_subheader_pointers: list[tuple[int, int]] = [] self._cached_page = None self._column_data_lengths: list[int] = [] self._column_data_offsets: list[int] = [] self._column_types: list[bytes] = [] 
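# Note (added for exposition, not part of the original source): the remainder of
# SAS7BDATReader.__init__ below resets the row-position counters, opens the input
# via get_handle, and builds self._subheader_processors — a dispatch list whose
# order matches the index returned by get_subheader_index (row size, column size,
# subheader counts, column text, column name, column attributes, format/label,
# column list, and None for data subheaders, which are instead collected as
# pointers in _process_page_metadata and parsed later during read()).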
self._current_row_in_file_index = 0 self._current_row_on_page_index = 0 self._current_row_in_file_index = 0 self.handles = get_handle(path_or_buf, 'rb', is_text=False, compression=compression) self._path_or_buf = self.handles.handle self._subheader_processors = [self._process_rowsize_subheader, self._process_columnsize_subheader, self._process_subheader_counts, self._process_columntext_subheader, self._process_columnname_subheader, self._process_columnattributes_subheader, self._process_format_subheader, self._process_columnlist_subheader, None] try: self._get_properties() self._parse_metadata() except Exception: self.close() raise def column_data_lengths(self) -> np.ndarray: return np.asarray(self._column_data_lengths, dtype=np.int64) def column_data_offsets(self) -> np.ndarray: return np.asarray(self._column_data_offsets, dtype=np.int64) def column_types(self) -> np.ndarray: return np.asarray(self._column_types, dtype=np.dtype('S1')) def close(self) -> None: self.handles.close() def _get_properties(self) -> None: self._path_or_buf.seek(0) self._cached_page = self._path_or_buf.read(288) if self._cached_page[0:len(const.magic)] != const.magic: raise ValueError('magic number mismatch (not a SAS file?)') buf = self._read_bytes(const.align_1_offset, const.align_1_length) if buf == const.u64_byte_checker_value: self.U64 = True self._int_length = 8 self._page_bit_offset = const.page_bit_offset_x64 self._subheader_pointer_length = const.subheader_pointer_length_x64 else: self.U64 = False self._page_bit_offset = const.page_bit_offset_x86 self._subheader_pointer_length = const.subheader_pointer_length_x86 self._int_length = 4 buf = self._read_bytes(const.align_2_offset, const.align_2_length) if buf == const.align_1_checker_value: align1 = const.align_2_value else: align1 = 0 buf = self._read_bytes(const.endianness_offset, const.endianness_length) if buf == b'\x01': self.byte_order = '<' self.need_byteswap = sys.byteorder == 'big' else: self.byte_order = '>' self.need_byteswap = sys.byteorder == 'little' buf = self._read_bytes(const.encoding_offset, const.encoding_length)[0] if buf in const.encoding_names: self.inferred_encoding = const.encoding_names[buf] if self.encoding == 'infer': self.encoding = self.inferred_encoding else: self.inferred_encoding = f'unknown (code={buf})' epoch = datetime(1960, 1, 1) x = self._read_float(const.date_created_offset + align1, const.date_created_length) self.date_created = epoch + pd.to_timedelta(x, unit='s') x = self._read_float(const.date_modified_offset + align1, const.date_modified_length) self.date_modified = epoch + pd.to_timedelta(x, unit='s') self.header_length = self._read_uint(const.header_size_offset + align1, const.header_size_length) buf = self._path_or_buf.read(self.header_length - 288) self._cached_page += buf if len(self._cached_page) != self.header_length: raise ValueError('The SAS7BDAT file appears to be truncated.') self._page_length = self._read_uint(const.page_size_offset + align1, const.page_size_length) def __next__(self) -> DataFrame: da = self.read(nrows=self.chunksize or 1) if da.empty: self.close() raise StopIteration return da def _read_float(self, offset: int, width: int) -> float: assert self._cached_page is not None if width == 4: return read_float_with_byteswap(self._cached_page, offset, self.need_byteswap) elif width == 8: return read_double_with_byteswap(self._cached_page, offset, self.need_byteswap) else: self.close() raise ValueError('invalid float width') def _read_uint(self, offset: int, width: int) -> int: assert 
self._cached_page is not None if width == 1: return self._read_bytes(offset, 1)[0] elif width == 2: return read_uint16_with_byteswap(self._cached_page, offset, self.need_byteswap) elif width == 4: return read_uint32_with_byteswap(self._cached_page, offset, self.need_byteswap) elif width == 8: return read_uint64_with_byteswap(self._cached_page, offset, self.need_byteswap) else: self.close() raise ValueError('invalid int width') def _read_bytes(self, offset: int, length: int): assert self._cached_page is not None if offset + length > len(self._cached_page): self.close() raise ValueError('The cached page is too small.') return self._cached_page[offset:offset + length] def _parse_metadata(self) -> None: done = False while not done: self._cached_page = self._path_or_buf.read(self._page_length) if len(self._cached_page) <= 0: break if len(self._cached_page) != self._page_length: raise ValueError('Failed to read a meta data page from the SAS file.') done = self._process_page_meta() def _process_page_meta(self) -> bool: self._read_page_header() pt = const.page_meta_types + [const.page_amd_type, const.page_mix_type] if self._current_page_type in pt: self._process_page_metadata() is_data_page = self._current_page_type == const.page_data_type is_mix_page = self._current_page_type == const.page_mix_type return bool(is_data_page or is_mix_page or self._current_page_data_subheader_pointers != []) def _read_page_header(self) -> None: bit_offset = self._page_bit_offset tx = const.page_type_offset + bit_offset self._current_page_type = self._read_uint(tx, const.page_type_length) & const.page_type_mask2 tx = const.block_count_offset + bit_offset self._current_page_block_count = self._read_uint(tx, const.block_count_length) tx = const.subheader_count_offset + bit_offset self._current_page_subheaders_count = self._read_uint(tx, const.subheader_count_length) def _process_page_metadata(self) -> None: bit_offset = self._page_bit_offset for i in range(self._current_page_subheaders_count): offset = const.subheader_pointers_offset + bit_offset total_offset = offset + self._subheader_pointer_length * i subheader_offset = self._read_uint(total_offset, self._int_length) total_offset += self._int_length subheader_length = self._read_uint(total_offset, self._int_length) total_offset += self._int_length subheader_compression = self._read_uint(total_offset, 1) total_offset += 1 subheader_type = self._read_uint(total_offset, 1) if subheader_length == 0 or subheader_compression == const.truncated_subheader_id: continue subheader_signature = self._read_bytes(subheader_offset, self._int_length) subheader_index = get_subheader_index(subheader_signature) subheader_processor = self._subheader_processors[subheader_index] if subheader_processor is None: f1 = subheader_compression in (const.compressed_subheader_id, 0) f2 = subheader_type == const.compressed_subheader_type if self.compression and f1 and f2: self._current_page_data_subheader_pointers.append((subheader_offset, subheader_length)) else: self.close() raise ValueError(f'Unknown subheader signature {subheader_signature}') else: subheader_processor(subheader_offset, subheader_length) def _process_rowsize_subheader(self, offset: int, length: int) -> None: int_len = self._int_length lcs_offset = offset lcp_offset = offset if self.U64: lcs_offset += 682 lcp_offset += 706 else: lcs_offset += 354 lcp_offset += 378 self.row_length = self._read_uint(offset + const.row_length_offset_multiplier * int_len, int_len) self.row_count = self._read_uint(offset + 
const.row_count_offset_multiplier * int_len, int_len) self.col_count_p1 = self._read_uint(offset + const.col_count_p1_multiplier * int_len, int_len) self.col_count_p2 = self._read_uint(offset + const.col_count_p2_multiplier * int_len, int_len) mx = const.row_count_on_mix_page_offset_multiplier * int_len self._mix_page_row_count = self._read_uint(offset + mx, int_len) self._lcs = self._read_uint(lcs_offset, 2) self._lcp = self._read_uint(lcp_offset, 2) def _process_columnsize_subheader(self, offset: int, length: int) -> None: int_len = self._int_length offset += int_len self.column_count = self._read_uint(offset, int_len) if self.col_count_p1 + self.col_count_p2 != self.column_count: print(f'Warning: column count mismatch ({self.col_count_p1} + {self.col_count_p2} != {self.column_count})\n') def _process_subheader_counts(self, offset: int, length: int) -> None: pass def _process_columntext_subheader(self, offset: int, length: int) -> None: offset += self._int_length text_block_size = self._read_uint(offset, const.text_block_size_length) buf = self._read_bytes(offset, text_block_size) cname_raw = buf[0:text_block_size].rstrip(b'\x00 ') self.column_names_raw.append(cname_raw) if len(self.column_names_raw) == 1: compression_literal = b'' for cl in const.compression_literals: if cl in cname_raw: compression_literal = cl self.compression = compression_literal offset -= self._int_length offset1 = offset + 16 if self.U64: offset1 += 4 buf = self._read_bytes(offset1, self._lcp) compression_literal = buf.rstrip(b'\x00') if compression_literal == b'': self._lcs = 0 offset1 = offset + 32 if self.U64: offset1 += 4 buf = self._read_bytes(offset1, self._lcp) self.creator_proc = buf[0:self._lcp] elif compression_literal == const.rle_compression: offset1 = offset + 40 if self.U64: offset1 += 4 buf = self._read_bytes(offset1, self._lcp) self.creator_proc = buf[0:self._lcp] elif self._lcs > 0: self._lcp = 0 offset1 = offset + 16 if self.U64: offset1 += 4 buf = self._read_bytes(offset1, self._lcs) self.creator_proc = buf[0:self._lcp] if hasattr(self, 'creator_proc'): self.creator_proc = self._convert_header_text(self.creator_proc) def _process_columnname_subheader(self, offset: int, length: int) -> None: int_len = self._int_length offset += int_len column_name_pointers_count = (length - 2 * int_len - 12) // 8 for i in range(column_name_pointers_count): text_subheader = offset + const.column_name_pointer_length * (i + 1) + const.column_name_text_subheader_offset col_name_offset = offset + const.column_name_pointer_length * (i + 1) + const.column_name_offset_offset col_name_length = offset + const.column_name_pointer_length * (i + 1) + const.column_name_length_offset idx = self._read_uint(text_subheader, const.column_name_text_subheader_length) col_offset = self._read_uint(col_name_offset, const.column_name_offset_length) col_len = self._read_uint(col_name_length, const.column_name_length_length) name_raw = self.column_names_raw[idx] cname = name_raw[col_offset:col_offset + col_len] self.column_names.append(self._convert_header_text(cname)) def _process_columnattributes_subheader(self, offset: int, length: int) -> None: int_len = self._int_length column_attributes_vectors_count = (length - 2 * int_len - 12) // (int_len + 8) for i in range(column_attributes_vectors_count): col_data_offset = offset + int_len + const.column_data_offset_offset + i * (int_len + 8) col_data_len = offset + 2 * int_len + const.column_data_length_offset + i * (int_len + 8) col_types = offset + 2 * int_len + const.column_type_offset + i 
* (int_len + 8) x = self._read_uint(col_data_offset, int_len) self._column_data_offsets.append(x) x = self._read_uint(col_data_len, const.column_data_length_length) self._column_data_lengths.append(x) x = self._read_uint(col_types, const.column_type_length) self._column_types.append(b'd' if x == 1 else b's') def _process_columnlist_subheader(self, offset: int, length: int) -> None: pass def _process_format_subheader(self, offset: int, length: int) -> None: int_len = self._int_length text_subheader_format = offset + const.column_format_text_subheader_index_offset + 3 * int_len col_format_offset = offset + const.column_format_offset_offset + 3 * int_len col_format_len = offset + const.column_format_length_offset + 3 * int_len text_subheader_label = offset + const.column_label_text_subheader_index_offset + 3 * int_len col_label_offset = offset + const.column_label_offset_offset + 3 * int_len col_label_len = offset + const.column_label_length_offset + 3 * int_len x = self._read_uint(text_subheader_format, const.column_format_text_subheader_index_length) format_idx = min(x, len(self.column_names_raw) - 1) format_start = self._read_uint(col_format_offset, const.column_format_offset_length) format_len = self._read_uint(col_format_len, const.column_format_length_length) label_idx = self._read_uint(text_subheader_label, const.column_label_text_subheader_index_length) label_idx = min(label_idx, len(self.column_names_raw) - 1) label_start = self._read_uint(col_label_offset, const.column_label_offset_length) label_len = self._read_uint(col_label_len, const.column_label_length_length) label_names = self.column_names_raw[label_idx] column_label = self._convert_header_text(label_names[label_start:label_start + label_len]) format_names = self.column_names_raw[format_idx] column_format = self._convert_header_text(format_names[format_start:format_start + format_len]) current_column_number = len(self.columns) col = _Column(current_column_number, self.column_names[current_column_number], column_label, column_format, self._column_types[current_column_number], self._column_data_lengths[current_column_number]) self.column_formats.append(column_format) self.columns.append(col) def read(self, nrows: int | None=None) -> DataFrame: if nrows is None and self.chunksize is not None: nrows = self.chunksize elif nrows is None: nrows = self.row_count if len(self._column_types) == 0: self.close() raise EmptyDataError('No columns to parse from file') if nrows > 0 and self._current_row_in_file_index >= self.row_count: return DataFrame() nrows = min(nrows, self.row_count - self._current_row_in_file_index) nd = self._column_types.count(b'd') ns = self._column_types.count(b's') self._string_chunk = np.empty((ns, nrows), dtype=object) self._byte_chunk = np.zeros((nd, 8 * nrows), dtype=np.uint8) self._current_row_in_chunk_index = 0 p = Parser(self) p.read(nrows) rslt = self._chunk_to_dataframe() if self.index is not None: rslt = rslt.set_index(self.index) return rslt def _read_next_page(self): self._current_page_data_subheader_pointers = [] self._cached_page = self._path_or_buf.read(self._page_length) if len(self._cached_page) <= 0: return True elif len(self._cached_page) != self._page_length: self.close() msg = f'failed to read complete page from file (read {len(self._cached_page):d} of {self._page_length:d} bytes)' raise ValueError(msg) self._read_page_header() if self._current_page_type in const.page_meta_types: self._process_page_metadata() if self._current_page_type not in const.page_meta_types + [const.page_data_type, 
const.page_mix_type]: return self._read_next_page() return False def _chunk_to_dataframe(self) -> DataFrame: n = self._current_row_in_chunk_index m = self._current_row_in_file_index ix = range(m - n, m) rslt = {} (js, jb) = (0, 0) for j in range(self.column_count): name = self.column_names[j] if self._column_types[j] == b'd': col_arr = self._byte_chunk[jb, :].view(dtype=self.byte_order + 'd') rslt[name] = pd.Series(col_arr, dtype=np.float64, index=ix, copy=False) if self.convert_dates: if self.column_formats[j] in const.sas_date_formats: rslt[name] = _convert_datetimes(rslt[name], 'd') elif self.column_formats[j] in const.sas_datetime_formats: rslt[name] = _convert_datetimes(rslt[name], 's') jb += 1 elif self._column_types[j] == b's': rslt[name] = pd.Series(self._string_chunk[js, :], index=ix, copy=False) if self.convert_text and self.encoding is not None: rslt[name] = self._decode_string(rslt[name].str) js += 1 else: self.close() raise ValueError(f'unknown column type {self._column_types[j]!r}') df = DataFrame(rslt, columns=self.column_names, index=ix, copy=False) return df def _decode_string(self, b): return b.decode(self.encoding or self.default_encoding) def _convert_header_text(self, b: bytes) -> str | bytes: if self.convert_header_text: return self._decode_string(b) else: return b # File: pandas-main/pandas/io/sas/sas_constants.py from __future__ import annotations from typing import Final magic: Final = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2\xea\x81`\xb3\x14\x11\xcf\xbd\x92\x08\x00\t\xc71\x8c\x18\x1f\x10\x11' align_1_checker_value: Final = b'3' align_1_offset: Final = 32 align_1_length: Final = 1 align_1_value: Final = 4 u64_byte_checker_value: Final = b'3' align_2_offset: Final = 35 align_2_length: Final = 1 align_2_value: Final = 4 endianness_offset: Final = 37 endianness_length: Final = 1 platform_offset: Final = 39 platform_length: Final = 1 encoding_offset: Final = 70 encoding_length: Final = 1 dataset_offset: Final = 92 dataset_length: Final = 64 file_type_offset: Final = 156 file_type_length: Final = 8 date_created_offset: Final = 164 date_created_length: Final = 8 date_modified_offset: Final = 172 date_modified_length: Final = 8 header_size_offset: Final = 196 header_size_length: Final = 4 page_size_offset: Final = 200 page_size_length: Final = 4 page_count_offset: Final = 204 page_count_length: Final = 4 sas_release_offset: Final = 216 sas_release_length: Final = 8 sas_server_type_offset: Final = 224 sas_server_type_length: Final = 16 os_version_number_offset: Final = 240 os_version_number_length: Final = 16 os_maker_offset: Final = 256 os_maker_length: Final = 16 os_name_offset: Final = 272 os_name_length: Final = 16 page_bit_offset_x86: Final = 16 page_bit_offset_x64: Final = 32 subheader_pointer_length_x86: Final = 12 subheader_pointer_length_x64: Final = 24 page_type_offset: Final = 0 page_type_length: Final = 2 block_count_offset: Final = 2 block_count_length: Final = 2 subheader_count_offset: Final = 4 subheader_count_length: Final = 2 page_type_mask: Final = 3840 page_type_mask2: Final = 61440 | page_type_mask page_meta_type: Final = 0 page_data_type: Final = 256 page_mix_type: Final = 512 page_amd_type: Final = 1024 page_meta2_type: Final = 16384 page_comp_type: Final = 36864 page_meta_types: Final = [page_meta_type, page_meta2_type] subheader_pointers_offset: Final = 8 truncated_subheader_id: Final = 1 compressed_subheader_id: Final = 4 compressed_subheader_type: Final = 1 text_block_size_length: Final = 2 row_length_offset_multiplier: Final = 5 
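# Note (added for exposition, not part of the original source): the offset-multiplier
# constants in this block (row_length_offset_multiplier through
# row_count_on_mix_page_offset_multiplier) are scaled by the platform integer width
# (4 bytes, or 8 for U64 files) in SAS7BDATReader._process_rowsize_subheader to
# locate the row length, row counts and column counts inside the row-size subheader.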
row_count_offset_multiplier: Final = 6 col_count_p1_multiplier: Final = 9 col_count_p2_multiplier: Final = 10 row_count_on_mix_page_offset_multiplier: Final = 15 column_name_pointer_length: Final = 8 column_name_text_subheader_offset: Final = 0 column_name_text_subheader_length: Final = 2 column_name_offset_offset: Final = 2 column_name_offset_length: Final = 2 column_name_length_offset: Final = 4 column_name_length_length: Final = 2 column_data_offset_offset: Final = 8 column_data_length_offset: Final = 8 column_data_length_length: Final = 4 column_type_offset: Final = 14 column_type_length: Final = 1 column_format_text_subheader_index_offset: Final = 22 column_format_text_subheader_index_length: Final = 2 column_format_offset_offset: Final = 24 column_format_offset_length: Final = 2 column_format_length_offset: Final = 26 column_format_length_length: Final = 2 column_label_text_subheader_index_offset: Final = 28 column_label_text_subheader_index_length: Final = 2 column_label_offset_offset: Final = 30 column_label_offset_length: Final = 2 column_label_length_offset: Final = 32 column_label_length_length: Final = 2 rle_compression: Final = b'SASYZCRL' rdc_compression: Final = b'SASYZCR2' compression_literals: Final = [rle_compression, rdc_compression] encoding_names: Final = {20: 'utf-8', 29: 'latin1', 30: 'latin2', 31: 'latin3', 32: 'latin4', 33: 'cyrillic', 34: 'arabic', 35: 'greek', 36: 'hebrew', 37: 'latin5', 38: 'latin6', 39: 'cp874', 40: 'latin9', 41: 'cp437', 42: 'cp850', 43: 'cp852', 44: 'cp857', 45: 'cp858', 46: 'cp862', 47: 'cp864', 48: 'cp865', 49: 'cp866', 50: 'cp869', 51: 'cp874', 55: 'cp720', 56: 'cp737', 57: 'cp775', 58: 'cp860', 59: 'cp863', 60: 'cp1250', 61: 'cp1251', 62: 'cp1252', 63: 'cp1253', 64: 'cp1254', 65: 'cp1255', 66: 'cp1256', 67: 'cp1257', 68: 'cp1258', 118: 'cp950', 123: 'big5', 125: 'gb2312', 126: 'cp936', 134: 'euc_jp', 136: 'cp932', 138: 'shift_jis', 140: 'euc-kr', 141: 'cp949', 227: 'latin8'} class SASIndex: row_size_index: Final = 0 column_size_index: Final = 1 subheader_counts_index: Final = 2 column_text_index: Final = 3 column_name_index: Final = 4 column_attributes_index: Final = 5 format_and_label_index: Final = 6 column_list_index: Final = 7 data_subheader_index: Final = 8 subheader_signature_to_index: Final = {b'\xf7\xf7\xf7\xf7': SASIndex.row_size_index, b'\x00\x00\x00\x00\xf7\xf7\xf7\xf7': SASIndex.row_size_index, b'\xf7\xf7\xf7\xf7\x00\x00\x00\x00': SASIndex.row_size_index, b'\xf7\xf7\xf7\xf7\xff\xff\xfb\xfe': SASIndex.row_size_index, b'\xf6\xf6\xf6\xf6': SASIndex.column_size_index, b'\x00\x00\x00\x00\xf6\xf6\xf6\xf6': SASIndex.column_size_index, b'\xf6\xf6\xf6\xf6\x00\x00\x00\x00': SASIndex.column_size_index, b'\xf6\xf6\xf6\xf6\xff\xff\xfb\xfe': SASIndex.column_size_index, b'\x00\xfc\xff\xff': SASIndex.subheader_counts_index, b'\xff\xff\xfc\x00': SASIndex.subheader_counts_index, b'\x00\xfc\xff\xff\xff\xff\xff\xff': SASIndex.subheader_counts_index, b'\xff\xff\xff\xff\xff\xff\xfc\x00': SASIndex.subheader_counts_index, b'\xfd\xff\xff\xff': SASIndex.column_text_index, b'\xff\xff\xff\xfd': SASIndex.column_text_index, b'\xfd\xff\xff\xff\xff\xff\xff\xff': SASIndex.column_text_index, b'\xff\xff\xff\xff\xff\xff\xff\xfd': SASIndex.column_text_index, b'\xff\xff\xff\xff': SASIndex.column_name_index, b'\xff\xff\xff\xff\xff\xff\xff\xff': SASIndex.column_name_index, b'\xfc\xff\xff\xff': SASIndex.column_attributes_index, b'\xff\xff\xff\xfc': SASIndex.column_attributes_index, b'\xfc\xff\xff\xff\xff\xff\xff\xff': SASIndex.column_attributes_index, 
b'\xff\xff\xff\xff\xff\xff\xff\xfc': SASIndex.column_attributes_index, b'\xfe\xfb\xff\xff': SASIndex.format_and_label_index, b'\xff\xff\xfb\xfe': SASIndex.format_and_label_index, b'\xfe\xfb\xff\xff\xff\xff\xff\xff': SASIndex.format_and_label_index, b'\xff\xff\xff\xff\xff\xff\xfb\xfe': SASIndex.format_and_label_index, b'\xfe\xff\xff\xff': SASIndex.column_list_index, b'\xff\xff\xff\xfe': SASIndex.column_list_index, b'\xfe\xff\xff\xff\xff\xff\xff\xff': SASIndex.column_list_index, b'\xff\xff\xff\xff\xff\xff\xff\xfe': SASIndex.column_list_index} sas_date_formats: Final = ('DATE', 'DAY', 'DDMMYY', 'DOWNAME', 'JULDAY', 'JULIAN', 'MMDDYY', 'MMYY', 'MMYYC', 'MMYYD', 'MMYYP', 'MMYYS', 'MMYYN', 'MONNAME', 'MONTH', 'MONYY', 'QTR', 'QTRR', 'NENGO', 'WEEKDATE', 'WEEKDATX', 'WEEKDAY', 'WEEKV', 'WORDDATE', 'WORDDATX', 'YEAR', 'YYMM', 'YYMMC', 'YYMMD', 'YYMMP', 'YYMMS', 'YYMMN', 'YYMON', 'YYMMDD', 'YYQ', 'YYQC', 'YYQD', 'YYQP', 'YYQS', 'YYQN', 'YYQR', 'YYQRC', 'YYQRD', 'YYQRP', 'YYQRS', 'YYQRN', 'YYMMDDP', 'YYMMDDC', 'E8601DA', 'YYMMDDN', 'MMDDYYC', 'MMDDYYS', 'MMDDYYD', 'YYMMDDS', 'B8601DA', 'DDMMYYN', 'YYMMDDD', 'DDMMYYB', 'DDMMYYP', 'MMDDYYP', 'YYMMDDB', 'MMDDYYN', 'DDMMYYC', 'DDMMYYD', 'DDMMYYS', 'MINGUO') sas_datetime_formats: Final = ('DATETIME', 'DTWKDATX', 'B8601DN', 'B8601DT', 'B8601DX', 'B8601DZ', 'B8601LX', 'E8601DN', 'E8601DT', 'E8601DX', 'E8601DZ', 'E8601LX', 'DATEAMPM', 'DTDATE', 'DTMONYY', 'DTMONYY', 'DTWKDATX', 'DTYEAR', 'TOD', 'MDYAMPM') # File: pandas-main/pandas/io/sas/sas_xport.py """""" from __future__ import annotations from datetime import datetime import struct from typing import TYPE_CHECKING import warnings import numpy as np from pandas.util._decorators import Appender from pandas.util._exceptions import find_stack_level import pandas as pd from pandas.io.common import get_handle from pandas.io.sas.sasreader import SASReader if TYPE_CHECKING: from pandas._typing import CompressionOptions, DatetimeNaTType, FilePath, ReadBuffer _correct_line1 = 'HEADER RECORD*******LIBRARY HEADER RECORD!!!!!!!000000000000000000000000000000 ' _correct_header1 = 'HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!000000000000000001600000000' _correct_header2 = 'HEADER RECORD*******DSCRPTR HEADER RECORD!!!!!!!000000000000000000000000000000 ' _correct_obs_header = 'HEADER RECORD*******OBS HEADER RECORD!!!!!!!000000000000000000000000000000 ' _fieldkeys = ['ntype', 'nhfun', 'field_length', 'nvar0', 'name', 'label', 'nform', 'nfl', 'num_decimals', 'nfj', 'nfill', 'niform', 'nifl', 'nifd', 'npos', '_'] _base_params_doc = 'Parameters\n----------\nfilepath_or_buffer : str or file-like object\n Path to SAS file or object implementing binary read method.' _params2_doc = 'index : identifier of index column\n Identifier of column that should be used as index of the DataFrame.\nencoding : str\n Encoding for text data.\nchunksize : int\n Read file `chunksize` lines at a time, returns iterator.' _format_params_doc = 'format : str\n File format, only `xport` is currently supported.' _iterator_doc = 'iterator : bool, default False\n Return XportReader object for reading file incrementally.' 
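The XPORT reader further down consumes 80-character header cards (whose expected contents are the `_correct_line1`/`_correct_header*` constants above) by slicing them into fixed-width fields. A minimal standalone sketch of that split follows; the field names and widths mirror the file-info card parsed in `_read_header` below, while the version, OS, and timestamp values are invented for illustration.

# Illustrative 80-character file-info card; only the field widths matter,
# the version/OS/date values are made up.
line2 = (
    "SAS     SAS     SASLIB  "   # 24-char prefix checked by the reader
    "9.4     "                   # 8-char SAS version
    "X64_10PR"                   # 8-char OS string
    + " " * 24                   # 24-char filler ("_")
    + "01JAN21:00:00:00"         # 16-char creation timestamp
)

fields = [("prefix", 24), ("version", 8), ("OS", 8), ("_", 24), ("created", 16)]
out, start = {}, 0
for name, length in fields:
    out[name] = line2[start : start + length].strip()
    start += length
out.pop("_")  # the filler column is discarded, as _split_line does below
print(out)    # {'prefix': 'SAS     SAS     SASLIB', 'version': '9.4', ...}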
_read_sas_doc = f"Read a SAS file into a DataFrame.\n\n{_base_params_doc}\n{_format_params_doc}\n{_params2_doc}\n{_iterator_doc}\n\nReturns\n-------\nDataFrame or XportReader\n\nExamples\n--------\nRead a SAS Xport file:\n\n>>> df = pd.read_sas('filename.XPT')\n\nRead a Xport file in 10,000 line chunks:\n\n>>> itr = pd.read_sas('filename.XPT', chunksize=10000)\n>>> for chunk in itr:\n>>> do_something(chunk)\n\n" _xport_reader_doc = f'Class for reading SAS Xport files.\n\n{_base_params_doc}\n{_params2_doc}\n\nAttributes\n----------\nmember_info : list\n Contains information about the file\nfields : list\n Contains information about the variables in the file\n' _read_method_doc = 'Read observations from SAS Xport file, returning as data frame.\n\nParameters\n----------\nnrows : int\n Number of rows to read from data file; if None, read whole\n file.\n\nReturns\n-------\nA DataFrame.\n' def _parse_date(datestr: str) -> DatetimeNaTType: try: return datetime.strptime(datestr, '%d%b%y:%H:%M:%S') except ValueError: return pd.NaT def _split_line(s: str, parts): out = {} start = 0 for (name, length) in parts: out[name] = s[start:start + length].strip() start += length del out['_'] return out def _handle_truncated_float_vec(vec, nbytes): if nbytes != 8: vec1 = np.zeros(len(vec), np.dtype('S8')) dtype = np.dtype(f'S{nbytes},S{8 - nbytes}') vec2 = vec1.view(dtype=dtype) vec2['f0'] = vec return vec2 return vec def _parse_float_vec(vec): dtype = np.dtype('>u4,>u4') vec1 = vec.view(dtype=dtype) xport1 = vec1['f0'] xport2 = vec1['f1'] ieee1 = xport1 & 16777215 shift = np.zeros(len(vec), dtype=np.uint8) shift[np.where(xport1 & 2097152)] = 1 shift[np.where(xport1 & 4194304)] = 2 shift[np.where(xport1 & 8388608)] = 3 ieee1 >>= shift ieee2 = xport2 >> shift | (xport1 & 7) << 29 + (3 - shift) ieee1 &= 4293918719 ieee1 |= ((xport1 >> 24 & 127) - 65 << 2) + shift + 1023 << 20 | xport1 & 2147483648 ieee = np.empty((len(ieee1),), dtype='>u4,>u4') ieee['f0'] = ieee1 ieee['f1'] = ieee2 ieee = ieee.view(dtype='>f8') ieee = ieee.astype('f8') return ieee class XportReader(SASReader): __doc__ = _xport_reader_doc def __init__(self, filepath_or_buffer: FilePath | ReadBuffer[bytes], index=None, encoding: str | None='ISO-8859-1', chunksize: int | None=None, compression: CompressionOptions='infer') -> None: self._encoding = encoding self._lines_read = 0 self._index = index self._chunksize = chunksize self.handles = get_handle(filepath_or_buffer, 'rb', encoding=encoding, is_text=False, compression=compression) self.filepath_or_buffer = self.handles.handle try: self._read_header() except Exception: self.close() raise def close(self) -> None: self.handles.close() def _get_row(self): return self.filepath_or_buffer.read(80).decode() def _read_header(self) -> None: self.filepath_or_buffer.seek(0) line1 = self._get_row() if line1 != _correct_line1: if '**COMPRESSED**' in line1: raise ValueError('Header record indicates a CPORT file, which is not readable.') raise ValueError('Header record is not an XPORT file.') line2 = self._get_row() fif = [['prefix', 24], ['version', 8], ['OS', 8], ['_', 24], ['created', 16]] file_info = _split_line(line2, fif) if file_info['prefix'] != 'SAS SAS SASLIB': raise ValueError('Header record has invalid prefix.') file_info['created'] = _parse_date(file_info['created']) self.file_info = file_info line3 = self._get_row() file_info['modified'] = _parse_date(line3[:16]) header1 = self._get_row() header2 = self._get_row() headflag1 = header1.startswith(_correct_header1) headflag2 = header2 == 
_correct_header2 if not (headflag1 and headflag2): raise ValueError('Member header not found') fieldnamelength = int(header1[-5:-2]) mem = [['prefix', 8], ['set_name', 8], ['sasdata', 8], ['version', 8], ['OS', 8], ['_', 24], ['created', 16]] member_info = _split_line(self._get_row(), mem) mem = [['modified', 16], ['_', 16], ['label', 40], ['type', 8]] member_info.update(_split_line(self._get_row(), mem)) member_info['modified'] = _parse_date(member_info['modified']) member_info['created'] = _parse_date(member_info['created']) self.member_info = member_info types = {1: 'numeric', 2: 'char'} fieldcount = int(self._get_row()[54:58]) datalength = fieldnamelength * fieldcount if datalength % 80: datalength += 80 - datalength % 80 fielddata = self.filepath_or_buffer.read(datalength) fields = [] obs_length = 0 while len(fielddata) >= fieldnamelength: (fieldbytes, fielddata) = (fielddata[:fieldnamelength], fielddata[fieldnamelength:]) fieldbytes = fieldbytes.ljust(140) fieldstruct = struct.unpack('>hhhh8s40s8shhh2s8shhl52s', fieldbytes) field = dict(zip(_fieldkeys, fieldstruct)) del field['_'] field['ntype'] = types[field['ntype']] fl = field['field_length'] if field['ntype'] == 'numeric' and (fl < 2 or fl > 8): msg = f'Floating field width {fl} is not between 2 and 8.' raise TypeError(msg) for (k, v) in field.items(): try: field[k] = v.strip() except AttributeError: pass obs_length += field['field_length'] fields += [field] header = self._get_row() if not header == _correct_obs_header: raise ValueError('Observation header not found.') self.fields = fields self.record_length = obs_length self.record_start = self.filepath_or_buffer.tell() self.nobs = self._record_count() self.columns = [x['name'].decode() for x in self.fields] dtypel = [('s' + str(i), 'S' + str(field['field_length'])) for (i, field) in enumerate(self.fields)] dtype = np.dtype(dtypel) self._dtype = dtype def __next__(self) -> pd.DataFrame: return self.read(nrows=self._chunksize or 1) def _record_count(self) -> int: self.filepath_or_buffer.seek(0, 2) total_records_length = self.filepath_or_buffer.tell() - self.record_start if total_records_length % 80 != 0: warnings.warn('xport file may be corrupted.', stacklevel=find_stack_level()) if self.record_length > 80: self.filepath_or_buffer.seek(self.record_start) return total_records_length // self.record_length self.filepath_or_buffer.seek(-80, 2) last_card_bytes = self.filepath_or_buffer.read(80) last_card = np.frombuffer(last_card_bytes, dtype=np.uint64) ix = np.flatnonzero(last_card == 2314885530818453536) if len(ix) == 0: tail_pad = 0 else: tail_pad = 8 * len(ix) self.filepath_or_buffer.seek(self.record_start) return (total_records_length - tail_pad) // self.record_length def get_chunk(self, size: int | None=None) -> pd.DataFrame: if size is None: size = self._chunksize return self.read(nrows=size) def _missing_double(self, vec): v = vec.view(dtype='u1,u1,u2,u4') miss = (v['f1'] == 0) & (v['f2'] == 0) & (v['f3'] == 0) miss1 = (v['f0'] >= 65) & (v['f0'] <= 90) | (v['f0'] == 95) | (v['f0'] == 46) miss &= miss1 return miss @Appender(_read_method_doc) def read(self, nrows: int | None=None) -> pd.DataFrame: if nrows is None: nrows = self.nobs read_lines = min(nrows, self.nobs - self._lines_read) read_len = read_lines * self.record_length if read_len <= 0: self.close() raise StopIteration raw = self.filepath_or_buffer.read(read_len) data = np.frombuffer(raw, dtype=self._dtype, count=read_lines) df_data = {} for (j, x) in enumerate(self.columns): vec = data['s' + str(j)] ntype = 
self.fields[j]['ntype'] if ntype == 'numeric': vec = _handle_truncated_float_vec(vec, self.fields[j]['field_length']) miss = self._missing_double(vec) v = _parse_float_vec(vec) v[miss] = np.nan elif self.fields[j]['ntype'] == 'char': v = [y.rstrip() for y in vec] if self._encoding is not None: v = [y.decode(self._encoding) for y in v] df_data.update({x: v}) df = pd.DataFrame(df_data) if self._index is None: df.index = pd.Index(range(self._lines_read, self._lines_read + read_lines)) else: df = df.set_index(self._index) self._lines_read += read_lines return df # File: pandas-main/pandas/io/sas/sasreader.py """""" from __future__ import annotations from abc import ABC, abstractmethod from collections.abc import Iterator from typing import TYPE_CHECKING, overload from pandas.util._decorators import doc from pandas.core.shared_docs import _shared_docs from pandas.io.common import stringify_path if TYPE_CHECKING: from collections.abc import Hashable from types import TracebackType from pandas._typing import CompressionOptions, FilePath, ReadBuffer, Self from pandas import DataFrame class SASReader(Iterator['DataFrame'], ABC): @abstractmethod def read(self, nrows: int | None=None) -> DataFrame: ... @abstractmethod def close(self) -> None: ... def __enter__(self) -> Self: return self def __exit__(self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None) -> None: self.close() @overload def read_sas(filepath_or_buffer: FilePath | ReadBuffer[bytes], *, format: str | None=..., index: Hashable | None=..., encoding: str | None=..., chunksize: int=..., iterator: bool=..., compression: CompressionOptions=...) -> SASReader: ... @overload def read_sas(filepath_or_buffer: FilePath | ReadBuffer[bytes], *, format: str | None=..., index: Hashable | None=..., encoding: str | None=..., chunksize: None=..., iterator: bool=..., compression: CompressionOptions=...) -> DataFrame | SASReader: ... 
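A usage sketch of the chunked path the overloads above describe: with `chunksize` (or `iterator=True`) `read_sas` returns the reader itself, which is both an iterator and a context manager through `SASReader`. The file path and the `process()` helper are placeholders, not part of pandas.

import pandas as pd

def process(chunk: pd.DataFrame) -> None:
    # stand-in for user code
    print(len(chunk))

# "example.sas7bdat" is a placeholder path; the format is inferred from the
# extension, and chunksize makes read_sas return a SAS7BDATReader.
with pd.read_sas("example.sas7bdat", chunksize=10_000) as reader:
    for chunk in reader:   # each chunk is a DataFrame of at most 10_000 rows
        process(chunk)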
@doc(decompression_options=_shared_docs['decompression_options'] % 'filepath_or_buffer') def read_sas(filepath_or_buffer: FilePath | ReadBuffer[bytes], *, format: str | None=None, index: Hashable | None=None, encoding: str | None=None, chunksize: int | None=None, iterator: bool=False, compression: CompressionOptions='infer') -> DataFrame | SASReader: if format is None: buffer_error_msg = 'If this is a buffer object rather than a string name, you must specify a format string' filepath_or_buffer = stringify_path(filepath_or_buffer) if not isinstance(filepath_or_buffer, str): raise ValueError(buffer_error_msg) fname = filepath_or_buffer.lower() if '.xpt' in fname: format = 'xport' elif '.sas7bdat' in fname: format = 'sas7bdat' else: raise ValueError(f'unable to infer format of SAS file from filename: {fname!r}') reader: SASReader if format.lower() == 'xport': from pandas.io.sas.sas_xport import XportReader reader = XportReader(filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize, compression=compression) elif format.lower() == 'sas7bdat': from pandas.io.sas.sas7bdat import SAS7BDATReader reader = SAS7BDATReader(filepath_or_buffer, index=index, encoding=encoding, chunksize=chunksize, compression=compression) else: raise ValueError('unknown SAS format') if iterator or chunksize: return reader with reader: return reader.read() # File: pandas-main/pandas/io/spss.py from __future__ import annotations from typing import TYPE_CHECKING, Any from pandas._libs import lib from pandas.compat._optional import import_optional_dependency from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.inference import is_list_like from pandas.io.common import stringify_path if TYPE_CHECKING: from collections.abc import Sequence from pathlib import Path from pandas._typing import DtypeBackend from pandas import DataFrame def read_spss(path: str | Path, usecols: Sequence[str] | None=None, convert_categoricals: bool=True, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default, **kwargs: Any) -> DataFrame: pyreadstat = import_optional_dependency('pyreadstat') check_dtype_backend(dtype_backend) if usecols is not None: if not is_list_like(usecols): raise TypeError('usecols must be list-like.') usecols = list(usecols) (df, metadata) = pyreadstat.read_sav(stringify_path(path), usecols=usecols, apply_value_formats=convert_categoricals, **kwargs) df.attrs = metadata.__dict__ if dtype_backend is not lib.no_default: df = df.convert_dtypes(dtype_backend=dtype_backend) return df # File: pandas-main/pandas/io/sql.py """""" from __future__ import annotations from abc import ABC, abstractmethod from contextlib import ExitStack, contextmanager from datetime import date, datetime, time from functools import partial import re from typing import TYPE_CHECKING, Any, Literal, cast, overload import warnings import numpy as np from pandas._config import using_string_dtype from pandas._libs import lib from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError, DatabaseError from pandas.util._exceptions import find_stack_level from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import is_dict_like, is_list_like from pandas.core.dtypes.dtypes import ArrowDtype, DatetimeTZDtype from pandas.core.dtypes.missing import isna from pandas import get_option from pandas.core.api import DataFrame, Series from pandas.core.arrays import ArrowExtensionArray from pandas.core.base import PandasObject import pandas.core.common as 
com from pandas.core.common import maybe_make_list from pandas.core.internals.construction import convert_object_array from pandas.core.tools.datetimes import to_datetime if TYPE_CHECKING: from collections.abc import Callable, Generator, Iterator, Mapping from sqlalchemy import Table from sqlalchemy.sql.expression import Select, TextClause from pandas._typing import DtypeArg, DtypeBackend, IndexLabel, Self from pandas import Index def _process_parse_dates_argument(parse_dates): if parse_dates is True or parse_dates is None or parse_dates is False: parse_dates = [] elif not hasattr(parse_dates, '__iter__'): parse_dates = [parse_dates] return parse_dates def _handle_date_column(col, utc: bool=False, format: str | dict[str, Any] | None=None): if isinstance(format, dict): return to_datetime(col, **format) else: if format is None and (issubclass(col.dtype.type, np.floating) or issubclass(col.dtype.type, np.integer)): format = 's' if format in ['D', 'd', 'h', 'm', 's', 'ms', 'us', 'ns']: return to_datetime(col, errors='coerce', unit=format, utc=utc) elif isinstance(col.dtype, DatetimeTZDtype): return to_datetime(col, utc=True) else: return to_datetime(col, errors='coerce', format=format, utc=utc) def _parse_date_columns(data_frame: DataFrame, parse_dates) -> DataFrame: parse_dates = _process_parse_dates_argument(parse_dates) for (i, (col_name, df_col)) in enumerate(data_frame.items()): if isinstance(df_col.dtype, DatetimeTZDtype) or col_name in parse_dates: try: fmt = parse_dates[col_name] except (KeyError, TypeError): fmt = None data_frame.isetitem(i, _handle_date_column(df_col, format=fmt)) return data_frame def _convert_arrays_to_dataframe(data, columns, coerce_float: bool=True, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> DataFrame: content = lib.to_object_array_tuples(data) idx_len = content.shape[0] arrays = convert_object_array(list(content.T), dtype=None, coerce_float=coerce_float, dtype_backend=dtype_backend) if dtype_backend == 'pyarrow': pa = import_optional_dependency('pyarrow') result_arrays = [] for arr in arrays: pa_array = pa.array(arr, from_pandas=True) if arr.dtype == 'string': pa_array = pa_array.cast(pa.string()) result_arrays.append(ArrowExtensionArray(pa_array)) arrays = result_arrays if arrays: return DataFrame._from_arrays(arrays, columns=columns, index=range(idx_len), verify_integrity=False) else: return DataFrame(columns=columns) def _wrap_result(data, columns, index_col=None, coerce_float: bool=True, parse_dates=None, dtype: DtypeArg | None=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> DataFrame: frame = _convert_arrays_to_dataframe(data, columns, coerce_float, dtype_backend) if dtype: frame = frame.astype(dtype) frame = _parse_date_columns(frame, parse_dates) if index_col is not None: frame = frame.set_index(index_col) return frame def _wrap_result_adbc(df: DataFrame, *, index_col=None, parse_dates=None, dtype: DtypeArg | None=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> DataFrame: if dtype: df = df.astype(dtype) df = _parse_date_columns(df, parse_dates) if index_col is not None: df = df.set_index(index_col) return df @overload def read_sql_table(table_name: str, con, schema=..., index_col: str | list[str] | None=..., coerce_float=..., parse_dates: list[str] | dict[str, str] | None=..., columns: list[str] | None=..., chunksize: None=..., dtype_backend: DtypeBackend | lib.NoDefault=...) -> DataFrame: ... 
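A short sketch of how `parse_dates` values flow through `_handle_date_column` above: a dict is forwarded to `to_datetime` as keyword arguments, a recognised unit string ('D', 'd', 'h', 'm', 's', 'ms', 'us', 'ns') is treated as an epoch offset, and any other string is used as a strftime format. The column contents below are illustrative.

import pandas as pd

epoch_col = pd.Series([0, 1_600_000_000])            # e.g. integer seconds
text_col = pd.Series(["2021-01-02", "2021-02-03"])   # e.g. TEXT column

# parse_dates={"created": "s"} -> unit-based conversion
print(pd.to_datetime(epoch_col, errors="coerce", unit="s"))
# parse_dates={"created": "%Y-%m-%d"} -> format-based conversion
print(pd.to_datetime(text_col, errors="coerce", format="%Y-%m-%d"))
# parse_dates={"created": {"utc": True, "unit": "ms"}} -> kwargs passed through
print(pd.to_datetime(epoch_col, utc=True, unit="ms"))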
@overload def read_sql_table(table_name: str, con, schema=..., index_col: str | list[str] | None=..., coerce_float=..., parse_dates: list[str] | dict[str, str] | None=..., columns: list[str] | None=..., chunksize: int=..., dtype_backend: DtypeBackend | lib.NoDefault=...) -> Iterator[DataFrame]: ... def read_sql_table(table_name: str, con, schema: str | None=None, index_col: str | list[str] | None=None, coerce_float: bool=True, parse_dates: list[str] | dict[str, str] | None=None, columns: list[str] | None=None, chunksize: int | None=None, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default) -> DataFrame | Iterator[DataFrame]: check_dtype_backend(dtype_backend) if dtype_backend is lib.no_default: dtype_backend = 'numpy' assert dtype_backend is not lib.no_default with pandasSQL_builder(con, schema=schema, need_transaction=True) as pandas_sql: if not pandas_sql.has_table(table_name): raise ValueError(f'Table {table_name} not found') table = pandas_sql.read_table(table_name, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns, chunksize=chunksize, dtype_backend=dtype_backend) if table is not None: return table else: raise ValueError(f'Table {table_name} not found', con) @overload def read_sql_query(sql, con, index_col: str | list[str] | None=..., coerce_float=..., params: list[Any] | Mapping[str, Any] | None=..., parse_dates: list[str] | dict[str, str] | None=..., chunksize: None=..., dtype: DtypeArg | None=..., dtype_backend: DtypeBackend | lib.NoDefault=...) -> DataFrame: ... @overload def read_sql_query(sql, con, index_col: str | list[str] | None=..., coerce_float=..., params: list[Any] | Mapping[str, Any] | None=..., parse_dates: list[str] | dict[str, str] | None=..., chunksize: int=..., dtype: DtypeArg | None=..., dtype_backend: DtypeBackend | lib.NoDefault=...) -> Iterator[DataFrame]: ... def read_sql_query(sql, con, index_col: str | list[str] | None=None, coerce_float: bool=True, params: list[Any] | Mapping[str, Any] | None=None, parse_dates: list[str] | dict[str, str] | None=None, chunksize: int | None=None, dtype: DtypeArg | None=None, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default) -> DataFrame | Iterator[DataFrame]: check_dtype_backend(dtype_backend) if dtype_backend is lib.no_default: dtype_backend = 'numpy' assert dtype_backend is not lib.no_default with pandasSQL_builder(con) as pandas_sql: return pandas_sql.read_query(sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates, chunksize=chunksize, dtype=dtype, dtype_backend=dtype_backend) @overload def read_sql(sql, con, index_col: str | list[str] | None=..., coerce_float=..., params=..., parse_dates=..., columns: list[str]=..., chunksize: None=..., dtype_backend: DtypeBackend | lib.NoDefault=..., dtype: DtypeArg | None=None) -> DataFrame: ... @overload def read_sql(sql, con, index_col: str | list[str] | None=..., coerce_float=..., params=..., parse_dates=..., columns: list[str]=..., chunksize: int=..., dtype_backend: DtypeBackend | lib.NoDefault=..., dtype: DtypeArg | None=None) -> Iterator[DataFrame]: ... 
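Usage sketch for the query path defined just above: a plain DBAPI2 connection such as `sqlite3` is routed by `pandasSQL_builder` (further down) to `SQLiteDatabase`, so no SQLAlchemy is required. The table name and data are created inline to keep the example self-contained.

import sqlite3
import pandas as pd

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE events (id INTEGER, ts TEXT)")
con.executemany(
    "INSERT INTO events VALUES (?, ?)",
    [(1, "2021-01-02"), (2, "2021-02-03")],
)

df = pd.read_sql_query(
    "SELECT id, ts FROM events WHERE id >= ?",
    con,
    params=(1,),
    parse_dates={"ts": "%Y-%m-%d"},   # handled by _handle_date_column above
)
print(df.dtypes)   # id: int64, ts: datetime64[ns]
con.close()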
def read_sql(sql, con, index_col: str | list[str] | None=None, coerce_float: bool=True, params=None, parse_dates=None, columns: list[str] | None=None, chunksize: int | None=None, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default, dtype: DtypeArg | None=None) -> DataFrame | Iterator[DataFrame]: check_dtype_backend(dtype_backend) if dtype_backend is lib.no_default: dtype_backend = 'numpy' assert dtype_backend is not lib.no_default with pandasSQL_builder(con) as pandas_sql: if isinstance(pandas_sql, SQLiteDatabase): return pandas_sql.read_query(sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates, chunksize=chunksize, dtype_backend=dtype_backend, dtype=dtype) try: _is_table_name = pandas_sql.has_table(sql) except Exception: _is_table_name = False if _is_table_name: return pandas_sql.read_table(sql, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns, chunksize=chunksize, dtype_backend=dtype_backend) else: return pandas_sql.read_query(sql, index_col=index_col, params=params, coerce_float=coerce_float, parse_dates=parse_dates, chunksize=chunksize, dtype_backend=dtype_backend, dtype=dtype) def to_sql(frame, name: str, con, schema: str | None=None, if_exists: Literal['fail', 'replace', 'append']='fail', index: bool=True, index_label: IndexLabel | None=None, chunksize: int | None=None, dtype: DtypeArg | None=None, method: Literal['multi'] | Callable | None=None, engine: str='auto', **engine_kwargs) -> int | None: if if_exists not in ('fail', 'replace', 'append'): raise ValueError(f"'{if_exists}' is not valid for if_exists") if isinstance(frame, Series): frame = frame.to_frame() elif not isinstance(frame, DataFrame): raise NotImplementedError("'frame' argument should be either a Series or a DataFrame") with pandasSQL_builder(con, schema=schema, need_transaction=True) as pandas_sql: return pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index, index_label=index_label, schema=schema, chunksize=chunksize, dtype=dtype, method=method, engine=engine, **engine_kwargs) def has_table(table_name: str, con, schema: str | None=None) -> bool: with pandasSQL_builder(con, schema=schema) as pandas_sql: return pandas_sql.has_table(table_name) table_exists = has_table def pandasSQL_builder(con, schema: str | None=None, need_transaction: bool=False) -> PandasSQL: import sqlite3 if isinstance(con, sqlite3.Connection) or con is None: return SQLiteDatabase(con) sqlalchemy = import_optional_dependency('sqlalchemy', errors='ignore') if isinstance(con, str) and sqlalchemy is None: raise ImportError('Using URI string without sqlalchemy installed.') if sqlalchemy is not None and isinstance(con, (str, sqlalchemy.engine.Connectable)): return SQLDatabase(con, schema, need_transaction) adbc = import_optional_dependency('adbc_driver_manager.dbapi', errors='ignore') if adbc and isinstance(con, adbc.Connection): return ADBCDatabase(con) warnings.warn('pandas only supports SQLAlchemy connectable (engine/connection) or database string URI or sqlite3 DBAPI2 connection. Other DBAPI2 objects are not tested. 
Please consider using SQLAlchemy.', UserWarning, stacklevel=find_stack_level()) return SQLiteDatabase(con) class SQLTable(PandasObject): def __init__(self, name: str, pandas_sql_engine, frame=None, index: bool | str | list[str] | None=True, if_exists: Literal['fail', 'replace', 'append']='fail', prefix: str='pandas', index_label=None, schema=None, keys=None, dtype: DtypeArg | None=None) -> None: self.name = name self.pd_sql = pandas_sql_engine self.prefix = prefix self.frame = frame self.index = self._index_name(index, index_label) self.schema = schema self.if_exists = if_exists self.keys = keys self.dtype = dtype if frame is not None: self.table = self._create_table_setup() else: self.table = self.pd_sql.get_table(self.name, self.schema) if self.table is None: raise ValueError(f"Could not init table '{name}'") if not len(self.name): raise ValueError('Empty table name specified') def exists(self): return self.pd_sql.has_table(self.name, self.schema) def sql_schema(self) -> str: from sqlalchemy.schema import CreateTable return str(CreateTable(self.table).compile(self.pd_sql.con)) def _execute_create(self) -> None: self.table = self.table.to_metadata(self.pd_sql.meta) with self.pd_sql.run_transaction(): self.table.create(bind=self.pd_sql.con) def create(self) -> None: if self.exists(): if self.if_exists == 'fail': raise ValueError(f"Table '{self.name}' already exists.") if self.if_exists == 'replace': self.pd_sql.drop_table(self.name, self.schema) self._execute_create() elif self.if_exists == 'append': pass else: raise ValueError(f"'{self.if_exists}' is not valid for if_exists") else: self._execute_create() def _execute_insert(self, conn, keys: list[str], data_iter) -> int: data = [dict(zip(keys, row)) for row in data_iter] result = conn.execute(self.table.insert(), data) return result.rowcount def _execute_insert_multi(self, conn, keys: list[str], data_iter) -> int: from sqlalchemy import insert data = [dict(zip(keys, row)) for row in data_iter] stmt = insert(self.table).values(data) result = conn.execute(stmt) return result.rowcount def insert_data(self) -> tuple[list[str], list[np.ndarray]]: if self.index is not None: temp = self.frame.copy(deep=False) temp.index.names = self.index try: temp.reset_index(inplace=True) except ValueError as err: raise ValueError(f'duplicate name in index/columns: {err}') from err else: temp = self.frame column_names = list(map(str, temp.columns)) ncols = len(column_names) data_list: list[np.ndarray] = [None] * ncols for (i, (_, ser)) in enumerate(temp.items()): if ser.dtype.kind == 'M': if isinstance(ser._values, ArrowExtensionArray): import pyarrow as pa if pa.types.is_date(ser.dtype.pyarrow_dtype): d = ser._values.to_numpy(dtype=object) else: d = ser.dt.to_pydatetime()._values else: d = ser._values.to_pydatetime() elif ser.dtype.kind == 'm': vals = ser._values if isinstance(vals, ArrowExtensionArray): vals = vals.to_numpy(dtype=np.dtype('m8[ns]')) d = vals.view('i8').astype(object) else: d = ser._values.astype(object) assert isinstance(d, np.ndarray), type(d) if ser._can_hold_na: mask = isna(d) d[mask] = None data_list[i] = d return (column_names, data_list) def insert(self, chunksize: int | None=None, method: Literal['multi'] | Callable | None=None) -> int | None: if method is None: exec_insert = self._execute_insert elif method == 'multi': exec_insert = self._execute_insert_multi elif callable(method): exec_insert = partial(method, self) else: raise ValueError(f'Invalid parameter `method`: {method}') (keys, data_list) = self.insert_data() nrows = 
len(self.frame) if nrows == 0: return 0 if chunksize is None: chunksize = nrows elif chunksize == 0: raise ValueError('chunksize argument should be non-zero') chunks = nrows // chunksize + 1 total_inserted = None with self.pd_sql.run_transaction() as conn: for i in range(chunks): start_i = i * chunksize end_i = min((i + 1) * chunksize, nrows) if start_i >= end_i: break chunk_iter = zip(*(arr[start_i:end_i] for arr in data_list)) num_inserted = exec_insert(conn, keys, chunk_iter) if num_inserted is not None: if total_inserted is None: total_inserted = num_inserted else: total_inserted += num_inserted return total_inserted def _query_iterator(self, result, exit_stack: ExitStack, chunksize: int | None, columns, coerce_float: bool=True, parse_dates=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> Generator[DataFrame, None, None]: has_read_data = False with exit_stack: while True: data = result.fetchmany(chunksize) if not data: if not has_read_data: yield DataFrame.from_records([], columns=columns, coerce_float=coerce_float) break has_read_data = True self.frame = _convert_arrays_to_dataframe(data, columns, coerce_float, dtype_backend) self._harmonize_columns(parse_dates=parse_dates, dtype_backend=dtype_backend) if self.index is not None: self.frame.set_index(self.index, inplace=True) yield self.frame def read(self, exit_stack: ExitStack, coerce_float: bool=True, parse_dates=None, columns=None, chunksize: int | None=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> DataFrame | Iterator[DataFrame]: from sqlalchemy import select if columns is not None and len(columns) > 0: cols = [self.table.c[n] for n in columns] if self.index is not None: for idx in self.index[::-1]: cols.insert(0, self.table.c[idx]) sql_select = select(*cols) else: sql_select = select(self.table) result = self.pd_sql.execute(sql_select) column_names = result.keys() if chunksize is not None: return self._query_iterator(result, exit_stack, chunksize, column_names, coerce_float=coerce_float, parse_dates=parse_dates, dtype_backend=dtype_backend) else: data = result.fetchall() self.frame = _convert_arrays_to_dataframe(data, column_names, coerce_float, dtype_backend) self._harmonize_columns(parse_dates=parse_dates, dtype_backend=dtype_backend) if self.index is not None: self.frame.set_index(self.index, inplace=True) return self.frame def _index_name(self, index, index_label): if index is True: nlevels = self.frame.index.nlevels if index_label is not None: if not isinstance(index_label, list): index_label = [index_label] if len(index_label) != nlevels: raise ValueError(f"Length of 'index_label' should match number of levels, which is {nlevels}") return index_label if nlevels == 1 and 'index' not in self.frame.columns and (self.frame.index.name is None): return ['index'] else: return com.fill_missing_names(self.frame.index.names) elif isinstance(index, str): return [index] elif isinstance(index, list): return index else: return None def _get_column_names_and_types(self, dtype_mapper): column_names_and_types = [] if self.index is not None: for (i, idx_label) in enumerate(self.index): idx_type = dtype_mapper(self.frame.index._get_level_values(i)) column_names_and_types.append((str(idx_label), idx_type, True)) column_names_and_types += [(str(self.frame.columns[i]), dtype_mapper(self.frame.iloc[:, i]), False) for i in range(len(self.frame.columns))] return column_names_and_types def _create_table_setup(self): from sqlalchemy import Column, PrimaryKeyConstraint, Table from sqlalchemy.schema import MetaData 
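# _create_table_setup (continued below): map each index level and column to a
# SQLAlchemy type via _sqlalchemy_type, wrap them in Column objects, append a
# PrimaryKeyConstraint when self.keys is given, and attach the resulting Table
# to a fresh MetaData under the resolved schema.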
column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type) columns: list[Any] = [Column(name, typ, index=is_index) for (name, typ, is_index) in column_names_and_types] if self.keys is not None: if not is_list_like(self.keys): keys = [self.keys] else: keys = self.keys pkc = PrimaryKeyConstraint(*keys, name=self.name + '_pk') columns.append(pkc) schema = self.schema or self.pd_sql.meta.schema meta = MetaData() return Table(self.name, meta, *columns, schema=schema) def _harmonize_columns(self, parse_dates=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> None: parse_dates = _process_parse_dates_argument(parse_dates) for sql_col in self.table.columns: col_name = sql_col.name try: df_col = self.frame[col_name] if col_name in parse_dates: try: fmt = parse_dates[col_name] except TypeError: fmt = None self.frame[col_name] = _handle_date_column(df_col, format=fmt) continue col_type = self._get_dtype(sql_col.type) if col_type is datetime or col_type is date or col_type is DatetimeTZDtype: utc = col_type is DatetimeTZDtype self.frame[col_name] = _handle_date_column(df_col, utc=utc) elif dtype_backend == 'numpy' and col_type is float: self.frame[col_name] = df_col.astype(col_type) elif dtype_backend == 'numpy' and len(df_col) == df_col.count(): if col_type is np.dtype('int64') or col_type is bool: self.frame[col_name] = df_col.astype(col_type) except KeyError: pass def _sqlalchemy_type(self, col: Index | Series): dtype: DtypeArg = self.dtype or {} if is_dict_like(dtype): dtype = cast(dict, dtype) if col.name in dtype: return dtype[col.name] col_type = lib.infer_dtype(col, skipna=True) from sqlalchemy.types import TIMESTAMP, BigInteger, Boolean, Date, DateTime, Float, Integer, SmallInteger, Text, Time if col_type in ('datetime64', 'datetime'): try: if col.dt.tz is not None: return TIMESTAMP(timezone=True) except AttributeError: if getattr(col, 'tz', None) is not None: return TIMESTAMP(timezone=True) return DateTime if col_type == 'timedelta64': warnings.warn("the 'timedelta' type is not supported, and will be written as integer values (ns frequency) to the database.", UserWarning, stacklevel=find_stack_level()) return BigInteger elif col_type == 'floating': if col.dtype == 'float32': return Float(precision=23) else: return Float(precision=53) elif col_type == 'integer': if col.dtype.name.lower() in ('int8', 'uint8', 'int16'): return SmallInteger elif col.dtype.name.lower() in ('uint16', 'int32'): return Integer elif col.dtype.name.lower() == 'uint64': raise ValueError('Unsigned 64 bit integer datatype is not supported') else: return BigInteger elif col_type == 'boolean': return Boolean elif col_type == 'date': return Date elif col_type == 'time': return Time elif col_type == 'complex': raise ValueError('Complex datatypes not supported') return Text def _get_dtype(self, sqltype): from sqlalchemy.types import TIMESTAMP, Boolean, Date, DateTime, Float, Integer if isinstance(sqltype, Float): return float elif isinstance(sqltype, Integer): return np.dtype('int64') elif isinstance(sqltype, TIMESTAMP): if not sqltype.timezone: return datetime return DatetimeTZDtype elif isinstance(sqltype, DateTime): return datetime elif isinstance(sqltype, Date): return date elif isinstance(sqltype, Boolean): return bool return object class PandasSQL(PandasObject, ABC): def __enter__(self) -> Self: return self def __exit__(self, *args) -> None: pass def read_table(self, table_name: str, index_col: str | list[str] | None=None, coerce_float: bool=True, parse_dates=None, columns=None, 
schema: str | None=None, chunksize: int | None=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> DataFrame | Iterator[DataFrame]: raise NotImplementedError @abstractmethod def read_query(self, sql: str, index_col: str | list[str] | None=None, coerce_float: bool=True, parse_dates=None, params=None, chunksize: int | None=None, dtype: DtypeArg | None=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> DataFrame | Iterator[DataFrame]: pass @abstractmethod def to_sql(self, frame, name: str, if_exists: Literal['fail', 'replace', 'append']='fail', index: bool=True, index_label=None, schema=None, chunksize: int | None=None, dtype: DtypeArg | None=None, method: Literal['multi'] | Callable | None=None, engine: str='auto', **engine_kwargs) -> int | None: pass @abstractmethod def execute(self, sql: str | Select | TextClause, params=None): pass @abstractmethod def has_table(self, name: str, schema: str | None=None) -> bool: pass @abstractmethod def _create_sql_schema(self, frame: DataFrame, table_name: str, keys: list[str] | None=None, dtype: DtypeArg | None=None, schema: str | None=None) -> str: pass class BaseEngine: def insert_records(self, table: SQLTable, con, frame, name: str, index: bool | str | list[str] | None=True, schema=None, chunksize: int | None=None, method=None, **engine_kwargs) -> int | None: raise AbstractMethodError(self) class SQLAlchemyEngine(BaseEngine): def __init__(self) -> None: import_optional_dependency('sqlalchemy', extra='sqlalchemy is required for SQL support.') def insert_records(self, table: SQLTable, con, frame, name: str, index: bool | str | list[str] | None=True, schema=None, chunksize: int | None=None, method=None, **engine_kwargs) -> int | None: from sqlalchemy import exc try: return table.insert(chunksize=chunksize, method=method) except exc.StatementError as err: msg = '(\\(1054, "Unknown column \'inf(e0)?\' in \'field list\'"\\))(?#\n )|inf can not be used with MySQL' err_text = str(err.orig) if re.search(msg, err_text): raise ValueError('inf cannot be used with MySQL') from err raise err def get_engine(engine: str) -> BaseEngine: if engine == 'auto': engine = get_option('io.sql.engine') if engine == 'auto': engine_classes = [SQLAlchemyEngine] error_msgs = '' for engine_class in engine_classes: try: return engine_class() except ImportError as err: error_msgs += '\n - ' + str(err) raise ImportError(f"Unable to find a usable engine; tried using: 'sqlalchemy'.\nA suitable version of sqlalchemy is required for sql I/O support.\nTrying to import the above resulted in these errors:{error_msgs}") if engine == 'sqlalchemy': return SQLAlchemyEngine() raise ValueError("engine must be one of 'auto', 'sqlalchemy'") class SQLDatabase(PandasSQL): def __init__(self, con, schema: str | None=None, need_transaction: bool=False) -> None: from sqlalchemy import create_engine from sqlalchemy.engine import Engine from sqlalchemy.schema import MetaData self.exit_stack = ExitStack() if isinstance(con, str): con = create_engine(con) self.exit_stack.callback(con.dispose) if isinstance(con, Engine): con = self.exit_stack.enter_context(con.connect()) if need_transaction and (not con.in_transaction()): self.exit_stack.enter_context(con.begin()) self.con = con self.meta = MetaData(schema=schema) self.returns_generator = False def __exit__(self, *args) -> None: if not self.returns_generator: self.exit_stack.close() @contextmanager def run_transaction(self): if not self.con.in_transaction(): with self.con.begin(): yield self.con else: yield self.con def execute(self, 
sql: str | Select | TextClause, params=None): args = [] if params is None else [params] if isinstance(sql, str): return self.con.exec_driver_sql(sql, *args) return self.con.execute(sql, *args) def read_table(self, table_name: str, index_col: str | list[str] | None=None, coerce_float: bool=True, parse_dates=None, columns=None, schema: str | None=None, chunksize: int | None=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> DataFrame | Iterator[DataFrame]: self.meta.reflect(bind=self.con, only=[table_name], views=True) table = SQLTable(table_name, self, index=index_col, schema=schema) if chunksize is not None: self.returns_generator = True return table.read(self.exit_stack, coerce_float=coerce_float, parse_dates=parse_dates, columns=columns, chunksize=chunksize, dtype_backend=dtype_backend) @staticmethod def _query_iterator(result, exit_stack: ExitStack, chunksize: int, columns, index_col=None, coerce_float: bool=True, parse_dates=None, dtype: DtypeArg | None=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> Generator[DataFrame, None, None]: has_read_data = False with exit_stack: while True: data = result.fetchmany(chunksize) if not data: if not has_read_data: yield _wrap_result([], columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, dtype=dtype, dtype_backend=dtype_backend) break has_read_data = True yield _wrap_result(data, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, dtype=dtype, dtype_backend=dtype_backend) def read_query(self, sql: str, index_col: str | list[str] | None=None, coerce_float: bool=True, parse_dates=None, params=None, chunksize: int | None=None, dtype: DtypeArg | None=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> DataFrame | Iterator[DataFrame]: result = self.execute(sql, params) columns = result.keys() if chunksize is not None: self.returns_generator = True return self._query_iterator(result, self.exit_stack, chunksize, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, dtype=dtype, dtype_backend=dtype_backend) else: data = result.fetchall() frame = _wrap_result(data, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, dtype=dtype, dtype_backend=dtype_backend) return frame read_sql = read_query def prep_table(self, frame, name: str, if_exists: Literal['fail', 'replace', 'append']='fail', index: bool | str | list[str] | None=True, index_label=None, schema=None, dtype: DtypeArg | None=None) -> SQLTable: if dtype: if not is_dict_like(dtype): dtype = {col_name: dtype for col_name in frame} else: dtype = cast(dict, dtype) from sqlalchemy.types import TypeEngine for (col, my_type) in dtype.items(): if isinstance(my_type, type) and issubclass(my_type, TypeEngine): pass elif isinstance(my_type, TypeEngine): pass else: raise ValueError(f'The type of {col} is not a SQLAlchemy type') table = SQLTable(name, self, frame=frame, index=index, if_exists=if_exists, index_label=index_label, schema=schema, dtype=dtype) table.create() return table def check_case_sensitive(self, name: str, schema: str | None) -> None: if not name.isdigit() and (not name.islower()): from sqlalchemy import inspect as sqlalchemy_inspect insp = sqlalchemy_inspect(self.con) table_names = insp.get_table_names(schema=schema or self.meta.schema) if name not in table_names: msg = f"The provided table name '{name}' is not found exactly as such in the database after writing the table, possibly due to case sensitivity issues. 
Consider using lower case table names." warnings.warn(msg, UserWarning, stacklevel=find_stack_level()) def to_sql(self, frame, name: str, if_exists: Literal['fail', 'replace', 'append']='fail', index: bool=True, index_label=None, schema: str | None=None, chunksize: int | None=None, dtype: DtypeArg | None=None, method: Literal['multi'] | Callable | None=None, engine: str='auto', **engine_kwargs) -> int | None: sql_engine = get_engine(engine) table = self.prep_table(frame=frame, name=name, if_exists=if_exists, index=index, index_label=index_label, schema=schema, dtype=dtype) total_inserted = sql_engine.insert_records(table=table, con=self.con, frame=frame, name=name, index=index, schema=schema, chunksize=chunksize, method=method, **engine_kwargs) self.check_case_sensitive(name=name, schema=schema) return total_inserted @property def tables(self): return self.meta.tables def has_table(self, name: str, schema: str | None=None) -> bool: from sqlalchemy import inspect as sqlalchemy_inspect insp = sqlalchemy_inspect(self.con) return insp.has_table(name, schema or self.meta.schema) def get_table(self, table_name: str, schema: str | None=None) -> Table: from sqlalchemy import Numeric, Table schema = schema or self.meta.schema tbl = Table(table_name, self.meta, autoload_with=self.con, schema=schema) for column in tbl.columns: if isinstance(column.type, Numeric): column.type.asdecimal = False return tbl def drop_table(self, table_name: str, schema: str | None=None) -> None: schema = schema or self.meta.schema if self.has_table(table_name, schema): self.meta.reflect(bind=self.con, only=[table_name], schema=schema, views=True) with self.run_transaction(): self.get_table(table_name, schema).drop(bind=self.con) self.meta.clear() def _create_sql_schema(self, frame: DataFrame, table_name: str, keys: list[str] | None=None, dtype: DtypeArg | None=None, schema: str | None=None) -> str: table = SQLTable(table_name, self, frame=frame, index=False, keys=keys, dtype=dtype, schema=schema) return str(table.sql_schema()) class ADBCDatabase(PandasSQL): def __init__(self, con) -> None: self.con = con @contextmanager def run_transaction(self): with self.con.cursor() as cur: try: yield cur except Exception: self.con.rollback() raise self.con.commit() def execute(self, sql: str | Select | TextClause, params=None): if not isinstance(sql, str): raise TypeError('Query must be a string unless using sqlalchemy.') args = [] if params is None else [params] cur = self.con.cursor() try: cur.execute(sql, *args) return cur except Exception as exc: try: self.con.rollback() except Exception as inner_exc: ex = DatabaseError(f'Execution failed on sql: {sql}\n{exc}\nunable to rollback') raise ex from inner_exc ex = DatabaseError(f"Execution failed on sql '{sql}': {exc}") raise ex from exc def read_table(self, table_name: str, index_col: str | list[str] | None=None, coerce_float: bool=True, parse_dates=None, columns=None, schema: str | None=None, chunksize: int | None=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> DataFrame | Iterator[DataFrame]: if coerce_float is not True: raise NotImplementedError("'coerce_float' is not implemented for ADBC drivers") if chunksize: raise NotImplementedError("'chunksize' is not implemented for ADBC drivers") if columns: if index_col: index_select = maybe_make_list(index_col) else: index_select = [] to_select = index_select + columns select_list = ', '.join((f'"{x}"' for x in to_select)) else: select_list = '*' if schema: stmt = f'SELECT {select_list} FROM {schema}.{table_name}' else: 
stmt = f'SELECT {select_list} FROM {table_name}' mapping: type[ArrowDtype] | None | Callable if dtype_backend == 'pyarrow': mapping = ArrowDtype elif dtype_backend == 'numpy_nullable': from pandas.io._util import _arrow_dtype_mapping mapping = _arrow_dtype_mapping().get elif using_string_dtype(): from pandas.io._util import arrow_string_types_mapper arrow_string_types_mapper() else: mapping = None with self.con.cursor() as cur: cur.execute(stmt) df = cur.fetch_arrow_table().to_pandas(types_mapper=mapping) return _wrap_result_adbc(df, index_col=index_col, parse_dates=parse_dates) def read_query(self, sql: str, index_col: str | list[str] | None=None, coerce_float: bool=True, parse_dates=None, params=None, chunksize: int | None=None, dtype: DtypeArg | None=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> DataFrame | Iterator[DataFrame]: if coerce_float is not True: raise NotImplementedError("'coerce_float' is not implemented for ADBC drivers") if params: raise NotImplementedError("'params' is not implemented for ADBC drivers") if chunksize: raise NotImplementedError("'chunksize' is not implemented for ADBC drivers") mapping: type[ArrowDtype] | None | Callable if dtype_backend == 'pyarrow': mapping = ArrowDtype elif dtype_backend == 'numpy_nullable': from pandas.io._util import _arrow_dtype_mapping mapping = _arrow_dtype_mapping().get else: mapping = None with self.con.cursor() as cur: cur.execute(sql) df = cur.fetch_arrow_table().to_pandas(types_mapper=mapping) return _wrap_result_adbc(df, index_col=index_col, parse_dates=parse_dates, dtype=dtype) read_sql = read_query def to_sql(self, frame, name: str, if_exists: Literal['fail', 'replace', 'append']='fail', index: bool=True, index_label=None, schema: str | None=None, chunksize: int | None=None, dtype: DtypeArg | None=None, method: Literal['multi'] | Callable | None=None, engine: str='auto', **engine_kwargs) -> int | None: if index_label: raise NotImplementedError("'index_label' is not implemented for ADBC drivers") if chunksize: raise NotImplementedError("'chunksize' is not implemented for ADBC drivers") if dtype: raise NotImplementedError("'dtype' is not implemented for ADBC drivers") if method: raise NotImplementedError("'method' is not implemented for ADBC drivers") if engine != 'auto': raise NotImplementedError("engine != 'auto' not implemented for ADBC drivers") if schema: table_name = f'{schema}.{name}' else: table_name = name mode = 'create' if self.has_table(name, schema): if if_exists == 'fail': raise ValueError(f"Table '{table_name}' already exists.") elif if_exists == 'replace': with self.con.cursor() as cur: cur.execute(f'DROP TABLE {table_name}') elif if_exists == 'append': mode = 'append' import pyarrow as pa try: tbl = pa.Table.from_pandas(frame, preserve_index=index) except pa.ArrowNotImplementedError as exc: raise ValueError('datatypes not supported') from exc with self.con.cursor() as cur: total_inserted = cur.adbc_ingest(table_name=name, data=tbl, mode=mode, db_schema_name=schema) self.con.commit() return total_inserted def has_table(self, name: str, schema: str | None=None) -> bool: meta = self.con.adbc_get_objects(db_schema_filter=schema, table_name_filter=name).read_all() for catalog_schema in meta['catalog_db_schemas'].to_pylist(): if not catalog_schema: continue for schema_record in catalog_schema: if not schema_record: continue for table_record in schema_record['db_schema_tables']: if table_record['table_name'] == name: return True return False def _create_sql_schema(self, frame: DataFrame, 
table_name: str, keys: list[str] | None=None, dtype: DtypeArg | None=None, schema: str | None=None) -> str: raise NotImplementedError('not implemented for adbc') _SQL_TYPES = {'string': 'TEXT', 'floating': 'REAL', 'integer': 'INTEGER', 'datetime': 'TIMESTAMP', 'date': 'DATE', 'time': 'TIME', 'boolean': 'INTEGER'} def _get_unicode_name(name: object) -> str: try: uname = str(name).encode('utf-8', 'strict').decode('utf-8') except UnicodeError as err: raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'") from err return uname def _get_valid_sqlite_name(name: object) -> str: uname = _get_unicode_name(name) if not len(uname): raise ValueError('Empty table or column name specified') nul_index = uname.find('\x00') if nul_index >= 0: raise ValueError('SQLite identifier cannot contain NULs') return '"' + uname.replace('"', '""') + '"' class SQLiteTable(SQLTable): def __init__(self, *args, **kwargs) -> None: super().__init__(*args, **kwargs) self._register_date_adapters() def _register_date_adapters(self) -> None: import sqlite3 def _adapt_time(t) -> str: return f'{t.hour:02d}:{t.minute:02d}:{t.second:02d}.{t.microsecond:06d}' adapt_date_iso = lambda val: val.isoformat() adapt_datetime_iso = lambda val: val.isoformat(' ') sqlite3.register_adapter(time, _adapt_time) sqlite3.register_adapter(date, adapt_date_iso) sqlite3.register_adapter(datetime, adapt_datetime_iso) convert_date = lambda val: date.fromisoformat(val.decode()) convert_timestamp = lambda val: datetime.fromisoformat(val.decode()) sqlite3.register_converter('date', convert_date) sqlite3.register_converter('timestamp', convert_timestamp) def sql_schema(self) -> str: return str(';\n'.join(self.table)) def _execute_create(self) -> None: with self.pd_sql.run_transaction() as conn: for stmt in self.table: conn.execute(stmt) def insert_statement(self, *, num_rows: int) -> str: names = list(map(str, self.frame.columns)) wld = '?' escape = _get_valid_sqlite_name if self.index is not None: for idx in self.index[::-1]: names.insert(0, idx) bracketed_names = [escape(column) for column in names] col_names = ','.join(bracketed_names) row_wildcards = ','.join([wld] * len(names)) wildcards = ','.join([f'({row_wildcards})' for _ in range(num_rows)]) insert_statement = f'INSERT INTO {escape(self.name)} ({col_names}) VALUES {wildcards}' return insert_statement def _execute_insert(self, conn, keys, data_iter) -> int: data_list = list(data_iter) conn.executemany(self.insert_statement(num_rows=1), data_list) return conn.rowcount def _execute_insert_multi(self, conn, keys, data_iter) -> int: data_list = list(data_iter) flattened_data = [x for row in data_list for x in row] conn.execute(self.insert_statement(num_rows=len(data_list)), flattened_data) return conn.rowcount def _create_table_setup(self): column_names_and_types = self._get_column_names_and_types(self._sql_type_name) escape = _get_valid_sqlite_name create_tbl_stmts = [escape(cname) + ' ' + ctype for (cname, ctype, _) in column_names_and_types] if self.keys is not None and len(self.keys): if not is_list_like(self.keys): keys = [self.keys] else: keys = self.keys cnames_br = ', '.join([escape(c) for c in keys]) create_tbl_stmts.append(f'CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})') if self.schema: schema_name = self.schema + '.' 
else: schema_name = '' create_stmts = ['CREATE TABLE ' + schema_name + escape(self.name) + ' (\n' + ',\n '.join(create_tbl_stmts) + '\n)'] ix_cols = [cname for (cname, _, is_index) in column_names_and_types if is_index] if len(ix_cols): cnames = '_'.join(ix_cols) cnames_br = ','.join([escape(c) for c in ix_cols]) create_stmts.append('CREATE INDEX ' + escape('ix_' + self.name + '_' + cnames) + 'ON ' + escape(self.name) + ' (' + cnames_br + ')') return create_stmts def _sql_type_name(self, col): dtype: DtypeArg = self.dtype or {} if is_dict_like(dtype): dtype = cast(dict, dtype) if col.name in dtype: return dtype[col.name] col_type = lib.infer_dtype(col, skipna=True) if col_type == 'timedelta64': warnings.warn("the 'timedelta' type is not supported, and will be written as integer values (ns frequency) to the database.", UserWarning, stacklevel=find_stack_level()) col_type = 'integer' elif col_type == 'datetime64': col_type = 'datetime' elif col_type == 'empty': col_type = 'string' elif col_type == 'complex': raise ValueError('Complex datatypes not supported') if col_type not in _SQL_TYPES: col_type = 'string' return _SQL_TYPES[col_type] class SQLiteDatabase(PandasSQL): def __init__(self, con) -> None: self.con = con @contextmanager def run_transaction(self): cur = self.con.cursor() try: yield cur self.con.commit() except Exception: self.con.rollback() raise finally: cur.close() def execute(self, sql: str | Select | TextClause, params=None): if not isinstance(sql, str): raise TypeError('Query must be a string unless using sqlalchemy.') args = [] if params is None else [params] cur = self.con.cursor() try: cur.execute(sql, *args) return cur except Exception as exc: try: self.con.rollback() except Exception as inner_exc: ex = DatabaseError(f'Execution failed on sql: {sql}\n{exc}\nunable to rollback') raise ex from inner_exc ex = DatabaseError(f"Execution failed on sql '{sql}': {exc}") raise ex from exc @staticmethod def _query_iterator(cursor, chunksize: int, columns, index_col=None, coerce_float: bool=True, parse_dates=None, dtype: DtypeArg | None=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> Generator[DataFrame, None, None]: has_read_data = False while True: data = cursor.fetchmany(chunksize) if type(data) == tuple: data = list(data) if not data: cursor.close() if not has_read_data: result = DataFrame.from_records([], columns=columns, coerce_float=coerce_float) if dtype: result = result.astype(dtype) yield result break has_read_data = True yield _wrap_result(data, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, dtype=dtype, dtype_backend=dtype_backend) def read_query(self, sql, index_col=None, coerce_float: bool=True, parse_dates=None, params=None, chunksize: int | None=None, dtype: DtypeArg | None=None, dtype_backend: DtypeBackend | Literal['numpy']='numpy') -> DataFrame | Iterator[DataFrame]: cursor = self.execute(sql, params) columns = [col_desc[0] for col_desc in cursor.description] if chunksize is not None: return self._query_iterator(cursor, chunksize, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, dtype=dtype, dtype_backend=dtype_backend) else: data = self._fetchall_as_list(cursor) cursor.close() frame = _wrap_result(data, columns, index_col=index_col, coerce_float=coerce_float, parse_dates=parse_dates, dtype=dtype, dtype_backend=dtype_backend) return frame def _fetchall_as_list(self, cur): result = cur.fetchall() if not isinstance(result, list): result = list(result) return result def to_sql(self, 
frame, name: str, if_exists: str='fail', index: bool=True, index_label=None, schema=None, chunksize: int | None=None, dtype: DtypeArg | None=None, method: Literal['multi'] | Callable | None=None, engine: str='auto', **engine_kwargs) -> int | None: if dtype: if not is_dict_like(dtype): dtype = {col_name: dtype for col_name in frame} else: dtype = cast(dict, dtype) for (col, my_type) in dtype.items(): if not isinstance(my_type, str): raise ValueError(f'{col} ({my_type}) not a string') table = SQLiteTable(name, self, frame=frame, index=index, if_exists=if_exists, index_label=index_label, dtype=dtype) table.create() return table.insert(chunksize, method) def has_table(self, name: str, schema: str | None=None) -> bool: wld = '?' query = f"\n SELECT\n name\n FROM\n sqlite_master\n WHERE\n type IN ('table', 'view')\n AND name={wld};\n " return len(self.execute(query, [name]).fetchall()) > 0 def get_table(self, table_name: str, schema: str | None=None) -> None: return None def drop_table(self, name: str, schema: str | None=None) -> None: drop_sql = f'DROP TABLE {_get_valid_sqlite_name(name)}' self.execute(drop_sql) def _create_sql_schema(self, frame, table_name: str, keys=None, dtype: DtypeArg | None=None, schema: str | None=None) -> str: table = SQLiteTable(table_name, self, frame=frame, index=False, keys=keys, dtype=dtype, schema=schema) return str(table.sql_schema()) def get_schema(frame, name: str, keys=None, con=None, dtype: DtypeArg | None=None, schema: str | None=None) -> str: with pandasSQL_builder(con=con) as pandas_sql: return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype, schema=schema) # File: pandas-main/pandas/io/stata.py """""" from __future__ import annotations from collections import abc from datetime import datetime, timedelta from io import BytesIO import os import struct import sys from typing import IO, TYPE_CHECKING, AnyStr, Final, cast import warnings import numpy as np from pandas._libs import lib from pandas._libs.lib import infer_dtype from pandas._libs.writers import max_len_string_array from pandas.errors import CategoricalConversionWarning, InvalidColumnName, PossiblePrecisionLoss, ValueLabelTypeMismatch from pandas.util._decorators import Appender, doc from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.base import ExtensionDtype from pandas.core.dtypes.common import ensure_object, is_numeric_dtype, is_string_dtype from pandas.core.dtypes.dtypes import CategoricalDtype from pandas import Categorical, DatetimeIndex, NaT, Timestamp, isna, to_datetime from pandas.core.frame import DataFrame from pandas.core.indexes.base import Index from pandas.core.indexes.range import RangeIndex from pandas.core.series import Series from pandas.core.shared_docs import _shared_docs from pandas.io.common import get_handle if TYPE_CHECKING: from collections.abc import Callable, Hashable, Sequence from types import TracebackType from typing import Literal from pandas._typing import CompressionOptions, FilePath, ReadBuffer, Self, StorageOptions, WriteBuffer _version_error = 'Version of given Stata file is {version}. pandas supports importing versions 102, 103, 104, 105, 108, 110 (Stata 7), 111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), 115 (Stata 12), 117 (Stata 13), 118 (Stata 14/15/16), and 119 (Stata 15/16, over 32,767 variables).' 
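# --- Illustrative usage sketch (not part of the pandas sources): chunked reading with
# --- pd.read_stata, mirroring the example in the read_stata docstring below.
# --- 'animals.dta' is a hypothetical file name.
import pandas as pd

with pd.read_stata("animals.dta", chunksize=10_000) as reader:  # a StataReader when chunksize is set
    for chunk in reader:  # each chunk is a DataFrame of up to 10_000 rows
        chunk.head()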
_statafile_processing_params1 = 'convert_dates : bool, default True\n Convert date variables to DataFrame time values.\nconvert_categoricals : bool, default True\n Read value labels and convert columns to Categorical/Factor variables.' _statafile_processing_params2 = 'index_col : str, optional\n Column to set as index.\nconvert_missing : bool, default False\n Flag indicating whether to convert missing values to their Stata\n representations. If False, missing values are replaced with nan.\n If True, columns containing missing values are returned with\n object data types and missing values are represented by\n StataMissingValue objects.\npreserve_dtypes : bool, default True\n Preserve Stata datatypes. If False, numeric data are upcast to pandas\n default types for foreign data (float64 or int64).\ncolumns : list or None\n Columns to retain. Columns will be returned in the given order. None\n returns all columns.\norder_categoricals : bool, default True\n Flag indicating whether converted categorical data are ordered.' _chunksize_params = 'chunksize : int, default None\n Return StataReader object for iterations, returns chunks with\n given number of lines.' _iterator_params = 'iterator : bool, default False\n Return StataReader object.' _reader_notes = 'Notes\n-----\nCategorical variables read through an iterator may not have the same\ncategories and dtype. This occurs when a variable stored in a DTA\nfile is associated to an incomplete set of value labels that only\nlabel a strict subset of the values.' _read_stata_doc = f"""\nRead Stata file into DataFrame.\n\nParameters\n----------\nfilepath_or_buffer : str, path object or file-like object\n Any valid string path is acceptable. The string could be a URL. Valid\n URL schemes include http, ftp, s3, and file. For file URLs, a host is\n expected. A local file could be: ``file://localhost/path/to/table.dta``.\n\n If you want to pass in a path object, pandas accepts any ``os.PathLike``.\n\n By file-like object, we refer to objects with a ``read()`` method,\n such as a file handle (e.g. via builtin ``open`` function)\n or ``StringIO``.\n{_statafile_processing_params1}\n{_statafile_processing_params2}\n{_chunksize_params}\n{_iterator_params}\n{_shared_docs['decompression_options'] % 'filepath_or_buffer'}\n{_shared_docs['storage_options']}\n\nReturns\n-------\nDataFrame, pandas.api.typing.StataReader\n If iterator or chunksize, returns StataReader, else DataFrame.\n\nSee Also\n--------\nio.stata.StataReader : Low-level reader for Stata data files.\nDataFrame.to_stata: Export Stata data files.\n\n{_reader_notes}\n\nExamples\n--------\n\nCreating a dummy stata for this example\n\n>>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon', 'parrot'],\n... 'speed': [350, 18, 361, 15]}}) # doctest: +SKIP\n>>> df.to_stata('animals.dta') # doctest: +SKIP\n\nRead a Stata dta file:\n\n>>> df = pd.read_stata('animals.dta') # doctest: +SKIP\n\nRead a Stata dta file in 10,000 line chunks:\n\n>>> values = np.random.randint(0, 10, size=(20_000, 1), dtype="uint8") # doctest: +SKIP\n>>> df = pd.DataFrame(values, columns=["i"]) # doctest: +SKIP\n>>> df.to_stata('filename.dta') # doctest: +SKIP\n\n>>> with pd.read_stata('filename.dta', chunksize=10000) as itr: # doctest: +SKIP\n>>> for chunk in itr:\n... # Operate on a single chunk, e.g., chunk.mean()\n... 
pass # doctest: +SKIP\n""" _read_method_doc = f'Reads observations from Stata file, converting them into a dataframe\n\nParameters\n----------\nnrows : int\n Number of lines to read from data file, if None read whole file.\n{_statafile_processing_params1}\n{_statafile_processing_params2}\n\nReturns\n-------\nDataFrame\n' _stata_reader_doc = f"Class for reading Stata dta files.\n\nParameters\n----------\npath_or_buf : path (string), buffer or path object\n string, pathlib.Path or object\n implementing a binary read() functions.\n{_statafile_processing_params1}\n{_statafile_processing_params2}\n{_chunksize_params}\n{_shared_docs['decompression_options']}\n{_shared_docs['storage_options']}\n\n{_reader_notes}\n" _date_formats = ['%tc', '%tC', '%td', '%d', '%tw', '%tm', '%tq', '%th', '%ty'] stata_epoch: Final = datetime(1960, 1, 1) unix_epoch: Final = datetime(1970, 1, 1) def _stata_elapsed_date_to_datetime_vec(dates: Series, fmt: str) -> Series: if fmt.startswith(('%tc', 'tc')): td = np.timedelta64(stata_epoch - unix_epoch, 'ms') res = np.array(dates._values, dtype='M8[ms]') + td return Series(res, index=dates.index) elif fmt.startswith(('%td', 'td', '%d', 'd')): td = np.timedelta64(stata_epoch - unix_epoch, 'D') res = np.array(dates._values, dtype='M8[D]') + td return Series(res, index=dates.index) elif fmt.startswith(('%tm', 'tm')): ordinals = dates + (stata_epoch.year - unix_epoch.year) * 12 res = np.array(ordinals, dtype='M8[M]').astype('M8[s]') return Series(res, index=dates.index) elif fmt.startswith(('%tq', 'tq')): ordinals = dates + (stata_epoch.year - unix_epoch.year) * 4 res = np.array(ordinals, dtype='M8[3M]').astype('M8[s]') return Series(res, index=dates.index) elif fmt.startswith(('%th', 'th')): ordinals = dates + (stata_epoch.year - unix_epoch.year) * 2 res = np.array(ordinals, dtype='M8[6M]').astype('M8[s]') return Series(res, index=dates.index) elif fmt.startswith(('%ty', 'ty')): ordinals = dates - 1970 res = np.array(ordinals, dtype='M8[Y]').astype('M8[s]') return Series(res, index=dates.index) bad_locs = np.isnan(dates) has_bad_values = False if bad_locs.any(): has_bad_values = True dates._values[bad_locs] = 1.0 dates = dates.astype(np.int64) if fmt.startswith(('%tC', 'tC')): warnings.warn('Encountered %tC format. 
Leaving in Stata Internal Format.', stacklevel=find_stack_level()) conv_dates = Series(dates, dtype=object) if has_bad_values: conv_dates[bad_locs] = NaT return conv_dates elif fmt.startswith(('%tw', 'tw')): year = stata_epoch.year + dates // 52 days = dates % 52 * 7 per_y = (year - 1970).array.view('Period[Y]') per_d = per_y.asfreq('D', how='S') per_d_shifted = per_d + days._values per_s = per_d_shifted.asfreq('s', how='S') conv_dates_arr = per_s.view('M8[s]') conv_dates = Series(conv_dates_arr, index=dates.index) else: raise ValueError(f'Date fmt {fmt} not understood') if has_bad_values: conv_dates[bad_locs] = NaT return conv_dates def _datetime_to_stata_elapsed_vec(dates: Series, fmt: str) -> Series: index = dates.index NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000 US_PER_DAY = NS_PER_DAY / 1000 MS_PER_DAY = NS_PER_DAY / 1000000 def parse_dates_safe(dates: Series, delta: bool=False, year: bool=False, days: bool=False) -> DataFrame: d = {} if lib.is_np_dtype(dates.dtype, 'M'): if delta: time_delta = dates.dt.as_unit('ms') - Timestamp(stata_epoch).as_unit('ms') d['delta'] = time_delta._values.view(np.int64) if days or year: date_index = DatetimeIndex(dates) d['year'] = date_index._data.year d['month'] = date_index._data.month if days: year_start = np.asarray(dates).astype('M8[Y]').astype(dates.dtype) diff = dates - year_start d['days'] = np.asarray(diff).astype('m8[D]').view('int64') elif infer_dtype(dates, skipna=False) == 'datetime': if delta: delta = dates._values - stata_epoch def f(x: timedelta) -> float: return US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds v = np.vectorize(f) d['delta'] = v(delta) if year: year_month = dates.apply(lambda x: 100 * x.year + x.month) d['year'] = year_month._values // 100 d['month'] = year_month._values - d['year'] * 100 if days: def g(x: datetime) -> int: return (x - datetime(x.year, 1, 1)).days v = np.vectorize(g) d['days'] = v(dates) else: raise ValueError('Columns containing dates must contain either datetime64, datetime or null values.') return DataFrame(d, index=index) bad_loc = isna(dates) index = dates.index if bad_loc.any(): if lib.is_np_dtype(dates.dtype, 'M'): dates._values[bad_loc] = to_datetime(stata_epoch) else: dates._values[bad_loc] = stata_epoch if fmt in ['%tc', 'tc']: d = parse_dates_safe(dates, delta=True) conv_dates = d.delta elif fmt in ['%tC', 'tC']: warnings.warn('Stata Internal Format tC not supported.', stacklevel=find_stack_level()) conv_dates = dates elif fmt in ['%td', 'td']: d = parse_dates_safe(dates, delta=True) conv_dates = d.delta // MS_PER_DAY elif fmt in ['%tw', 'tw']: d = parse_dates_safe(dates, year=True, days=True) conv_dates = 52 * (d.year - stata_epoch.year) + d.days // 7 elif fmt in ['%tm', 'tm']: d = parse_dates_safe(dates, year=True) conv_dates = 12 * (d.year - stata_epoch.year) + d.month - 1 elif fmt in ['%tq', 'tq']: d = parse_dates_safe(dates, year=True) conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3 elif fmt in ['%th', 'th']: d = parse_dates_safe(dates, year=True) conv_dates = 2 * (d.year - stata_epoch.year) + (d.month > 6).astype(int) elif fmt in ['%ty', 'ty']: d = parse_dates_safe(dates, year=True) conv_dates = d.year else: raise ValueError(f'Format {fmt} is not a known Stata date format') conv_dates = Series(conv_dates, dtype=np.float64, copy=False) missing_value = struct.unpack(' DataFrame: ws = '' conversion_data: tuple[tuple[type, type, type], tuple[type, type, type], tuple[type, type, type], tuple[type, type, type], tuple[type, type, type]] = ((np.bool_, np.int8, 
np.int8), (np.uint8, np.int8, np.int16), (np.uint16, np.int16, np.int32), (np.uint32, np.int32, np.int64), (np.uint64, np.int64, np.float64)) float32_max = struct.unpack('= 2 ** 53: ws = precision_loss_doc.format('uint64', 'float64') data[col] = data[col].astype(dtype) if dtype == np.int8 and (not empty_df): if data[col].max() > 100 or data[col].min() < -127: data[col] = data[col].astype(np.int16) elif dtype == np.int16 and (not empty_df): if data[col].max() > 32740 or data[col].min() < -32767: data[col] = data[col].astype(np.int32) elif dtype == np.int64: if empty_df or (data[col].max() <= 2147483620 and data[col].min() >= -2147483647): data[col] = data[col].astype(np.int32) else: data[col] = data[col].astype(np.float64) if data[col].max() >= 2 ** 53 or data[col].min() <= -2 ** 53: ws = precision_loss_doc.format('int64', 'float64') elif dtype in (np.float32, np.float64): if np.isinf(data[col]).any(): raise ValueError(f'Column {col} contains infinity or -infinitywhich is outside the range supported by Stata.') value = data[col].max() if dtype == np.float32 and value > float32_max: data[col] = data[col].astype(np.float64) elif dtype == np.float64: if value > float64_max: raise ValueError(f'Column {col} has a maximum value ({value}) outside the range supported by Stata ({float64_max})') if is_nullable_int: if orig_missing.any(): sentinel = StataMissingValue.BASE_MISSING_VALUES[data[col].dtype.name] data.loc[orig_missing, col] = sentinel if ws: warnings.warn(ws, PossiblePrecisionLoss, stacklevel=find_stack_level()) return data class StataValueLabel: def __init__(self, catarray: Series, encoding: Literal['latin-1', 'utf-8']='latin-1') -> None: if encoding not in ('latin-1', 'utf-8'): raise ValueError('Only latin-1 and utf-8 are supported.') self.labname = catarray.name self._encoding = encoding categories = catarray.cat.categories self.value_labels = enumerate(categories) self._prepare_value_labels() def _prepare_value_labels(self) -> None: self.text_len = 0 self.txt: list[bytes] = [] self.n = 0 self.off = np.array([], dtype=np.int32) self.val = np.array([], dtype=np.int32) self.len = 0 offsets: list[int] = [] values: list[float] = [] for vl in self.value_labels: category: str | bytes = vl[1] if not isinstance(category, str): category = str(category) warnings.warn(value_label_mismatch_doc.format(self.labname), ValueLabelTypeMismatch, stacklevel=find_stack_level()) category = category.encode(self._encoding) offsets.append(self.text_len) self.text_len += len(category) + 1 values.append(vl[0]) self.txt.append(category) self.n += 1 if self.text_len > 32000: raise ValueError('Stata value labels for a single variable must have a combined length less than 32,000 characters.') self.off = np.array(offsets, dtype=np.int32) self.val = np.array(values, dtype=np.int32) self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len def generate_value_label(self, byteorder: str) -> bytes: encoding = self._encoding bio = BytesIO() null_byte = b'\x00' bio.write(struct.pack(byteorder + 'i', self.len)) labname = str(self.labname)[:32].encode(encoding) lab_len = 32 if encoding not in ('utf-8', 'utf8') else 128 labname = _pad_bytes(labname, lab_len + 1) bio.write(labname) for i in range(3): bio.write(struct.pack('c', null_byte)) bio.write(struct.pack(byteorder + 'i', self.n)) bio.write(struct.pack(byteorder + 'i', self.text_len)) for offset in self.off: bio.write(struct.pack(byteorder + 'i', offset)) for value in self.val: bio.write(struct.pack(byteorder + 'i', value)) for text in self.txt: bio.write(text + null_byte) 
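# Value-label record layout written above: total length, labname padded to 33 bytes
# (129 for utf-8), three padding bytes, n, text_len, the n offsets, the n values,
# and finally the null-terminated label texts.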
return bio.getvalue() class StataNonCatValueLabel(StataValueLabel): def __init__(self, labname: str, value_labels: dict[float, str], encoding: Literal['latin-1', 'utf-8']='latin-1') -> None: if encoding not in ('latin-1', 'utf-8'): raise ValueError('Only latin-1 and utf-8 are supported.') self.labname = labname self._encoding = encoding self.value_labels = sorted(value_labels.items(), key=lambda x: x[0]) self._prepare_value_labels() class StataMissingValue: MISSING_VALUES: dict[float, str] = {} bases: Final = (101, 32741, 2147483621) for b in bases: MISSING_VALUES[b] = '.' for i in range(1, 27): MISSING_VALUES[i + b] = '.' + chr(96 + i) float32_base: bytes = b'\x00\x00\x00\x7f' increment_32: int = struct.unpack(' 0: MISSING_VALUES[key] += chr(96 + i) int_value = struct.unpack(' 0: MISSING_VALUES[key] += chr(96 + i) int_value = struct.unpack('q', struct.pack(' None: self._value = value value = int(value) if value < 2147483648 else float(value) self._str = self.MISSING_VALUES[value] @property def string(self) -> str: return self._str @property def value(self) -> float: return self._value def __str__(self) -> str: return self.string def __repr__(self) -> str: return f'{type(self)}({self})' def __eq__(self, other: object) -> bool: return isinstance(other, type(self)) and self.string == other.string and (self.value == other.value) @classmethod def get_base_missing_value(cls, dtype: np.dtype) -> float: if dtype.type is np.int8: value = cls.BASE_MISSING_VALUES['int8'] elif dtype.type is np.int16: value = cls.BASE_MISSING_VALUES['int16'] elif dtype.type is np.int32: value = cls.BASE_MISSING_VALUES['int32'] elif dtype.type is np.float32: value = cls.BASE_MISSING_VALUES['float32'] elif dtype.type is np.float64: value = cls.BASE_MISSING_VALUES['float64'] else: raise ValueError('Unsupported dtype') return value class StataParser: def __init__(self) -> None: self.DTYPE_MAP = dict([(i, np.dtype(f'S{i}')) for i in range(1, 245)] + [(251, np.dtype(np.int8)), (252, np.dtype(np.int16)), (253, np.dtype(np.int32)), (254, np.dtype(np.float32)), (255, np.dtype(np.float64))]) self.DTYPE_MAP_XML: dict[int, np.dtype] = {32768: np.dtype(np.uint8), 65526: np.dtype(np.float64), 65527: np.dtype(np.float32), 65528: np.dtype(np.int32), 65529: np.dtype(np.int16), 65530: np.dtype(np.int8)} self.TYPE_MAP = list(tuple(range(251)) + tuple('bhlfd')) self.TYPE_MAP_XML = {32768: 'Q', 65526: 'd', 65527: 'f', 65528: 'l', 65529: 'h', 65530: 'b'} float32_min = b'\xff\xff\xff\xfe' float32_max = b'\xff\xff\xff~' float64_min = b'\xff\xff\xff\xff\xff\xff\xef\xff' float64_max = b'\xff\xff\xff\xff\xff\xff\xdf\x7f' self.VALID_RANGE = {'b': (-127, 100), 'h': (-32767, 32740), 'l': (-2147483647, 2147483620), 'f': (np.float32(struct.unpack(' None: super().__init__() self._convert_dates = convert_dates self._convert_categoricals = convert_categoricals self._index_col = index_col self._convert_missing = convert_missing self._preserve_dtypes = preserve_dtypes self._columns = columns self._order_categoricals = order_categoricals self._original_path_or_buf = path_or_buf self._compression = compression self._storage_options = storage_options self._encoding = '' self._chunksize = chunksize self._using_iterator = False self._entered = False if self._chunksize is None: self._chunksize = 1 elif not isinstance(chunksize, int) or chunksize <= 0: raise ValueError('chunksize must be a positive integer when set.') self._close_file: Callable[[], None] | None = None self._column_selector_set = False self._value_label_dict: dict[str, dict[int, str]] = {} 
self._value_labels_read = False self._dtype: np.dtype | None = None self._lines_read = 0 self._native_byteorder = _set_endianness(sys.byteorder) def _ensure_open(self) -> None: if not hasattr(self, '_path_or_buf'): self._open_file() def _open_file(self) -> None: if not self._entered: warnings.warn('StataReader is being used without using a context manager. Using StataReader as a context manager is the only supported method.', ResourceWarning, stacklevel=find_stack_level()) handles = get_handle(self._original_path_or_buf, 'rb', storage_options=self._storage_options, is_text=False, compression=self._compression) if hasattr(handles.handle, 'seekable') and handles.handle.seekable(): self._path_or_buf = handles.handle self._close_file = handles.close else: with handles: self._path_or_buf = BytesIO(handles.handle.read()) self._close_file = self._path_or_buf.close self._read_header() self._setup_dtype() def __enter__(self) -> Self: self._entered = True return self def __exit__(self, exc_type: type[BaseException] | None, exc_value: BaseException | None, traceback: TracebackType | None) -> None: if self._close_file: self._close_file() def _set_encoding(self) -> None: if self._format_version < 118: self._encoding = 'latin-1' else: self._encoding = 'utf-8' def _read_int8(self) -> int: return struct.unpack('b', self._path_or_buf.read(1))[0] def _read_uint8(self) -> int: return struct.unpack('B', self._path_or_buf.read(1))[0] def _read_uint16(self) -> int: return struct.unpack(f'{self._byteorder}H', self._path_or_buf.read(2))[0] def _read_uint32(self) -> int: return struct.unpack(f'{self._byteorder}I', self._path_or_buf.read(4))[0] def _read_uint64(self) -> int: return struct.unpack(f'{self._byteorder}Q', self._path_or_buf.read(8))[0] def _read_int16(self) -> int: return struct.unpack(f'{self._byteorder}h', self._path_or_buf.read(2))[0] def _read_int32(self) -> int: return struct.unpack(f'{self._byteorder}i', self._path_or_buf.read(4))[0] def _read_int64(self) -> int: return struct.unpack(f'{self._byteorder}q', self._path_or_buf.read(8))[0] def _read_char8(self) -> bytes: return struct.unpack('c', self._path_or_buf.read(1))[0] def _read_int16_count(self, count: int) -> tuple[int, ...]: return struct.unpack(f"{self._byteorder}{'h' * count}", self._path_or_buf.read(2 * count)) def _read_header(self) -> None: first_char = self._read_char8() if first_char == b'<': self._read_new_header() else: self._read_old_header(first_char) def _read_new_header(self) -> None: self._path_or_buf.read(27) self._format_version = int(self._path_or_buf.read(3)) if self._format_version not in [117, 118, 119]: raise ValueError(_version_error.format(version=self._format_version)) self._set_encoding() self._path_or_buf.read(21) self._byteorder = '>' if self._path_or_buf.read(3) == b'MSF' else '<' self._path_or_buf.read(15) self._nvar = self._read_uint16() if self._format_version <= 118 else self._read_uint32() self._path_or_buf.read(7) self._nobs = self._get_nobs() self._path_or_buf.read(11) self._data_label = self._get_data_label() self._path_or_buf.read(19) self._time_stamp = self._get_time_stamp() self._path_or_buf.read(26) self._path_or_buf.read(8) self._path_or_buf.read(8) self._seek_vartypes = self._read_int64() + 16 self._seek_varnames = self._read_int64() + 10 self._seek_sortlist = self._read_int64() + 10 self._seek_formats = self._read_int64() + 9 self._seek_value_label_names = self._read_int64() + 19 self._seek_variable_labels = self._get_seek_variable_labels() self._path_or_buf.read(8) self._data_location = 
self._read_int64() + 6 self._seek_strls = self._read_int64() + 7 self._seek_value_labels = self._read_int64() + 14 (self._typlist, self._dtyplist) = self._get_dtypes(self._seek_vartypes) self._path_or_buf.seek(self._seek_varnames) self._varlist = self._get_varlist() self._path_or_buf.seek(self._seek_sortlist) self._srtlist = self._read_int16_count(self._nvar + 1)[:-1] self._path_or_buf.seek(self._seek_formats) self._fmtlist = self._get_fmtlist() self._path_or_buf.seek(self._seek_value_label_names) self._lbllist = self._get_lbllist() self._path_or_buf.seek(self._seek_variable_labels) self._variable_labels = self._get_variable_labels() def _get_dtypes(self, seek_vartypes: int) -> tuple[list[int | str], list[str | np.dtype]]: self._path_or_buf.seek(seek_vartypes) typlist = [] dtyplist = [] for _ in range(self._nvar): typ = self._read_uint16() if typ <= 2045: typlist.append(typ) dtyplist.append(str(typ)) else: try: typlist.append(self.TYPE_MAP_XML[typ]) dtyplist.append(self.DTYPE_MAP_XML[typ]) except KeyError as err: raise ValueError(f'cannot convert stata types [{typ}]') from err return (typlist, dtyplist) def _get_varlist(self) -> list[str]: b = 33 if self._format_version < 118 else 129 return [self._decode(self._path_or_buf.read(b)) for _ in range(self._nvar)] def _get_fmtlist(self) -> list[str]: if self._format_version >= 118: b = 57 elif self._format_version > 113: b = 49 elif self._format_version > 104: b = 12 else: b = 7 return [self._decode(self._path_or_buf.read(b)) for _ in range(self._nvar)] def _get_lbllist(self) -> list[str]: if self._format_version >= 118: b = 129 elif self._format_version > 108: b = 33 else: b = 9 return [self._decode(self._path_or_buf.read(b)) for _ in range(self._nvar)] def _get_variable_labels(self) -> list[str]: if self._format_version >= 118: vlblist = [self._decode(self._path_or_buf.read(321)) for _ in range(self._nvar)] elif self._format_version > 105: vlblist = [self._decode(self._path_or_buf.read(81)) for _ in range(self._nvar)] else: vlblist = [self._decode(self._path_or_buf.read(32)) for _ in range(self._nvar)] return vlblist def _get_nobs(self) -> int: if self._format_version >= 118: return self._read_uint64() elif self._format_version >= 103: return self._read_uint32() else: return self._read_uint16() def _get_data_label(self) -> str: if self._format_version >= 118: strlen = self._read_uint16() return self._decode(self._path_or_buf.read(strlen)) elif self._format_version == 117: strlen = self._read_int8() return self._decode(self._path_or_buf.read(strlen)) elif self._format_version > 105: return self._decode(self._path_or_buf.read(81)) else: return self._decode(self._path_or_buf.read(32)) def _get_time_stamp(self) -> str: if self._format_version >= 118: strlen = self._read_int8() return self._path_or_buf.read(strlen).decode('utf-8') elif self._format_version == 117: strlen = self._read_int8() return self._decode(self._path_or_buf.read(strlen)) elif self._format_version > 104: return self._decode(self._path_or_buf.read(18)) else: raise ValueError def _get_seek_variable_labels(self) -> int: if self._format_version == 117: self._path_or_buf.read(8) return self._seek_value_label_names + 33 * self._nvar + 20 + 17 elif self._format_version >= 118: return self._read_int64() + 17 else: raise ValueError def _read_old_header(self, first_char: bytes) -> None: self._format_version = int(first_char[0]) if self._format_version not in [102, 103, 104, 105, 108, 110, 111, 113, 114, 115]: raise ValueError(_version_error.format(version=self._format_version)) 
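# Legacy (pre-117) header layout parsed below: byteorder flag, filetype, one skipped
# byte, nvar, nobs, the data label and (for format >= 105) the timestamp, then the
# variable type list, variable names, sort list, formats, value-label names, variable
# labels and, for formats > 104, the expansion fields preceding the data block.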
self._set_encoding() self._byteorder = '>' if self._read_int8() == 1 else '<' self._filetype = self._read_int8() self._path_or_buf.read(1) self._nvar = self._read_uint16() self._nobs = self._get_nobs() self._data_label = self._get_data_label() if self._format_version >= 105: self._time_stamp = self._get_time_stamp() if self._format_version >= 111: typlist = [int(c) for c in self._path_or_buf.read(self._nvar)] else: buf = self._path_or_buf.read(self._nvar) typlistb = np.frombuffer(buf, dtype=np.uint8) typlist = [] for tp in typlistb: if tp in self.OLD_TYPE_MAPPING: typlist.append(self.OLD_TYPE_MAPPING[tp]) else: typlist.append(tp - 127) try: self._typlist = [self.TYPE_MAP[typ] for typ in typlist] except ValueError as err: invalid_types = ','.join([str(x) for x in typlist]) raise ValueError(f'cannot convert stata types [{invalid_types}]') from err try: self._dtyplist = [self.DTYPE_MAP[typ] for typ in typlist] except ValueError as err: invalid_dtypes = ','.join([str(x) for x in typlist]) raise ValueError(f'cannot convert stata dtypes [{invalid_dtypes}]') from err if self._format_version > 108: self._varlist = [self._decode(self._path_or_buf.read(33)) for _ in range(self._nvar)] else: self._varlist = [self._decode(self._path_or_buf.read(9)) for _ in range(self._nvar)] self._srtlist = self._read_int16_count(self._nvar + 1)[:-1] self._fmtlist = self._get_fmtlist() self._lbllist = self._get_lbllist() self._variable_labels = self._get_variable_labels() if self._format_version > 104: while True: data_type = self._read_int8() if self._format_version > 108: data_len = self._read_int32() else: data_len = self._read_int16() if data_type == 0: break self._path_or_buf.read(data_len) self._data_location = self._path_or_buf.tell() def _setup_dtype(self) -> np.dtype: if self._dtype is not None: return self._dtype dtypes = [] for (i, typ) in enumerate(self._typlist): if typ in self.NUMPY_TYPE_MAP: typ = cast(str, typ) dtypes.append((f's{i}', f'{self._byteorder}{self.NUMPY_TYPE_MAP[typ]}')) else: dtypes.append((f's{i}', f'S{typ}')) self._dtype = np.dtype(dtypes) return self._dtype def _decode(self, s: bytes) -> str: s = s.partition(b'\x00')[0] try: return s.decode(self._encoding) except UnicodeDecodeError: encoding = self._encoding msg = f'\nOne or more strings in the dta file could not be decoded using {encoding}, and\nso the fallback encoding of latin-1 is being used. This can happen when a file\nhas been incorrectly encoded by Stata or some other software. You should verify\nthe string values returned are correct.' 
warnings.warn(msg, UnicodeWarning, stacklevel=find_stack_level()) return s.decode('latin-1') def _read_new_value_labels(self) -> None: if self._format_version >= 117: self._path_or_buf.seek(self._seek_value_labels) else: assert self._dtype is not None offset = self._nobs * self._dtype.itemsize self._path_or_buf.seek(self._data_location + offset) while True: if self._format_version >= 117: if self._path_or_buf.read(5) == b'= 117: self._path_or_buf.read(6) def _read_old_value_labels(self) -> None: assert self._dtype is not None offset = self._nobs * self._dtype.itemsize self._path_or_buf.seek(self._data_location + offset) while True: if not self._path_or_buf.read(2): break self._path_or_buf.seek(-2, os.SEEK_CUR) n = self._read_uint16() labname = self._decode(self._path_or_buf.read(9)) self._path_or_buf.read(1) codes = np.frombuffer(self._path_or_buf.read(2 * n), dtype=f'{self._byteorder}i2', count=n) self._value_label_dict[labname] = {} for i in range(n): self._value_label_dict[labname][codes[i]] = self._decode(self._path_or_buf.read(8)) def _read_value_labels(self) -> None: self._ensure_open() if self._value_labels_read: return if self._format_version >= 108: self._read_new_value_labels() else: self._read_old_value_labels() self._value_labels_read = True def _read_strls(self) -> None: self._path_or_buf.seek(self._seek_strls) self.GSO = {'0': ''} while True: if self._path_or_buf.read(3) != b'GSO': break if self._format_version == 117: v_o = self._read_uint64() else: buf = self._path_or_buf.read(12) v_size = 2 if self._format_version == 118 else 3 if self._byteorder == '<': buf = buf[0:v_size] + buf[4:12 - v_size] else: buf = buf[4 - v_size:4] + buf[4 + v_size:] v_o = struct.unpack(f'{self._byteorder}Q', buf)[0] typ = self._read_uint8() length = self._read_uint32() va = self._path_or_buf.read(length) if typ == 130: decoded_va = va[0:-1].decode(self._encoding) else: decoded_va = str(va) self.GSO[str(v_o)] = decoded_va def __next__(self) -> DataFrame: self._using_iterator = True return self.read(nrows=self._chunksize) def get_chunk(self, size: int | None=None) -> DataFrame: if size is None: size = self._chunksize return self.read(nrows=size) @Appender(_read_method_doc) def read(self, nrows: int | None=None, convert_dates: bool | None=None, convert_categoricals: bool | None=None, index_col: str | None=None, convert_missing: bool | None=None, preserve_dtypes: bool | None=None, columns: Sequence[str] | None=None, order_categoricals: bool | None=None) -> DataFrame: self._ensure_open() if convert_dates is None: convert_dates = self._convert_dates if convert_categoricals is None: convert_categoricals = self._convert_categoricals if convert_missing is None: convert_missing = self._convert_missing if preserve_dtypes is None: preserve_dtypes = self._preserve_dtypes if columns is None: columns = self._columns if order_categoricals is None: order_categoricals = self._order_categoricals if index_col is None: index_col = self._index_col if nrows is None: nrows = self._nobs if self._nobs == 0 and nrows == 0: data = DataFrame(columns=self._varlist) for (i, col) in enumerate(data.columns): dt = self._dtyplist[i] if isinstance(dt, np.dtype): if dt.char != 'S': data[col] = data[col].astype(dt) if columns is not None: data = self._do_select_columns(data, columns) return data if self._format_version >= 117 and (not self._value_labels_read): self._read_strls() assert self._dtype is not None dtype = self._dtype max_read_len = (self._nobs - self._lines_read) * dtype.itemsize read_len = nrows * dtype.itemsize read_len 
= min(read_len, max_read_len) if read_len <= 0: if convert_categoricals: self._read_value_labels() raise StopIteration offset = self._lines_read * dtype.itemsize self._path_or_buf.seek(self._data_location + offset) read_lines = min(nrows, self._nobs - self._lines_read) raw_data = np.frombuffer(self._path_or_buf.read(read_len), dtype=dtype, count=read_lines) self._lines_read += read_lines if self._byteorder != self._native_byteorder: raw_data = raw_data.byteswap().view(raw_data.dtype.newbyteorder()) if convert_categoricals: self._read_value_labels() if len(raw_data) == 0: data = DataFrame(columns=self._varlist) else: data = DataFrame.from_records(raw_data) data.columns = Index(self._varlist) if index_col is None: data.index = RangeIndex(self._lines_read - read_lines, self._lines_read) if columns is not None: data = self._do_select_columns(data, columns) for (col, typ) in zip(data, self._typlist): if isinstance(typ, int): data[col] = data[col].apply(self._decode) data = self._insert_strls(data) valid_dtypes = [i for (i, dtyp) in enumerate(self._dtyplist) if dtyp is not None] object_type = np.dtype(object) for idx in valid_dtypes: dtype = data.iloc[:, idx].dtype if dtype not in (object_type, self._dtyplist[idx]): data.isetitem(idx, data.iloc[:, idx].astype(dtype)) data = self._do_convert_missing(data, convert_missing) if convert_dates: for (i, fmt) in enumerate(self._fmtlist): if any((fmt.startswith(date_fmt) for date_fmt in _date_formats)): data.isetitem(i, _stata_elapsed_date_to_datetime_vec(data.iloc[:, i], fmt)) if convert_categoricals: data = self._do_convert_categoricals(data, self._value_label_dict, self._lbllist, order_categoricals) if not preserve_dtypes: retyped_data = [] convert = False for col in data: dtype = data[col].dtype if dtype in (np.dtype(np.float16), np.dtype(np.float32)): dtype = np.dtype(np.float64) convert = True elif dtype in (np.dtype(np.int8), np.dtype(np.int16), np.dtype(np.int32)): dtype = np.dtype(np.int64) convert = True retyped_data.append((col, data[col].astype(dtype))) if convert: data = DataFrame.from_dict(dict(retyped_data)) if index_col is not None: data = data.set_index(data.pop(index_col)) return data def _do_convert_missing(self, data: DataFrame, convert_missing: bool) -> DataFrame: old_missingdouble = float.fromhex('0x1.0p333') replacements = {} for i in range(len(data.columns)): fmt = self._typlist[i] if self._format_version <= 105 and fmt == 'd': data.iloc[:, i] = data.iloc[:, i].replace(old_missingdouble, self.MISSING_VALUES['d']) if self._format_version <= 111: if fmt not in self.OLD_VALID_RANGE: continue fmt = cast(str, fmt) (nmin, nmax) = self.OLD_VALID_RANGE[fmt] else: if fmt not in self.VALID_RANGE: continue fmt = cast(str, fmt) (nmin, nmax) = self.VALID_RANGE[fmt] series = data.iloc[:, i] svals = series._values missing = (svals < nmin) | (svals > nmax) if not missing.any(): continue if convert_missing: missing_loc = np.nonzero(np.asarray(missing))[0] (umissing, umissing_loc) = np.unique(series[missing], return_inverse=True) replacement = Series(series, dtype=object) for (j, um) in enumerate(umissing): if self._format_version <= 111: missing_value = StataMissingValue(float(self.MISSING_VALUES[fmt])) else: missing_value = StataMissingValue(um) loc = missing_loc[umissing_loc == j] replacement.iloc[loc] = missing_value else: dtype = series.dtype if dtype not in (np.float32, np.float64): dtype = np.float64 replacement = Series(series, dtype=dtype) replacement._values[missing] = np.nan replacements[i] = replacement if replacements: for (idx, value) 
in replacements.items(): data.isetitem(idx, value) return data def _insert_strls(self, data: DataFrame) -> DataFrame: if not hasattr(self, 'GSO') or len(self.GSO) == 0: return data for (i, typ) in enumerate(self._typlist): if typ != 'Q': continue data.isetitem(i, [self.GSO[str(k)] for k in data.iloc[:, i]]) return data def _do_select_columns(self, data: DataFrame, columns: Sequence[str]) -> DataFrame: if not self._column_selector_set: column_set = set(columns) if len(column_set) != len(columns): raise ValueError('columns contains duplicate entries') unmatched = column_set.difference(data.columns) if unmatched: joined = ', '.join(list(unmatched)) raise ValueError(f'The following columns were not found in the Stata data set: {joined}') dtyplist = [] typlist = [] fmtlist = [] lbllist = [] for col in columns: i = data.columns.get_loc(col) dtyplist.append(self._dtyplist[i]) typlist.append(self._typlist[i]) fmtlist.append(self._fmtlist[i]) lbllist.append(self._lbllist[i]) self._dtyplist = dtyplist self._typlist = typlist self._fmtlist = fmtlist self._lbllist = lbllist self._column_selector_set = True return data[columns] def _do_convert_categoricals(self, data: DataFrame, value_label_dict: dict[str, dict[int, str]], lbllist: Sequence[str], order_categoricals: bool) -> DataFrame: if not value_label_dict: return data cat_converted_data = [] for (col, label) in zip(data, lbllist): if label in value_label_dict: vl = value_label_dict[label] keys = np.array(list(vl.keys())) column = data[col] key_matches = column.isin(keys) if self._using_iterator and key_matches.all(): initial_categories: np.ndarray | None = keys else: if self._using_iterator: warnings.warn(categorical_conversion_warning, CategoricalConversionWarning, stacklevel=find_stack_level()) initial_categories = None cat_data = Categorical(column, categories=initial_categories, ordered=order_categoricals) if initial_categories is None: categories = [] for category in cat_data.categories: if category in vl: categories.append(vl[category]) else: categories.append(category) else: categories = list(vl.values()) try: cat_data = cat_data.rename_categories(categories) except ValueError as err: vc = Series(categories, copy=False).value_counts() repeated_cats = list(vc.index[vc > 1]) repeats = '-' * 80 + '\n' + '\n'.join(repeated_cats) msg = f'\nValue labels for column {col} are not unique. 
These cannot be converted to\npandas categoricals.\n\nEither read the file with `convert_categoricals` set to False or use the\nlow level interface in `StataReader` to separately read the values and the\nvalue_labels.\n\nThe repeated labels are:\n{repeats}\n' raise ValueError(msg) from err cat_series = Series(cat_data, index=data.index, copy=False) cat_converted_data.append((col, cat_series)) else: cat_converted_data.append((col, data[col])) data = DataFrame(dict(cat_converted_data), copy=False) return data @property def data_label(self) -> str: self._ensure_open() return self._data_label @property def time_stamp(self) -> str: self._ensure_open() return self._time_stamp def variable_labels(self) -> dict[str, str]: self._ensure_open() return dict(zip(self._varlist, self._variable_labels)) def value_labels(self) -> dict[str, dict[int, str]]: if not self._value_labels_read: self._read_value_labels() return self._value_label_dict @Appender(_read_stata_doc) def read_stata(filepath_or_buffer: FilePath | ReadBuffer[bytes], *, convert_dates: bool=True, convert_categoricals: bool=True, index_col: str | None=None, convert_missing: bool=False, preserve_dtypes: bool=True, columns: Sequence[str] | None=None, order_categoricals: bool=True, chunksize: int | None=None, iterator: bool=False, compression: CompressionOptions='infer', storage_options: StorageOptions | None=None) -> DataFrame | StataReader: reader = StataReader(filepath_or_buffer, convert_dates=convert_dates, convert_categoricals=convert_categoricals, index_col=index_col, convert_missing=convert_missing, preserve_dtypes=preserve_dtypes, columns=columns, order_categoricals=order_categoricals, chunksize=chunksize, storage_options=storage_options, compression=compression) if iterator or chunksize: return reader with reader: return reader.read() def _set_endianness(endianness: str) -> str: if endianness.lower() in ['<', 'little']: return '<' elif endianness.lower() in ['>', 'big']: return '>' else: raise ValueError(f'Endianness {endianness} not understood') def _pad_bytes(name: AnyStr, length: int) -> AnyStr: if isinstance(name, bytes): return name + b'\x00' * (length - len(name)) return name + '\x00' * (length - len(name)) def _convert_datetime_to_stata_type(fmt: str) -> np.dtype: if fmt in ['tc', '%tc', 'td', '%td', 'tw', '%tw', 'tm', '%tm', 'tq', '%tq', 'th', '%th', 'ty', '%ty']: return np.dtype(np.float64) else: raise NotImplementedError(f'Format {fmt} not implemented') def _maybe_convert_to_int_keys(convert_dates: dict, varlist: list[Hashable]) -> dict: new_dict = {} for key in convert_dates: if not convert_dates[key].startswith('%'): convert_dates[key] = '%' + convert_dates[key] if key in varlist: new_dict.update({varlist.index(key): convert_dates[key]}) else: if not isinstance(key, int): raise ValueError('convert_dates key must be a column or an integer') new_dict.update({key: convert_dates[key]}) return new_dict def _dtype_to_stata_type(dtype: np.dtype, column: Series) -> int: if dtype.type is np.object_: itemsize = max_len_string_array(ensure_object(column._values)) return max(itemsize, 1) elif dtype.type is np.float64: return 255 elif dtype.type is np.float32: return 254 elif dtype.type is np.int32: return 253 elif dtype.type is np.int16: return 252 elif dtype.type is np.int8: return 251 else: raise NotImplementedError(f'Data type {dtype} not supported.') def _dtype_to_default_stata_fmt(dtype: np.dtype, column: Series, dta_version: int=114, force_strl: bool=False) -> str: if dta_version < 117: max_str_len = 244 else: max_str_len = 2045 
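# '%9s' below marks a strL column; fixed-width str columns are limited to 244 bytes
# before dta format 117 and to 2045 bytes from format 117 onwards.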
if force_strl: return '%9s' if dtype.type is np.object_: itemsize = max_len_string_array(ensure_object(column._values)) if itemsize > max_str_len: if dta_version >= 117: return '%9s' else: raise ValueError(excessive_string_length_error.format(column.name)) return '%' + str(max(itemsize, 1)) + 's' elif dtype == np.float64: return '%10.0g' elif dtype == np.float32: return '%9.0g' elif dtype == np.int32: return '%12.0g' elif dtype in (np.int8, np.int16): return '%8.0g' else: raise NotImplementedError(f'Data type {dtype} not supported.') @doc(storage_options=_shared_docs['storage_options'], compression_options=_shared_docs['compression_options'] % 'fname') class StataWriter(StataParser): _max_string_length = 244 _encoding: Literal['latin-1', 'utf-8'] = 'latin-1' def __init__(self, fname: FilePath | WriteBuffer[bytes], data: DataFrame, convert_dates: dict[Hashable, str] | None=None, write_index: bool=True, byteorder: str | None=None, time_stamp: datetime | None=None, data_label: str | None=None, variable_labels: dict[Hashable, str] | None=None, compression: CompressionOptions='infer', storage_options: StorageOptions | None=None, *, value_labels: dict[Hashable, dict[float, str]] | None=None) -> None: super().__init__() self.data = data self._convert_dates = {} if convert_dates is None else convert_dates self._write_index = write_index self._time_stamp = time_stamp self._data_label = data_label self._variable_labels = variable_labels self._non_cat_value_labels = value_labels self._value_labels: list[StataValueLabel] = [] self._has_value_labels = np.array([], dtype=bool) self._compression = compression self._output_file: IO[bytes] | None = None self._converted_names: dict[Hashable, str] = {} self._prepare_pandas(data) self.storage_options = storage_options if byteorder is None: byteorder = sys.byteorder self._byteorder = _set_endianness(byteorder) self._fname = fname self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8} def _write(self, to_write: str) -> None: self.handles.handle.write(to_write.encode(self._encoding)) def _write_bytes(self, value: bytes) -> None: self.handles.handle.write(value) def _prepare_non_cat_value_labels(self, data: DataFrame) -> list[StataNonCatValueLabel]: non_cat_value_labels: list[StataNonCatValueLabel] = [] if self._non_cat_value_labels is None: return non_cat_value_labels for (labname, labels) in self._non_cat_value_labels.items(): if labname in self._converted_names: colname = self._converted_names[labname] elif labname in data.columns: colname = str(labname) else: raise KeyError(f"Can't create value labels for {labname}, it wasn't found in the dataset.") if not is_numeric_dtype(data[colname].dtype): raise ValueError(f"Can't create value labels for {labname}, value labels can only be applied to numeric columns.") svl = StataNonCatValueLabel(colname, labels, self._encoding) non_cat_value_labels.append(svl) return non_cat_value_labels def _prepare_categoricals(self, data: DataFrame) -> DataFrame: is_cat = [isinstance(dtype, CategoricalDtype) for dtype in data.dtypes] if not any(is_cat): return data self._has_value_labels |= np.array(is_cat) get_base_missing_value = StataMissingValue.get_base_missing_value data_formatted = [] for (col, col_is_cat) in zip(data, is_cat): if col_is_cat: svl = StataValueLabel(data[col], encoding=self._encoding) self._value_labels.append(svl) dtype = data[col].cat.codes.dtype if dtype == np.int64: raise ValueError('It is not possible to export int64-based categorical data to Stata.') values = data[col].cat.codes._values.copy() 
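# If the largest category code collides with Stata's missing-value sentinel for its
# integer width, the codes are upcast (int8 -> int16 -> int32 -> float64) before the
# -1 codes marking missing observations are replaced with that sentinel.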
if values.max() >= get_base_missing_value(dtype): if dtype == np.int8: dtype = np.dtype(np.int16) elif dtype == np.int16: dtype = np.dtype(np.int32) else: dtype = np.dtype(np.float64) values = np.array(values, dtype=dtype) values[values == -1] = get_base_missing_value(dtype) data_formatted.append((col, values)) else: data_formatted.append((col, data[col])) return DataFrame.from_dict(dict(data_formatted)) def _replace_nans(self, data: DataFrame) -> DataFrame: for c in data: dtype = data[c].dtype if dtype in (np.float32, np.float64): if dtype == np.float32: replacement = self.MISSING_VALUES['f'] else: replacement = self.MISSING_VALUES['d'] data[c] = data[c].fillna(replacement) return data def _update_strl_names(self) -> None: def _validate_variable_name(self, name: str) -> str: for c in name: if (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and (c < '0' or c > '9') and (c != '_'): name = name.replace(c, '_') return name def _check_column_names(self, data: DataFrame) -> DataFrame: converted_names: dict[Hashable, str] = {} columns = list(data.columns) original_columns = columns[:] duplicate_var_id = 0 for (j, name) in enumerate(columns): orig_name = name if not isinstance(name, str): name = str(name) name = self._validate_variable_name(name) if name in self.RESERVED_WORDS: name = '_' + name if '0' <= name[0] <= '9': name = '_' + name name = name[:min(len(name), 32)] if not name == orig_name: while columns.count(name) > 0: name = '_' + str(duplicate_var_id) + name name = name[:min(len(name), 32)] duplicate_var_id += 1 converted_names[orig_name] = name columns[j] = name data.columns = Index(columns) if self._convert_dates: for (c, o) in zip(columns, original_columns): if c != o: self._convert_dates[c] = self._convert_dates[o] del self._convert_dates[o] if converted_names: conversion_warning = [] for (orig_name, name) in converted_names.items(): msg = f'{orig_name} -> {name}' conversion_warning.append(msg) ws = invalid_name_doc.format('\n '.join(conversion_warning)) warnings.warn(ws, InvalidColumnName, stacklevel=find_stack_level()) self._converted_names = converted_names self._update_strl_names() return data def _set_formats_and_types(self, dtypes: Series) -> None: self.fmtlist: list[str] = [] self.typlist: list[int] = [] for (col, dtype) in dtypes.items(): self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, self.data[col])) self.typlist.append(_dtype_to_stata_type(dtype, self.data[col])) def _prepare_pandas(self, data: DataFrame) -> None: data = data.copy() if self._write_index: temp = data.reset_index() if isinstance(temp, DataFrame): data = temp data = self._check_column_names(data) data = _cast_to_stata_types(data) data = self._replace_nans(data) self._has_value_labels = np.repeat(False, data.shape[1]) non_cat_value_labels = self._prepare_non_cat_value_labels(data) non_cat_columns = [svl.labname for svl in non_cat_value_labels] has_non_cat_val_labels = data.columns.isin(non_cat_columns) self._has_value_labels |= has_non_cat_val_labels self._value_labels.extend(non_cat_value_labels) data = self._prepare_categoricals(data) (self.nobs, self.nvar) = data.shape self.data = data self.varlist = data.columns.tolist() dtypes = data.dtypes for col in data: if col in self._convert_dates: continue if lib.is_np_dtype(data[col].dtype, 'M'): self._convert_dates[col] = 'tc' self._convert_dates = _maybe_convert_to_int_keys(self._convert_dates, self.varlist) for key in self._convert_dates: new_type = _convert_datetime_to_stata_type(self._convert_dates[key]) dtypes.iloc[key] = np.dtype(new_type) 
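# Columns scheduled for date conversion are re-typed to float64 here so that the
# elapsed-time values produced by _datetime_to_stata_elapsed_vec fit the declared dtype.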
self._encode_strings() self._set_formats_and_types(dtypes) if self._convert_dates is not None: for key in self._convert_dates: if isinstance(key, int): self.fmtlist[key] = self._convert_dates[key] def _encode_strings(self) -> None: convert_dates = self._convert_dates convert_strl = getattr(self, '_convert_strl', []) for (i, col) in enumerate(self.data): if i in convert_dates or col in convert_strl: continue column = self.data[col] dtype = column.dtype if dtype.type is np.object_: inferred_dtype = infer_dtype(column, skipna=True) if not (inferred_dtype == 'string' or len(column) == 0): col = column.name raise ValueError(f'Column `{col}` cannot be exported.\n\nOnly string-like object arrays\ncontaining all strings or a mix of strings and None can be exported.\nObject arrays containing only null values are prohibited. Other object\ntypes cannot be exported and must first be converted to one of the\nsupported types.') encoded = self.data[col].str.encode(self._encoding) if max_len_string_array(ensure_object(encoded._values)) <= self._max_string_length: self.data[col] = encoded def write_file(self) -> None: with get_handle(self._fname, 'wb', compression=self._compression, is_text=False, storage_options=self.storage_options) as self.handles: if self.handles.compression['method'] is not None: (self._output_file, self.handles.handle) = (self.handles.handle, BytesIO()) self.handles.created_handles.append(self.handles.handle) try: self._write_header(data_label=self._data_label, time_stamp=self._time_stamp) self._write_map() self._write_variable_types() self._write_varnames() self._write_sortlist() self._write_formats() self._write_value_label_names() self._write_variable_labels() self._write_expansion_fields() self._write_characteristics() records = self._prepare_data() self._write_data(records) self._write_strls() self._write_value_labels() self._write_file_close_tag() self._write_map() self._close() except Exception as exc: self.handles.close() if isinstance(self._fname, (str, os.PathLike)) and os.path.isfile(self._fname): try: os.unlink(self._fname) except OSError: warnings.warn(f'This save was not successful but {self._fname} could not be deleted. 
This file is not valid.', ResourceWarning, stacklevel=find_stack_level()) raise exc def _close(self) -> None: if self._output_file is not None: assert isinstance(self.handles.handle, BytesIO) (bio, self.handles.handle) = (self.handles.handle, self._output_file) self.handles.handle.write(bio.getvalue()) def _write_map(self) -> None: def _write_file_close_tag(self) -> None: def _write_characteristics(self) -> None: def _write_strls(self) -> None: def _write_expansion_fields(self) -> None: self._write(_pad_bytes('', 5)) def _write_value_labels(self) -> None: for vl in self._value_labels: self._write_bytes(vl.generate_value_label(self._byteorder)) def _write_header(self, data_label: str | None=None, time_stamp: datetime | None=None) -> None: byteorder = self._byteorder self._write_bytes(struct.pack('b', 114)) self._write(byteorder == '>' and '\x01' or '\x02') self._write('\x01') self._write('\x00') self._write_bytes(struct.pack(byteorder + 'h', self.nvar)[:2]) self._write_bytes(struct.pack(byteorder + 'i', self.nobs)[:4]) if data_label is None: self._write_bytes(self._null_terminate_bytes(_pad_bytes('', 80))) else: self._write_bytes(self._null_terminate_bytes(_pad_bytes(data_label[:80], 80))) if time_stamp is None: time_stamp = datetime.now() elif not isinstance(time_stamp, datetime): raise ValueError('time_stamp should be datetime type') months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] month_lookup = {i + 1: month for (i, month) in enumerate(months)} ts = time_stamp.strftime('%d ') + month_lookup[time_stamp.month] + time_stamp.strftime(' %Y %H:%M') self._write_bytes(self._null_terminate_bytes(ts)) def _write_variable_types(self) -> None: for typ in self.typlist: self._write_bytes(struct.pack('B', typ)) def _write_varnames(self) -> None: for name in self.varlist: name = self._null_terminate_str(name) name = _pad_bytes(name[:32], 33) self._write(name) def _write_sortlist(self) -> None: srtlist = _pad_bytes('', 2 * (self.nvar + 1)) self._write(srtlist) def _write_formats(self) -> None: for fmt in self.fmtlist: self._write(_pad_bytes(fmt, 49)) def _write_value_label_names(self) -> None: for i in range(self.nvar): if self._has_value_labels[i]: name = self.varlist[i] name = self._null_terminate_str(name) name = _pad_bytes(name[:32], 33) self._write(name) else: self._write(_pad_bytes('', 33)) def _write_variable_labels(self) -> None: blank = _pad_bytes('', 81) if self._variable_labels is None: for i in range(self.nvar): self._write(blank) return for col in self.data: if col in self._variable_labels: label = self._variable_labels[col] if len(label) > 80: raise ValueError('Variable labels must be 80 characters or fewer') is_latin1 = all((ord(c) < 256 for c in label)) if not is_latin1: raise ValueError('Variable labels must contain only characters that can be encoded in Latin-1') self._write(_pad_bytes(label, 81)) else: self._write(blank) def _convert_strls(self, data: DataFrame) -> DataFrame: return data def _prepare_data(self) -> np.rec.recarray: data = self.data typlist = self.typlist convert_dates = self._convert_dates if self._convert_dates is not None: for (i, col) in enumerate(data): if i in convert_dates: data[col] = _datetime_to_stata_elapsed_vec(data[col], self.fmtlist[i]) data = self._convert_strls(data) dtypes = {} native_byteorder = self._byteorder == _set_endianness(sys.byteorder) for (i, col) in enumerate(data): typ = typlist[i] if typ <= self._max_string_length: dc = data[col].fillna('') data[col] = dc.apply(_pad_bytes, args=(typ,)) stype = 
f'S{typ}' dtypes[col] = stype data[col] = data[col].astype(stype) else: dtype = data[col].dtype if not native_byteorder: dtype = dtype.newbyteorder(self._byteorder) dtypes[col] = dtype return data.to_records(index=False, column_dtypes=dtypes) def _write_data(self, records: np.rec.recarray) -> None: self._write_bytes(records.tobytes()) @staticmethod def _null_terminate_str(s: str) -> str: s += '\x00' return s def _null_terminate_bytes(self, s: str) -> bytes: return self._null_terminate_str(s).encode(self._encoding) def _dtype_to_stata_type_117(dtype: np.dtype, column: Series, force_strl: bool) -> int: if force_strl: return 32768 if dtype.type is np.object_: itemsize = max_len_string_array(ensure_object(column._values)) itemsize = max(itemsize, 1) if itemsize <= 2045: return itemsize return 32768 elif dtype.type is np.float64: return 65526 elif dtype.type is np.float32: return 65527 elif dtype.type is np.int32: return 65528 elif dtype.type is np.int16: return 65529 elif dtype.type is np.int8: return 65530 else: raise NotImplementedError(f'Data type {dtype} not supported.') def _pad_bytes_new(name: str | bytes, length: int) -> bytes: if isinstance(name, str): name = bytes(name, 'utf-8') return name + b'\x00' * (length - len(name)) class StataStrLWriter: def __init__(self, df: DataFrame, columns: Sequence[str], version: int=117, byteorder: str | None=None) -> None: if version not in (117, 118, 119): raise ValueError('Only dta versions 117, 118 and 119 supported') self._dta_ver = version self.df = df self.columns = columns self._gso_table = {'': (0, 0)} if byteorder is None: byteorder = sys.byteorder self._byteorder = _set_endianness(byteorder) self._native_byteorder = self._byteorder == _set_endianness(sys.byteorder) gso_v_type = 'I' gso_o_type = 'Q' self._encoding = 'utf-8' if version == 117: o_size = 4 gso_o_type = 'I' self._encoding = 'latin-1' elif version == 118: o_size = 6 else: o_size = 5 if self._native_byteorder: self._o_offet = 2 ** (8 * (8 - o_size)) else: self._o_offet = 2 ** (8 * o_size) self._gso_o_type = gso_o_type self._gso_v_type = gso_v_type def _convert_key(self, key: tuple[int, int]) -> int: (v, o) = key if self._native_byteorder: return v + self._o_offet * o else: return o + self._o_offet * v def generate_table(self) -> tuple[dict[str, tuple[int, int]], DataFrame]: gso_table = self._gso_table gso_df = self.df columns = list(gso_df.columns) selected = gso_df[self.columns] col_index = [(col, columns.index(col)) for col in self.columns] keys = np.empty(selected.shape, dtype=np.uint64) for (o, (idx, row)) in enumerate(selected.iterrows()): for (j, (col, v)) in enumerate(col_index): val = row[col] val = '' if val is None else val key = gso_table.get(val, None) if key is None: key = (v + 1, o + 1) gso_table[val] = key keys[o, j] = self._convert_key(key) for (i, col) in enumerate(self.columns): gso_df[col] = keys[:, i] return (gso_table, gso_df) def generate_blob(self, gso_table: dict[str, tuple[int, int]]) -> bytes: bio = BytesIO() gso = bytes('GSO', 'ascii') gso_type = struct.pack(self._byteorder + 'B', 130) null = struct.pack(self._byteorder + 'B', 0) v_type = self._byteorder + self._gso_v_type o_type = self._byteorder + self._gso_o_type len_type = self._byteorder + 'I' for (strl, vo) in gso_table.items(): if vo == (0, 0): continue (v, o) = vo bio.write(gso) bio.write(struct.pack(v_type, v)) bio.write(struct.pack(o_type, o)) bio.write(gso_type) utf8_string = bytes(strl, 'utf-8') bio.write(struct.pack(len_type, len(utf8_string) + 1)) bio.write(utf8_string) bio.write(null) 
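# Layout note for the blob assembled above: each strL entry is written as the
# literal b'GSO' marker, the (v, o) key packed with self._gso_v_type and
# self._gso_o_type in the file's byte order, a type byte of 130 (a
# null-terminated string payload rather than raw binary), a uint32 length that
# includes the terminator, and finally the UTF-8 bytes followed by a null byte.
# The sentinel (0, 0) entry for the empty string is skipped and never reaches
# the blob.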
return bio.getvalue() class StataWriter117(StataWriter): _max_string_length = 2045 _dta_version = 117 def __init__(self, fname: FilePath | WriteBuffer[bytes], data: DataFrame, convert_dates: dict[Hashable, str] | None=None, write_index: bool=True, byteorder: str | None=None, time_stamp: datetime | None=None, data_label: str | None=None, variable_labels: dict[Hashable, str] | None=None, convert_strl: Sequence[Hashable] | None=None, compression: CompressionOptions='infer', storage_options: StorageOptions | None=None, *, value_labels: dict[Hashable, dict[float, str]] | None=None) -> None: self._convert_strl: list[Hashable] = [] if convert_strl is not None: self._convert_strl.extend(convert_strl) super().__init__(fname, data, convert_dates, write_index, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, variable_labels=variable_labels, value_labels=value_labels, compression=compression, storage_options=storage_options) self._map: dict[str, int] = {} self._strl_blob = b'' @staticmethod def _tag(val: str | bytes, tag: str) -> bytes: if isinstance(val, str): val = bytes(val, 'utf-8') return bytes('<' + tag + '>', 'utf-8') + val + bytes('</' + tag + '>', 'utf-8') def _update_map(self, tag: str) -> None: assert self.handles.handle is not None self._map[tag] = self.handles.handle.tell() def _write_header(self, data_label: str | None=None, time_stamp: datetime | None=None) -> None: byteorder = self._byteorder self._write_bytes(bytes('<stata_dta>', 'utf-8')) bio = BytesIO() bio.write(self._tag(bytes(str(self._dta_version), 'utf-8'), 'release')) bio.write(self._tag(byteorder == '>' and 'MSF' or 'LSF', 'byteorder')) nvar_type = 'H' if self._dta_version <= 118 else 'I' bio.write(self._tag(struct.pack(byteorder + nvar_type, self.nvar), 'K')) nobs_size = 'I' if self._dta_version == 117 else 'Q' bio.write(self._tag(struct.pack(byteorder + nobs_size, self.nobs), 'N')) label = data_label[:80] if data_label is not None else '' encoded_label = label.encode(self._encoding) label_size = 'B' if self._dta_version == 117 else 'H' label_len = struct.pack(byteorder + label_size, len(encoded_label)) encoded_label = label_len + encoded_label bio.write(self._tag(encoded_label, 'label')) if time_stamp is None: time_stamp = datetime.now() elif not isinstance(time_stamp, datetime): raise ValueError('time_stamp should be datetime type') months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] month_lookup = {i + 1: month for (i, month) in enumerate(months)} ts = time_stamp.strftime('%d ') + month_lookup[time_stamp.month] + time_stamp.strftime(' %Y %H:%M') stata_ts = b'\x11' + bytes(ts, 'utf-8') bio.write(self._tag(stata_ts, 'timestamp')) self._write_bytes(self._tag(bio.getvalue(), 'header')) def _write_map(self) -> None: if not self._map: self._map = {'stata_data': 0, 'map': self.handles.handle.tell(), 'variable_types': 0, 'varnames': 0, 'sortlist': 0, 'formats': 0, 'value_label_names': 0, 'variable_labels': 0, 'characteristics': 0, 'data': 0, 'strls': 0, 'value_labels': 0, 'stata_data_close': 0, 'end-of-file': 0} self.handles.handle.seek(self._map['map']) bio = BytesIO() for val in self._map.values(): bio.write(struct.pack(self._byteorder + 'Q', val)) self._write_bytes(self._tag(bio.getvalue(), 'map')) def _write_variable_types(self) -> None: self._update_map('variable_types') bio = BytesIO() for typ in self.typlist: bio.write(struct.pack(self._byteorder + 'H', typ)) self._write_bytes(self._tag(bio.getvalue(), 'variable_types')) def _write_varnames(self) -> None: self._update_map('varnames') bio 
= BytesIO() vn_len = 32 if self._dta_version == 117 else 128 for name in self.varlist: name = self._null_terminate_str(name) name = _pad_bytes_new(name[:32].encode(self._encoding), vn_len + 1) bio.write(name) self._write_bytes(self._tag(bio.getvalue(), 'varnames')) def _write_sortlist(self) -> None: self._update_map('sortlist') sort_size = 2 if self._dta_version < 119 else 4 self._write_bytes(self._tag(b'\x00' * sort_size * (self.nvar + 1), 'sortlist')) def _write_formats(self) -> None: self._update_map('formats') bio = BytesIO() fmt_len = 49 if self._dta_version == 117 else 57 for fmt in self.fmtlist: bio.write(_pad_bytes_new(fmt.encode(self._encoding), fmt_len)) self._write_bytes(self._tag(bio.getvalue(), 'formats')) def _write_value_label_names(self) -> None: self._update_map('value_label_names') bio = BytesIO() vl_len = 32 if self._dta_version == 117 else 128 for i in range(self.nvar): name = '' if self._has_value_labels[i]: name = self.varlist[i] name = self._null_terminate_str(name) encoded_name = _pad_bytes_new(name[:32].encode(self._encoding), vl_len + 1) bio.write(encoded_name) self._write_bytes(self._tag(bio.getvalue(), 'value_label_names')) def _write_variable_labels(self) -> None: self._update_map('variable_labels') bio = BytesIO() vl_len = 80 if self._dta_version == 117 else 320 blank = _pad_bytes_new('', vl_len + 1) if self._variable_labels is None: for _ in range(self.nvar): bio.write(blank) self._write_bytes(self._tag(bio.getvalue(), 'variable_labels')) return for col in self.data: if col in self._variable_labels: label = self._variable_labels[col] if len(label) > 80: raise ValueError('Variable labels must be 80 characters or fewer') try: encoded = label.encode(self._encoding) except UnicodeEncodeError as err: raise ValueError(f'Variable labels must contain only characters that can be encoded in {self._encoding}') from err bio.write(_pad_bytes_new(encoded, vl_len + 1)) else: bio.write(blank) self._write_bytes(self._tag(bio.getvalue(), 'variable_labels')) def _write_characteristics(self) -> None: self._update_map('characteristics') self._write_bytes(self._tag(b'', 'characteristics')) def _write_data(self, records: np.rec.recarray) -> None: self._update_map('data') self._write_bytes(b'<data>') self._write_bytes(records.tobytes()) self._write_bytes(b'</data>') def _write_strls(self) -> None: self._update_map('strls') self._write_bytes(self._tag(self._strl_blob, 'strls')) def _write_expansion_fields(self) -> None: pass def _write_value_labels(self) -> None: self._update_map('value_labels') bio = BytesIO() for vl in self._value_labels: lab = vl.generate_value_label(self._byteorder) lab = self._tag(lab, 'lbl') bio.write(lab) self._write_bytes(self._tag(bio.getvalue(), 'value_labels')) def _write_file_close_tag(self) -> None: self._update_map('stata_data_close') self._write_bytes(bytes('</stata_dta>', 'utf-8')) self._update_map('end-of-file') def _update_strl_names(self) -> None: for (orig, new) in self._converted_names.items(): if orig in self._convert_strl: idx = self._convert_strl.index(orig) self._convert_strl[idx] = new def _convert_strls(self, data: DataFrame) -> DataFrame: convert_cols = [col for (i, col) in enumerate(data) if self.typlist[i] == 32768 or col in self._convert_strl] if convert_cols: ssw = StataStrLWriter(data, convert_cols, version=self._dta_version, byteorder=self._byteorder) (tab, new_data) = ssw.generate_table() data = new_data self._strl_blob = ssw.generate_blob(tab) return data def _set_formats_and_types(self, dtypes: Series) -> None: self.typlist = [] self.fmtlist = [] for (col, 
dtype) in dtypes.items(): force_strl = col in self._convert_strl fmt = _dtype_to_default_stata_fmt(dtype, self.data[col], dta_version=self._dta_version, force_strl=force_strl) self.fmtlist.append(fmt) self.typlist.append(_dtype_to_stata_type_117(dtype, self.data[col], force_strl)) class StataWriterUTF8(StataWriter117): _encoding: Literal['utf-8'] = 'utf-8' def __init__(self, fname: FilePath | WriteBuffer[bytes], data: DataFrame, convert_dates: dict[Hashable, str] | None=None, write_index: bool=True, byteorder: str | None=None, time_stamp: datetime | None=None, data_label: str | None=None, variable_labels: dict[Hashable, str] | None=None, convert_strl: Sequence[Hashable] | None=None, version: int | None=None, compression: CompressionOptions='infer', storage_options: StorageOptions | None=None, *, value_labels: dict[Hashable, dict[float, str]] | None=None) -> None: if version is None: version = 118 if data.shape[1] <= 32767 else 119 elif version not in (118, 119): raise ValueError('version must be either 118 or 119.') elif version == 118 and data.shape[1] > 32767: raise ValueError('You must use version 119 for data sets containing more than32,767 variables') super().__init__(fname, data, convert_dates=convert_dates, write_index=write_index, byteorder=byteorder, time_stamp=time_stamp, data_label=data_label, variable_labels=variable_labels, value_labels=value_labels, convert_strl=convert_strl, compression=compression, storage_options=storage_options) self._dta_version = version def _validate_variable_name(self, name: str) -> str: for c in name: if ord(c) < 128 and (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and (c < '0' or c > '9') and (c != '_') or 128 <= ord(c) < 192 or c in {'×', '÷'}: name = name.replace(c, '_') return name # File: pandas-main/pandas/io/xml.py """""" from __future__ import annotations import io from os import PathLike from typing import TYPE_CHECKING, Any from pandas._libs import lib from pandas.compat._optional import import_optional_dependency from pandas.errors import AbstractMethodError, ParserError from pandas.util._decorators import doc from pandas.util._validators import check_dtype_backend from pandas.core.dtypes.common import is_list_like from pandas.core.shared_docs import _shared_docs from pandas.io.common import get_handle, infer_compression, is_fsspec_url, is_url, stringify_path from pandas.io.parsers import TextParser if TYPE_CHECKING: from collections.abc import Callable, Sequence from xml.etree.ElementTree import Element from lxml import etree from pandas._typing import CompressionOptions, ConvertersArg, DtypeArg, DtypeBackend, FilePath, ParseDatesArg, ReadBuffer, StorageOptions, XMLParsers from pandas import DataFrame @doc(storage_options=_shared_docs['storage_options'], decompression_options=_shared_docs['decompression_options'] % 'path_or_buffer') class _XMLFrameParser: def __init__(self, path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str], xpath: str, namespaces: dict[str, str] | None, elems_only: bool, attrs_only: bool, names: Sequence[str] | None, dtype: DtypeArg | None, converters: ConvertersArg | None, parse_dates: ParseDatesArg | None, encoding: str | None, stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None, iterparse: dict[str, list[str]] | None, compression: CompressionOptions, storage_options: StorageOptions) -> None: self.path_or_buffer = path_or_buffer self.xpath = xpath self.namespaces = namespaces self.elems_only = elems_only self.attrs_only = attrs_only self.names = names self.dtype = dtype self.converters = 
converters self.parse_dates = parse_dates self.encoding = encoding self.stylesheet = stylesheet self.iterparse = iterparse self.compression: CompressionOptions = compression self.storage_options = storage_options def parse_data(self) -> list[dict[str, str | None]]: raise AbstractMethodError(self) def _parse_nodes(self, elems: list[Any]) -> list[dict[str, str | None]]: dicts: list[dict[str, str | None]] if self.elems_only and self.attrs_only: raise ValueError('Either element or attributes can be parsed not both.') if self.elems_only: if self.names: dicts = [{**({el.tag: el.text} if el.text and (not el.text.isspace()) else {}), **{nm: ch.text if ch.text else None for (nm, ch) in zip(self.names, el.findall('*'))}} for el in elems] else: dicts = [{ch.tag: ch.text if ch.text else None for ch in el.findall('*')} for el in elems] elif self.attrs_only: dicts = [{k: v if v else None for (k, v) in el.attrib.items()} for el in elems] elif self.names: dicts = [{**el.attrib, **({el.tag: el.text} if el.text and (not el.text.isspace()) else {}), **{nm: ch.text if ch.text else None for (nm, ch) in zip(self.names, el.findall('*'))}} for el in elems] else: dicts = [{**el.attrib, **({el.tag: el.text} if el.text and (not el.text.isspace()) else {}), **{ch.tag: ch.text if ch.text else None for ch in el.findall('*')}} for el in elems] dicts = [{k.split('}')[1] if '}' in k else k: v for (k, v) in d.items()} for d in dicts] keys = list(dict.fromkeys([k for d in dicts for k in d.keys()])) dicts = [{k: d[k] if k in d.keys() else None for k in keys} for d in dicts] if self.names: dicts = [dict(zip(self.names, d.values())) for d in dicts] return dicts def _iterparse_nodes(self, iterparse: Callable) -> list[dict[str, str | None]]: dicts: list[dict[str, str | None]] = [] row: dict[str, str | None] | None = None if not isinstance(self.iterparse, dict): raise TypeError(f'{type(self.iterparse).__name__} is not a valid type for iterparse') row_node = next(iter(self.iterparse.keys())) if self.iterparse else '' if not is_list_like(self.iterparse[row_node]): raise TypeError(f'{type(self.iterparse[row_node])} is not a valid type for value in iterparse') if not hasattr(self.path_or_buffer, 'read') and (not isinstance(self.path_or_buffer, (str, PathLike)) or is_url(self.path_or_buffer) or is_fsspec_url(self.path_or_buffer) or (isinstance(self.path_or_buffer, str) and self.path_or_buffer.startswith((' list[Any]: raise AbstractMethodError(self) def _validate_names(self) -> None: raise AbstractMethodError(self) def _parse_doc(self, raw_doc: FilePath | ReadBuffer[bytes] | ReadBuffer[str]) -> Element | etree._Element: raise AbstractMethodError(self) class _EtreeFrameParser(_XMLFrameParser): def parse_data(self) -> list[dict[str, str | None]]: from xml.etree.ElementTree import iterparse if self.stylesheet is not None: raise ValueError('To use stylesheet, you need lxml installed and selected as parser.') if self.iterparse is None: self.xml_doc = self._parse_doc(self.path_or_buffer) elems = self._validate_path() self._validate_names() xml_dicts: list[dict[str, str | None]] = self._parse_nodes(elems) if self.iterparse is None else self._iterparse_nodes(iterparse) return xml_dicts def _validate_path(self) -> list[Any]: msg = 'xpath does not return any nodes or attributes. Be sure to specify in `xpath` the parent nodes of children and attributes to parse. If document uses namespaces denoted with xmlns, be sure to define namespaces and use them in xpath.' 
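# The stdlib ElementTree backend understands only a limited XPath subset, so
# the lookup below is wrapped in try/except: SyntaxError covers unsupported or
# malformed expressions and KeyError covers namespace prefixes that were never
# declared, and both are re-raised with a clearer message.  An expression that
# matches no elements, or only elements with neither children nor attributes
# to parse, raises ValueError with the msg defined above.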
try: elems = self.xml_doc.findall(self.xpath, namespaces=self.namespaces) children = [ch for el in elems for ch in el.findall('*')] attrs = {k: v for el in elems for (k, v) in el.attrib.items()} if elems is None: raise ValueError(msg) if elems is not None: if self.elems_only and children == []: raise ValueError(msg) if self.attrs_only and attrs == {}: raise ValueError(msg) if children == [] and attrs == {}: raise ValueError(msg) except (KeyError, SyntaxError) as err: raise SyntaxError('You have used an incorrect or unsupported XPath expression for etree library or you used an undeclared namespace prefix.') from err return elems def _validate_names(self) -> None: children: list[Any] if self.names: if self.iterparse: children = self.iterparse[next(iter(self.iterparse))] else: parent = self.xml_doc.find(self.xpath, namespaces=self.namespaces) children = parent.findall('*') if parent is not None else [] if is_list_like(self.names): if len(self.names) < len(children): raise ValueError('names does not match length of child elements in xpath.') else: raise TypeError(f'{type(self.names).__name__} is not a valid type for names') def _parse_doc(self, raw_doc: FilePath | ReadBuffer[bytes] | ReadBuffer[str]) -> Element: from xml.etree.ElementTree import XMLParser, parse handle_data = get_data_from_filepath(filepath_or_buffer=raw_doc, encoding=self.encoding, compression=self.compression, storage_options=self.storage_options) with handle_data as xml_data: curr_parser = XMLParser(encoding=self.encoding) document = parse(xml_data, parser=curr_parser) return document.getroot() class _LxmlFrameParser(_XMLFrameParser): def parse_data(self) -> list[dict[str, str | None]]: from lxml.etree import iterparse if self.iterparse is None: self.xml_doc = self._parse_doc(self.path_or_buffer) if self.stylesheet: self.xsl_doc = self._parse_doc(self.stylesheet) self.xml_doc = self._transform_doc() elems = self._validate_path() self._validate_names() xml_dicts: list[dict[str, str | None]] = self._parse_nodes(elems) if self.iterparse is None else self._iterparse_nodes(iterparse) return xml_dicts def _validate_path(self) -> list[Any]: msg = 'xpath does not return any nodes or attributes. Be sure to specify in `xpath` the parent nodes of children and attributes to parse. If document uses namespaces denoted with xmlns, be sure to define namespaces and use them in xpath.' 
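# lxml evaluates the full XPath 1.0 expression and returns a plain list, so no
# exception handling is needed here: an empty result, or results that carry
# neither child elements nor attributes to parse (subject to elems_only and
# attrs_only), raises ValueError with the msg defined above.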
elems = self.xml_doc.xpath(self.xpath, namespaces=self.namespaces) children = [ch for el in elems for ch in el.xpath('*')] attrs = {k: v for el in elems for (k, v) in el.attrib.items()} if elems == []: raise ValueError(msg) if elems != []: if self.elems_only and children == []: raise ValueError(msg) if self.attrs_only and attrs == {}: raise ValueError(msg) if children == [] and attrs == {}: raise ValueError(msg) return elems def _validate_names(self) -> None: children: list[Any] if self.names: if self.iterparse: children = self.iterparse[next(iter(self.iterparse))] else: children = self.xml_doc.xpath(self.xpath + '[1]/*', namespaces=self.namespaces) if is_list_like(self.names): if len(self.names) < len(children): raise ValueError('names does not match length of child elements in xpath.') else: raise TypeError(f'{type(self.names).__name__} is not a valid type for names') def _parse_doc(self, raw_doc: FilePath | ReadBuffer[bytes] | ReadBuffer[str]) -> etree._Element: from lxml.etree import XMLParser, fromstring, parse handle_data = get_data_from_filepath(filepath_or_buffer=raw_doc, encoding=self.encoding, compression=self.compression, storage_options=self.storage_options) with handle_data as xml_data: curr_parser = XMLParser(encoding=self.encoding) if isinstance(xml_data, io.StringIO): if self.encoding is None: raise TypeError('Can not pass encoding None when input is StringIO.') document = fromstring(xml_data.getvalue().encode(self.encoding), parser=curr_parser) else: document = parse(xml_data, parser=curr_parser) return document def _transform_doc(self) -> etree._XSLTResultTree: from lxml.etree import XSLT transformer = XSLT(self.xsl_doc) new_doc = transformer(self.xml_doc) return new_doc def get_data_from_filepath(filepath_or_buffer: FilePath | bytes | ReadBuffer[bytes] | ReadBuffer[str], encoding: str | None, compression: CompressionOptions, storage_options: StorageOptions): filepath_or_buffer = stringify_path(filepath_or_buffer) with get_handle(filepath_or_buffer, 'r', encoding=encoding, compression=compression, storage_options=storage_options) as handle_obj: return preprocess_data(handle_obj.handle.read()) if hasattr(handle_obj.handle, 'read') else handle_obj.handle def preprocess_data(data) -> io.StringIO | io.BytesIO: if isinstance(data, str): data = io.StringIO(data) elif isinstance(data, bytes): data = io.BytesIO(data) return data def _data_to_frame(data, **kwargs) -> DataFrame: tags = next(iter(data)) nodes = [list(d.values()) for d in data] try: with TextParser(nodes, names=tags, **kwargs) as tp: return tp.read() except ParserError as err: raise ParserError('XML document may be too complex for import. 
Try to flatten document and use distinct element and attribute names.') from err def _parse(path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str], xpath: str, namespaces: dict[str, str] | None, elems_only: bool, attrs_only: bool, names: Sequence[str] | None, dtype: DtypeArg | None, converters: ConvertersArg | None, parse_dates: ParseDatesArg | None, encoding: str | None, parser: XMLParsers, stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None, iterparse: dict[str, list[str]] | None, compression: CompressionOptions, storage_options: StorageOptions, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default, **kwargs) -> DataFrame: p: _EtreeFrameParser | _LxmlFrameParser if parser == 'lxml': lxml = import_optional_dependency('lxml.etree', errors='ignore') if lxml is not None: p = _LxmlFrameParser(path_or_buffer, xpath, namespaces, elems_only, attrs_only, names, dtype, converters, parse_dates, encoding, stylesheet, iterparse, compression, storage_options) else: raise ImportError('lxml not found, please install or use the etree parser.') elif parser == 'etree': p = _EtreeFrameParser(path_or_buffer, xpath, namespaces, elems_only, attrs_only, names, dtype, converters, parse_dates, encoding, stylesheet, iterparse, compression, storage_options) else: raise ValueError('Values for parser can only be lxml or etree.') data_dicts = p.parse_data() return _data_to_frame(data=data_dicts, dtype=dtype, converters=converters, parse_dates=parse_dates, dtype_backend=dtype_backend, **kwargs) @doc(storage_options=_shared_docs['storage_options'], decompression_options=_shared_docs['decompression_options'] % 'path_or_buffer') def read_xml(path_or_buffer: FilePath | ReadBuffer[bytes] | ReadBuffer[str], *, xpath: str='./*', namespaces: dict[str, str] | None=None, elems_only: bool=False, attrs_only: bool=False, names: Sequence[str] | None=None, dtype: DtypeArg | None=None, converters: ConvertersArg | None=None, parse_dates: ParseDatesArg | None=None, encoding: str | None='utf-8', parser: XMLParsers='lxml', stylesheet: FilePath | ReadBuffer[bytes] | ReadBuffer[str] | None=None, iterparse: dict[str, list[str]] | None=None, compression: CompressionOptions='infer', storage_options: StorageOptions | None=None, dtype_backend: DtypeBackend | lib.NoDefault=lib.no_default) -> DataFrame: check_dtype_backend(dtype_backend) return _parse(path_or_buffer=path_or_buffer, xpath=xpath, namespaces=namespaces, elems_only=elems_only, attrs_only=attrs_only, names=names, dtype=dtype, converters=converters, parse_dates=parse_dates, encoding=encoding, parser=parser, stylesheet=stylesheet, iterparse=iterparse, compression=compression, storage_options=storage_options, dtype_backend=dtype_backend) # File: pandas-main/pandas/plotting/__init__.py """""" from pandas.plotting._core import PlotAccessor, boxplot, boxplot_frame, boxplot_frame_groupby, hist_frame, hist_series from pandas.plotting._misc import andrews_curves, autocorrelation_plot, bootstrap_plot, deregister as deregister_matplotlib_converters, lag_plot, parallel_coordinates, plot_params, radviz, register as register_matplotlib_converters, scatter_matrix, table __all__ = ['PlotAccessor', 'boxplot', 'boxplot_frame', 'boxplot_frame_groupby', 'hist_frame', 'hist_series', 'scatter_matrix', 'radviz', 'andrews_curves', 'bootstrap_plot', 'parallel_coordinates', 'lag_plot', 'autocorrelation_plot', 'table', 'plot_params', 'register_matplotlib_converters', 'deregister_matplotlib_converters'] # File: pandas-main/pandas/plotting/_core.py from __future__ import annotations 
import importlib from typing import TYPE_CHECKING, Literal from pandas._config import get_option from pandas.util._decorators import Appender, Substitution from pandas.core.dtypes.common import is_integer, is_list_like from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries from pandas.core.base import PandasObject if TYPE_CHECKING: from collections.abc import Callable, Hashable, Sequence import types from matplotlib.axes import Axes import numpy as np from pandas._typing import IndexLabel from pandas import DataFrame, Index, Series from pandas.core.groupby.generic import DataFrameGroupBy def holds_integer(column: Index) -> bool: return column.inferred_type in {'integer', 'mixed-integer'} def hist_series(self: Series, by=None, ax=None, grid: bool=True, xlabelsize: int | None=None, xrot: float | None=None, ylabelsize: int | None=None, yrot: float | None=None, figsize: tuple[int, int] | None=None, bins: int | Sequence[int]=10, backend: str | None=None, legend: bool=False, **kwargs): plot_backend = _get_plot_backend(backend) return plot_backend.hist_series(self, by=by, ax=ax, grid=grid, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot, figsize=figsize, bins=bins, legend=legend, **kwargs) def hist_frame(data: DataFrame, column: IndexLabel | None=None, by=None, grid: bool=True, xlabelsize: int | None=None, xrot: float | None=None, ylabelsize: int | None=None, yrot: float | None=None, ax=None, sharex: bool=False, sharey: bool=False, figsize: tuple[int, int] | None=None, layout: tuple[int, int] | None=None, bins: int | Sequence[int]=10, backend: str | None=None, legend: bool=False, **kwargs): plot_backend = _get_plot_backend(backend) return plot_backend.hist_frame(data, column=column, by=by, grid=grid, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot, ax=ax, sharex=sharex, sharey=sharey, figsize=figsize, layout=layout, legend=legend, bins=bins, **kwargs) _boxplot_doc = "\nMake a box plot from DataFrame columns.\n\nMake a box-and-whisker plot from DataFrame columns, optionally grouped\nby some other columns. A box plot is a method for graphically depicting\ngroups of numerical data through their quartiles.\nThe box extends from the Q1 to Q3 quartile values of the data,\nwith a line at the median (Q2). The whiskers extend from the edges\nof box to show the range of the data. By default, they extend no more than\n`1.5 * IQR (IQR = Q3 - Q1)` from the edges of the box, ending at the farthest\ndata point within that interval. 
Outliers are plotted as separate dots.\n\nFor further details see\nWikipedia's entry for `boxplot `_.\n\nParameters\n----------\n%(data)scolumn : str or list of str, optional\n Column name or list of names, or vector.\n Can be any valid input to :meth:`pandas.DataFrame.groupby`.\nby : str or array-like, optional\n Column in the DataFrame to :meth:`pandas.DataFrame.groupby`.\n One box-plot will be done per value of columns in `by`.\nax : object of class matplotlib.axes.Axes, optional\n The matplotlib axes to be used by boxplot.\nfontsize : float or str\n Tick label font size in points or as a string (e.g., `large`).\nrot : float, default 0\n The rotation angle of labels (in degrees)\n with respect to the screen coordinate system.\ngrid : bool, default True\n Setting this to True will show the grid.\nfigsize : A tuple (width, height) in inches\n The size of the figure to create in matplotlib.\nlayout : tuple (rows, columns), optional\n For example, (3, 5) will display the subplots\n using 3 rows and 5 columns, starting from the top-left.\nreturn_type : {'axes', 'dict', 'both'} or None, default 'axes'\n The kind of object to return. The default is ``axes``.\n\n * 'axes' returns the matplotlib axes the boxplot is drawn on.\n * 'dict' returns a dictionary whose values are the matplotlib\n Lines of the boxplot.\n * 'both' returns a namedtuple with the axes and dict.\n * when grouping with ``by``, a Series mapping columns to\n ``return_type`` is returned.\n\n If ``return_type`` is `None`, a NumPy array\n of axes with the same shape as ``layout`` is returned.\n%(backend)s\n**kwargs\n All other plotting keyword arguments to be passed to\n :func:`matplotlib.pyplot.boxplot`.\n\nReturns\n-------\nresult\n See Notes.\n\nSee Also\n--------\nSeries.plot.hist: Make a histogram.\nmatplotlib.pyplot.boxplot : Matplotlib equivalent plot.\n\nNotes\n-----\nThe return type depends on the `return_type` parameter:\n\n* 'axes' : object of class matplotlib.axes.Axes\n* 'dict' : dict of matplotlib.lines.Line2D objects\n* 'both' : a namedtuple with structure (ax, lines)\n\nFor data grouped with ``by``, return a Series of the above or a numpy\narray:\n\n* :class:`~pandas.Series`\n* :class:`~numpy.array` (for ``return_type = None``)\n\nUse ``return_type='dict'`` when you want to tweak the appearance\nof the lines after plotting. In this case a dict containing the Lines\nmaking up the boxes, caps, fliers, medians, and whiskers is returned.\n\nExamples\n--------\n\nBoxplots can be created for every column in the dataframe\nby ``df.boxplot()`` or indicating the columns to be used:\n\n.. plot::\n :context: close-figs\n\n >>> np.random.seed(1234)\n >>> df = pd.DataFrame(np.random.randn(10, 4),\n ... columns=['Col1', 'Col2', 'Col3', 'Col4'])\n >>> boxplot = df.boxplot(column=['Col1', 'Col2', 'Col3']) # doctest: +SKIP\n\nBoxplots of variables distributions grouped by the values of a third\nvariable can be created using the option ``by``. For instance:\n\n.. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame(np.random.randn(10, 2),\n ... columns=['Col1', 'Col2'])\n >>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',\n ... 'B', 'B', 'B', 'B', 'B'])\n >>> boxplot = df.boxplot(by='X')\n\nA list of strings (i.e. ``['X', 'Y']``) can be passed to boxplot\nin order to group the data by combination of the variables in the x-axis:\n\n.. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame(np.random.randn(10, 3),\n ... columns=['Col1', 'Col2', 'Col3'])\n >>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',\n ... 
'B', 'B', 'B', 'B', 'B'])\n >>> df['Y'] = pd.Series(['A', 'B', 'A', 'B', 'A',\n ... 'B', 'A', 'B', 'A', 'B'])\n >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by=['X', 'Y'])\n\nThe layout of boxplot can be adjusted giving a tuple to ``layout``:\n\n.. plot::\n :context: close-figs\n\n >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',\n ... layout=(2, 1))\n\nAdditional formatting can be done to the boxplot, like suppressing the grid\n(``grid=False``), rotating the labels in the x-axis (i.e. ``rot=45``)\nor changing the fontsize (i.e. ``fontsize=15``):\n\n.. plot::\n :context: close-figs\n\n >>> boxplot = df.boxplot(grid=False, rot=45, fontsize=15) # doctest: +SKIP\n\nThe parameter ``return_type`` can be used to select the type of element\nreturned by `boxplot`. When ``return_type='axes'`` is selected,\nthe matplotlib axes on which the boxplot is drawn are returned:\n\n >>> boxplot = df.boxplot(column=['Col1', 'Col2'], return_type='axes')\n >>> type(boxplot)\n \n\nWhen grouping with ``by``, a Series mapping columns to ``return_type``\nis returned:\n\n >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',\n ... return_type='axes')\n >>> type(boxplot)\n \n\nIf ``return_type`` is `None`, a NumPy array of axes with the same shape\nas ``layout`` is returned:\n\n >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',\n ... return_type=None)\n >>> type(boxplot)\n \n" _backend_doc = "backend : str, default None\n Backend to use instead of the backend specified in the option\n ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to\n specify the ``plotting.backend`` for the whole session, set\n ``pd.options.plotting.backend``.\n" _bar_or_line_doc = "\n Parameters\n ----------\n x : label or position, optional\n Allows plotting of one column versus another. If not specified,\n the index of the DataFrame is used.\n y : label or position, optional\n Allows plotting of one column versus another. If not specified,\n all numerical columns are used.\n color : str, array-like, or dict, optional\n The color for each of the DataFrame's columns. Possible values are:\n\n - A single color string referred to by name, RGB or RGBA code,\n for instance 'red' or '#a98d19'.\n\n - A sequence of color strings referred to by name, RGB or RGBA\n code, which will be used for each column recursively. For\n instance ['green','yellow'] each column's %(kind)s will be filled in\n green or yellow, alternatively. If there is only a single column to\n be plotted, then only the first color from the color list will be\n used.\n\n - A dict of the form {column name : color}, so that each column will be\n colored accordingly. 
For example, if your columns are called `a` and\n `b`, then passing {'a': 'green', 'b': 'red'} will color %(kind)ss for\n column `a` in green and %(kind)ss for column `b` in red.\n\n **kwargs\n Additional keyword arguments are documented in\n :meth:`DataFrame.plot`.\n\n Returns\n -------\n matplotlib.axes.Axes or np.ndarray of them\n An ndarray is returned with one :class:`matplotlib.axes.Axes`\n per column when ``subplots=True``.\n" @Substitution(data='data : DataFrame\n The data to visualize.\n', backend='') @Appender(_boxplot_doc) def boxplot(data: DataFrame, column: str | list[str] | None=None, by: str | list[str] | None=None, ax: Axes | None=None, fontsize: float | str | None=None, rot: int=0, grid: bool=True, figsize: tuple[float, float] | None=None, layout: tuple[int, int] | None=None, return_type: str | None=None, **kwargs): plot_backend = _get_plot_backend('matplotlib') return plot_backend.boxplot(data, column=column, by=by, ax=ax, fontsize=fontsize, rot=rot, grid=grid, figsize=figsize, layout=layout, return_type=return_type, **kwargs) @Substitution(data='', backend=_backend_doc) @Appender(_boxplot_doc) def boxplot_frame(self: DataFrame, column=None, by=None, ax=None, fontsize: int | None=None, rot: int=0, grid: bool=True, figsize: tuple[float, float] | None=None, layout=None, return_type=None, backend=None, **kwargs): plot_backend = _get_plot_backend(backend) return plot_backend.boxplot_frame(self, column=column, by=by, ax=ax, fontsize=fontsize, rot=rot, grid=grid, figsize=figsize, layout=layout, return_type=return_type, **kwargs) def boxplot_frame_groupby(grouped: DataFrameGroupBy, subplots: bool=True, column=None, fontsize: int | None=None, rot: int=0, grid: bool=True, ax=None, figsize: tuple[float, float] | None=None, layout=None, sharex: bool=False, sharey: bool=True, backend=None, **kwargs): plot_backend = _get_plot_backend(backend) return plot_backend.boxplot_frame_groupby(grouped, subplots=subplots, column=column, fontsize=fontsize, rot=rot, grid=grid, ax=ax, figsize=figsize, layout=layout, sharex=sharex, sharey=sharey, **kwargs) class PlotAccessor(PandasObject): _common_kinds = ('line', 'bar', 'barh', 'kde', 'density', 'area', 'hist', 'box') _series_kinds = ('pie',) _dataframe_kinds = ('scatter', 'hexbin') _kind_aliases = {'density': 'kde'} _all_kinds = _common_kinds + _series_kinds + _dataframe_kinds def __init__(self, data: Series | DataFrame) -> None: self._parent = data @staticmethod def _get_call_args(backend_name: str, data: Series | DataFrame, args, kwargs): if isinstance(data, ABCSeries): arg_def = [('kind', 'line'), ('ax', None), ('figsize', None), ('use_index', True), ('title', None), ('grid', None), ('legend', False), ('style', None), ('logx', False), ('logy', False), ('loglog', False), ('xticks', None), ('yticks', None), ('xlim', None), ('ylim', None), ('rot', None), ('fontsize', None), ('colormap', None), ('table', False), ('yerr', None), ('xerr', None), ('label', None), ('secondary_y', False), ('xlabel', None), ('ylabel', None)] elif isinstance(data, ABCDataFrame): arg_def = [('x', None), ('y', None), ('kind', 'line'), ('ax', None), ('subplots', False), ('sharex', None), ('sharey', False), ('layout', None), ('figsize', None), ('use_index', True), ('title', None), ('grid', None), ('legend', True), ('style', None), ('logx', False), ('logy', False), ('loglog', False), ('xticks', None), ('yticks', None), ('xlim', None), ('ylim', None), ('rot', None), ('fontsize', None), ('colormap', None), ('table', False), ('yerr', None), ('xerr', None), ('secondary_y', False), 
('xlabel', None), ('ylabel', None)] else: raise TypeError(f'Called plot accessor for type {type(data).__name__}, expected Series or DataFrame') if args and isinstance(data, ABCSeries): positional_args = str(args)[1:-1] keyword_args = ', '.join([f'{name}={value!r}' for ((name, _), value) in zip(arg_def, args)]) msg = f'`Series.plot()` should not be called with positional arguments, only keyword arguments. The order of positional arguments will change in the future. Use `Series.plot({keyword_args})` instead of `Series.plot({positional_args})`.' raise TypeError(msg) pos_args = {name: value for ((name, _), value) in zip(arg_def, args)} if backend_name == 'pandas.plotting._matplotlib': kwargs = dict(arg_def, **pos_args, **kwargs) else: kwargs = dict(pos_args, **kwargs) x = kwargs.pop('x', None) y = kwargs.pop('y', None) kind = kwargs.pop('kind', 'line') return (x, y, kind, kwargs) def __call__(self, *args, **kwargs): plot_backend = _get_plot_backend(kwargs.pop('backend', None)) (x, y, kind, kwargs) = self._get_call_args(plot_backend.__name__, self._parent, args, kwargs) kind = self._kind_aliases.get(kind, kind) if plot_backend.__name__ != 'pandas.plotting._matplotlib': return plot_backend.plot(self._parent, x=x, y=y, kind=kind, **kwargs) if kind not in self._all_kinds: raise ValueError(f'{kind} is not a valid plot kind Valid plot kinds: {self._all_kinds}') data = self._parent if isinstance(data, ABCSeries): kwargs['reuse_plot'] = True if kind in self._dataframe_kinds: if isinstance(data, ABCDataFrame): return plot_backend.plot(data, x=x, y=y, kind=kind, **kwargs) else: raise ValueError(f'plot kind {kind} can only be used for data frames') elif kind in self._series_kinds: if isinstance(data, ABCDataFrame): if y is None and kwargs.get('subplots') is False: raise ValueError(f"{kind} requires either y column or 'subplots=True'") if y is not None: if is_integer(y) and (not holds_integer(data.columns)): y = data.columns[y] data = data[y].copy(deep=False) data.index.name = y elif isinstance(data, ABCDataFrame): data_cols = data.columns if x is not None: if is_integer(x) and (not holds_integer(data.columns)): x = data_cols[x] elif not isinstance(data[x], ABCSeries): raise ValueError('x must be a label or position') data = data.set_index(x) if y is not None: int_ylist = is_list_like(y) and all((is_integer(c) for c in y)) int_y_arg = is_integer(y) or int_ylist if int_y_arg and (not holds_integer(data.columns)): y = data_cols[y] label_kw = kwargs['label'] if 'label' in kwargs else False for kw in ['xerr', 'yerr']: if kw in kwargs and (isinstance(kwargs[kw], str) or is_integer(kwargs[kw])): try: kwargs[kw] = data[kwargs[kw]] except (IndexError, KeyError, TypeError): pass data = data[y] if isinstance(data, ABCSeries): label_name = label_kw or y data.name = label_name else: match = is_list_like(label_kw) and len(label_kw) == len(y) if label_kw and (not match): raise ValueError('label should be list-like and same length as y') label_name = label_kw or data.columns data.columns = label_name return plot_backend.plot(data, kind=kind, **kwargs) __call__.__doc__ = __doc__ @Appender('\n See Also\n --------\n matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.\n\n Examples\n --------\n\n .. plot::\n :context: close-figs\n\n >>> s = pd.Series([1, 3, 2])\n >>> s.plot.line() # doctest: +SKIP\n\n .. plot::\n :context: close-figs\n\n The following example shows the populations for some animals\n over the years.\n\n >>> df = pd.DataFrame({\n ... \'pig\': [20, 18, 489, 675, 1776],\n ... 
\'horse\': [4, 25, 281, 600, 1900]\n ... }, index=[1990, 1997, 2003, 2009, 2014])\n >>> lines = df.plot.line()\n\n .. plot::\n :context: close-figs\n\n An example with subplots, so an array of axes is returned.\n\n >>> axes = df.plot.line(subplots=True)\n >>> type(axes)\n \n\n .. plot::\n :context: close-figs\n\n Let\'s repeat the same example, but specifying colors for\n each column (in this case, for each animal).\n\n >>> axes = df.plot.line(\n ... subplots=True, color={"pig": "pink", "horse": "#742802"}\n ... )\n\n .. plot::\n :context: close-figs\n\n The following example shows the relationship between both\n populations.\n\n >>> lines = df.plot.line(x=\'pig\', y=\'horse\')\n ') @Substitution(kind='line') @Appender(_bar_or_line_doc) def line(self, x: Hashable | None=None, y: Hashable | None=None, color: str | Sequence[str] | dict | None=None, **kwargs) -> PlotAccessor: if color is not None: kwargs['color'] = color return self(kind='line', x=x, y=y, **kwargs) @Appender('\n See Also\n --------\n DataFrame.plot.barh : Horizontal bar plot.\n DataFrame.plot : Make plots of a DataFrame.\n matplotlib.pyplot.bar : Make a bar plot with matplotlib.\n\n Examples\n --------\n Basic plot.\n\n .. plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({\'lab\': [\'A\', \'B\', \'C\'], \'val\': [10, 30, 20]})\n >>> ax = df.plot.bar(x=\'lab\', y=\'val\', rot=0)\n\n Plot a whole dataframe to a bar plot. Each column is assigned a\n distinct color, and each row is nested in a group along the\n horizontal axis.\n\n .. plot::\n :context: close-figs\n\n >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]\n >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]\n >>> index = [\'snail\', \'pig\', \'elephant\',\n ... \'rabbit\', \'giraffe\', \'coyote\', \'horse\']\n >>> df = pd.DataFrame({\'speed\': speed,\n ... \'lifespan\': lifespan}, index=index)\n >>> ax = df.plot.bar(rot=0)\n\n Plot stacked bar charts for the DataFrame\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.bar(stacked=True)\n\n Instead of nesting, the figure can be split by column with\n ``subplots=True``. In this case, a :class:`numpy.ndarray` of\n :class:`matplotlib.axes.Axes` are returned.\n\n .. plot::\n :context: close-figs\n\n >>> axes = df.plot.bar(rot=0, subplots=True)\n >>> axes[1].legend(loc=2) # doctest: +SKIP\n\n If you don\'t like the default colours, you can specify how you\'d\n like each column to be colored.\n\n .. plot::\n :context: close-figs\n\n >>> axes = df.plot.bar(\n ... rot=0, subplots=True, color={"speed": "red", "lifespan": "green"}\n ... )\n >>> axes[1].legend(loc=2) # doctest: +SKIP\n\n Plot a single column.\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.bar(y=\'speed\', rot=0)\n\n Plot only selected categories for the DataFrame.\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.bar(x=\'lifespan\', rot=0)\n ') @Substitution(kind='bar') @Appender(_bar_or_line_doc) def bar(self, x: Hashable | None=None, y: Hashable | None=None, color: str | Sequence[str] | dict | None=None, **kwargs) -> PlotAccessor: if color is not None: kwargs['color'] = color return self(kind='bar', x=x, y=y, **kwargs) @Appender('\n See Also\n --------\n DataFrame.plot.bar : Vertical bar plot.\n DataFrame.plot : Make plots of DataFrame using matplotlib.\n matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.\n\n Examples\n --------\n Basic example\n\n .. 
plot::\n :context: close-figs\n\n >>> df = pd.DataFrame({\'lab\': [\'A\', \'B\', \'C\'], \'val\': [10, 30, 20]})\n >>> ax = df.plot.barh(x=\'lab\', y=\'val\')\n\n Plot a whole DataFrame to a horizontal bar plot\n\n .. plot::\n :context: close-figs\n\n >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]\n >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]\n >>> index = [\'snail\', \'pig\', \'elephant\',\n ... \'rabbit\', \'giraffe\', \'coyote\', \'horse\']\n >>> df = pd.DataFrame({\'speed\': speed,\n ... \'lifespan\': lifespan}, index=index)\n >>> ax = df.plot.barh()\n\n Plot stacked barh charts for the DataFrame\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.barh(stacked=True)\n\n We can specify colors for each column\n\n .. plot::\n :context: close-figs\n\n >>> ax = df.plot.barh(color={"speed": "red", "lifespan": "green"})\n\n Plot a column of the DataFrame to a horizontal bar plot\n\n .. plot::\n :context: close-figs\n\n >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]\n >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]\n >>> index = [\'snail\', \'pig\', \'elephant\',\n ... \'rabbit\', \'giraffe\', \'coyote\', \'horse\']\n >>> df = pd.DataFrame({\'speed\': speed,\n ... \'lifespan\': lifespan}, index=index)\n >>> ax = df.plot.barh(y=\'speed\')\n\n Plot DataFrame versus the desired column\n\n .. plot::\n :context: close-figs\n\n >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]\n >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]\n >>> index = [\'snail\', \'pig\', \'elephant\',\n ... \'rabbit\', \'giraffe\', \'coyote\', \'horse\']\n >>> df = pd.DataFrame({\'speed\': speed,\n ... \'lifespan\': lifespan}, index=index)\n >>> ax = df.plot.barh(x=\'lifespan\')\n ') @Substitution(kind='bar') @Appender(_bar_or_line_doc) def barh(self, x: Hashable | None=None, y: Hashable | None=None, color: str | Sequence[str] | dict | None=None, **kwargs) -> PlotAccessor: if color is not None: kwargs['color'] = color return self(kind='barh', x=x, y=y, **kwargs) def box(self, by: IndexLabel | None=None, **kwargs) -> PlotAccessor: return self(kind='box', by=by, **kwargs) def hist(self, by: IndexLabel | None=None, bins: int=10, **kwargs) -> PlotAccessor: return self(kind='hist', by=by, bins=bins, **kwargs) def kde(self, bw_method: Literal['scott', 'silverman'] | float | Callable | None=None, ind: np.ndarray | int | None=None, weights: np.ndarray | None=None, **kwargs) -> PlotAccessor: return self(kind='kde', bw_method=bw_method, ind=ind, weights=weights, **kwargs) density = kde def area(self, x: Hashable | None=None, y: Hashable | None=None, stacked: bool=True, **kwargs) -> PlotAccessor: return self(kind='area', x=x, y=y, stacked=stacked, **kwargs) def pie(self, y: IndexLabel | None=None, **kwargs) -> PlotAccessor: if y is not None: kwargs['y'] = y if isinstance(self._parent, ABCDataFrame) and kwargs.get('y', None) is None and (not kwargs.get('subplots', False)): raise ValueError("pie requires either y column or 'subplots=True'") return self(kind='pie', **kwargs) def scatter(self, x: Hashable, y: Hashable, s: Hashable | Sequence[Hashable] | None=None, c: Hashable | Sequence[Hashable] | None=None, **kwargs) -> PlotAccessor: return self(kind='scatter', x=x, y=y, s=s, c=c, **kwargs) def hexbin(self, x: Hashable, y: Hashable, C: Hashable | None=None, reduce_C_function: Callable | None=None, gridsize: int | tuple[int, int] | None=None, **kwargs) -> PlotAccessor: if reduce_C_function is not None: kwargs['reduce_C_function'] = reduce_C_function if gridsize is not None: kwargs['gridsize'] = gridsize return self(kind='hexbin', x=x, y=y, C=C, **kwargs) 
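# ---------------------------------------------------------------------------
# Illustrative sketch (not part of pandas): the contract that _load_backend and
# _get_plot_backend below rely on.  A third-party plotting backend is simply an
# importable module exposing a top-level ``plot`` callable, discovered either
# through the "pandas_plotting_backends" entry-point group or by importing the
# module named in the ``plotting.backend`` option.  The function below is a
# hypothetical stand-in for such a module-level ``plot``; its name and return
# value are invented for illustration only.
def _example_backend_plot(data, kind, **kwargs):
    # pandas passes the Series/DataFrame, the requested plot kind (e.g. "line")
    # and the remaining keyword arguments (x=..., y=..., figsize=..., etc.)
    # straight through; whatever the backend returns is what the user gets back
    # from ``df.plot(...)``.
    return {"kind": kind, "n_rows": len(data), "options": sorted(kwargs)}
# A module providing such a ``plot`` function could be selected for a session
# with ``pd.set_option("plotting.backend", "my_backend")`` (module name
# hypothetical) or advertised via the entry-point group checked below.
# ---------------------------------------------------------------------------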
_backends: dict[str, types.ModuleType] = {} def _load_backend(backend: str) -> types.ModuleType: from importlib.metadata import entry_points if backend == 'matplotlib': try: module = importlib.import_module('pandas.plotting._matplotlib') except ImportError: raise ImportError('matplotlib is required for plotting when the default backend "matplotlib" is selected.') from None return module found_backend = False eps = entry_points() key = 'pandas_plotting_backends' if hasattr(eps, 'select'): entry = eps.select(group=key) else: entry = eps.get(key, ()) for entry_point in entry: found_backend = entry_point.name == backend if found_backend: module = entry_point.load() break if not found_backend: try: module = importlib.import_module(backend) found_backend = True except ImportError: pass if found_backend: if hasattr(module, 'plot'): return module raise ValueError(f"Could not find plotting backend '{backend}'. Ensure that you've installed the package providing the '{backend}' entrypoint, or that the package has a top-level `.plot` method.") def _get_plot_backend(backend: str | None=None): backend_str: str = backend or get_option('plotting.backend') if backend_str in _backends: return _backends[backend_str] module = _load_backend(backend_str) _backends[backend_str] = module return module # File: pandas-main/pandas/plotting/_matplotlib/__init__.py from __future__ import annotations from typing import TYPE_CHECKING from pandas.plotting._matplotlib.boxplot import BoxPlot, boxplot, boxplot_frame, boxplot_frame_groupby from pandas.plotting._matplotlib.converter import deregister, register from pandas.plotting._matplotlib.core import AreaPlot, BarhPlot, BarPlot, HexBinPlot, LinePlot, PiePlot, ScatterPlot from pandas.plotting._matplotlib.hist import HistPlot, KdePlot, hist_frame, hist_series from pandas.plotting._matplotlib.misc import andrews_curves, autocorrelation_plot, bootstrap_plot, lag_plot, parallel_coordinates, radviz, scatter_matrix from pandas.plotting._matplotlib.tools import table if TYPE_CHECKING: from pandas.plotting._matplotlib.core import MPLPlot PLOT_CLASSES: dict[str, type[MPLPlot]] = {'line': LinePlot, 'bar': BarPlot, 'barh': BarhPlot, 'box': BoxPlot, 'hist': HistPlot, 'kde': KdePlot, 'area': AreaPlot, 'pie': PiePlot, 'scatter': ScatterPlot, 'hexbin': HexBinPlot} def plot(data, kind, **kwargs): import matplotlib.pyplot as plt if kwargs.pop('reuse_plot', False): ax = kwargs.get('ax') if ax is None and len(plt.get_fignums()) > 0: with plt.rc_context(): ax = plt.gca() kwargs['ax'] = getattr(ax, 'left_ax', ax) plot_obj = PLOT_CLASSES[kind](data, **kwargs) plot_obj.generate() plt.draw_if_interactive() return plot_obj.result __all__ = ['plot', 'hist_series', 'hist_frame', 'boxplot', 'boxplot_frame', 'boxplot_frame_groupby', 'table', 'andrews_curves', 'autocorrelation_plot', 'bootstrap_plot', 'lag_plot', 'parallel_coordinates', 'radviz', 'scatter_matrix', 'register', 'deregister'] # File: pandas-main/pandas/plotting/_matplotlib/boxplot.py from __future__ import annotations from typing import TYPE_CHECKING, Literal, NamedTuple import warnings import matplotlib as mpl import numpy as np from pandas._libs import lib from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_dict_like from pandas.core.dtypes.generic import ABCSeries from pandas.core.dtypes.missing import remove_na_arraylike import pandas as pd import pandas.core.common as com from pandas.io.formats.printing import pprint_thing from 
pandas.plotting._matplotlib.core import LinePlot, MPLPlot from pandas.plotting._matplotlib.groupby import create_iter_data_given_by from pandas.plotting._matplotlib.style import get_standard_colors from pandas.plotting._matplotlib.tools import create_subplots, flatten_axes, maybe_adjust_figure if TYPE_CHECKING: from collections.abc import Collection from matplotlib.axes import Axes from matplotlib.figure import Figure from matplotlib.lines import Line2D from pandas._typing import MatplotlibColor def _set_ticklabels(ax: Axes, labels: list[str], is_vertical: bool, **kwargs) -> None: ticks = ax.get_xticks() if is_vertical else ax.get_yticks() if len(ticks) != len(labels): (i, remainder) = divmod(len(ticks), len(labels)) assert remainder == 0, remainder labels *= i if is_vertical: ax.set_xticklabels(labels, **kwargs) else: ax.set_yticklabels(labels, **kwargs) class BoxPlot(LinePlot): @property def _kind(self) -> Literal['box']: return 'box' _layout_type = 'horizontal' _valid_return_types = (None, 'axes', 'dict', 'both') class BP(NamedTuple): ax: Axes lines: dict[str, list[Line2D]] def __init__(self, data, return_type: str='axes', **kwargs) -> None: if return_type not in self._valid_return_types: raise ValueError("return_type must be {None, 'axes', 'dict', 'both'}") self.return_type = return_type MPLPlot.__init__(self, data, **kwargs) if self.subplots: if self.orientation == 'vertical': self.sharex = False else: self.sharey = False @classmethod def _plot(cls, ax: Axes, y: np.ndarray, column_num=None, return_type: str='axes', **kwds): ys: np.ndarray | list[np.ndarray] if y.ndim == 2: ys = [remove_na_arraylike(v) for v in y] ys = [v if v.size > 0 else np.array([np.nan]) for v in ys] else: ys = remove_na_arraylike(y) bp = ax.boxplot(ys, **kwds) if return_type == 'dict': return (bp, bp) elif return_type == 'both': return (cls.BP(ax=ax, lines=bp), bp) else: return (ax, bp) def _validate_color_args(self, color, colormap): if color is lib.no_default: return None if colormap is not None: warnings.warn("'color' and 'colormap' cannot be used simultaneously. Using 'color'", stacklevel=find_stack_level()) if isinstance(color, dict): valid_keys = ['boxes', 'whiskers', 'medians', 'caps'] for key in color: if key not in valid_keys: raise ValueError(f"color dict contains invalid key '{key}'. 
The key must be either {valid_keys}") return color @cache_readonly def _color_attrs(self): return get_standard_colors(num_colors=3, colormap=self.colormap, color=None) @cache_readonly def _boxes_c(self): return self._color_attrs[0] @cache_readonly def _whiskers_c(self): return self._color_attrs[0] @cache_readonly def _medians_c(self): return self._color_attrs[2] @cache_readonly def _caps_c(self): return self._color_attrs[0] def _get_colors(self, num_colors=None, color_kwds: dict[str, MatplotlibColor] | MatplotlibColor | Collection[MatplotlibColor] | None='color') -> None: pass def maybe_color_bp(self, bp) -> None: if isinstance(self.color, dict): boxes = self.color.get('boxes', self._boxes_c) whiskers = self.color.get('whiskers', self._whiskers_c) medians = self.color.get('medians', self._medians_c) caps = self.color.get('caps', self._caps_c) else: boxes = self.color or self._boxes_c whiskers = self.color or self._whiskers_c medians = self.color or self._medians_c caps = self.color or self._caps_c color_tup = (boxes, whiskers, medians, caps) maybe_color_bp(bp, color_tup=color_tup, **self.kwds) def _make_plot(self, fig: Figure) -> None: if self.subplots: self._return_obj = pd.Series(dtype=object) data = create_iter_data_given_by(self.data, self._kind) if self.by is not None else self.data for (i, (label, y)) in enumerate(self._iter_data(data=data)): ax = self._get_ax(i) kwds = self.kwds.copy() if self.by is not None: y = y.T ax.set_title(pprint_thing(label)) levels = self.data.columns.levels ticklabels = [pprint_thing(col) for col in levels[0]] else: ticklabels = [pprint_thing(label)] (ret, bp) = self._plot(ax, y, column_num=i, return_type=self.return_type, **kwds) self.maybe_color_bp(bp) self._return_obj[label] = ret _set_ticklabels(ax=ax, labels=ticklabels, is_vertical=self.orientation == 'vertical') else: y = self.data.values.T ax = self._get_ax(0) kwds = self.kwds.copy() (ret, bp) = self._plot(ax, y, column_num=0, return_type=self.return_type, **kwds) self.maybe_color_bp(bp) self._return_obj = ret labels = [pprint_thing(left) for left in self.data.columns] if not self.use_index: labels = [pprint_thing(key) for key in range(len(labels))] _set_ticklabels(ax=ax, labels=labels, is_vertical=self.orientation == 'vertical') def _make_legend(self) -> None: pass def _post_plot_logic(self, ax: Axes, data) -> None: if self.xlabel: ax.set_xlabel(pprint_thing(self.xlabel)) if self.ylabel: ax.set_ylabel(pprint_thing(self.ylabel)) @property def orientation(self) -> Literal['horizontal', 'vertical']: if self.kwds.get('vert', True): return 'vertical' else: return 'horizontal' @property def result(self): if self.return_type is None: return super().result else: return self._return_obj def maybe_color_bp(bp, color_tup, **kwds) -> None: if not kwds.get('boxprops'): mpl.artist.setp(bp['boxes'], color=color_tup[0], alpha=1) if not kwds.get('whiskerprops'): mpl.artist.setp(bp['whiskers'], color=color_tup[1], alpha=1) if not kwds.get('medianprops'): mpl.artist.setp(bp['medians'], color=color_tup[2], alpha=1) if not kwds.get('capprops'): mpl.artist.setp(bp['caps'], color=color_tup[3], alpha=1) def _grouped_plot_by_column(plotf, data, columns=None, by=None, numeric_only: bool=True, grid: bool=False, figsize: tuple[float, float] | None=None, ax=None, layout=None, return_type=None, **kwargs): grouped = data.groupby(by, observed=False) if columns is None: if not isinstance(by, (list, tuple)): by = [by] columns = data._get_numeric_data().columns.difference(by) naxes = len(columns) (fig, axes) = 
create_subplots(naxes=naxes, sharex=kwargs.pop('sharex', True), sharey=kwargs.pop('sharey', True), figsize=figsize, ax=ax, layout=layout) (xlabel, ylabel) = (kwargs.pop('xlabel', None), kwargs.pop('ylabel', None)) if kwargs.get('vert', True): xlabel = xlabel or by else: ylabel = ylabel or by ax_values = [] for (ax, col) in zip(flatten_axes(axes), columns): gp_col = grouped[col] (keys, values) = zip(*gp_col) re_plotf = plotf(keys, values, ax, xlabel=xlabel, ylabel=ylabel, **kwargs) ax.set_title(col) ax_values.append(re_plotf) ax.grid(grid) result = pd.Series(ax_values, index=columns, copy=False) if return_type is None: result = axes byline = by[0] if len(by) == 1 else by fig.suptitle(f'Boxplot grouped by {byline}') maybe_adjust_figure(fig, bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2) return result def boxplot(data, column=None, by=None, ax=None, fontsize: int | None=None, rot: int=0, grid: bool=True, figsize: tuple[float, float] | None=None, layout=None, return_type=None, **kwds): import matplotlib.pyplot as plt if return_type not in BoxPlot._valid_return_types: raise ValueError("return_type must be {'axes', 'dict', 'both'}") if isinstance(data, ABCSeries): data = data.to_frame('x') column = 'x' def _get_colors(): result_list = get_standard_colors(num_colors=3) result = np.take(result_list, [0, 0, 2]) result = np.append(result, 'k') colors = kwds.pop('color', None) if colors: if is_dict_like(colors): valid_keys = ['boxes', 'whiskers', 'medians', 'caps'] key_to_index = dict(zip(valid_keys, range(4))) for (key, value) in colors.items(): if key in valid_keys: result[key_to_index[key]] = value else: raise ValueError(f"color dict contains invalid key '{key}'. The key must be either {valid_keys}") else: result.fill(colors) return result def plot_group(keys, values, ax: Axes, **kwds): (xlabel, ylabel) = (kwds.pop('xlabel', None), kwds.pop('ylabel', None)) if xlabel: ax.set_xlabel(pprint_thing(xlabel)) if ylabel: ax.set_ylabel(pprint_thing(ylabel)) keys = [pprint_thing(x) for x in keys] values = [np.asarray(remove_na_arraylike(v), dtype=object) for v in values] bp = ax.boxplot(values, **kwds) if fontsize is not None: ax.tick_params(axis='both', labelsize=fontsize) _set_ticklabels(ax=ax, labels=keys, is_vertical=kwds.get('vert', True), rotation=rot) maybe_color_bp(bp, color_tup=colors, **kwds) if return_type == 'dict': return bp elif return_type == 'both': return BoxPlot.BP(ax=ax, lines=bp) else: return ax colors = _get_colors() if column is None: columns = None elif isinstance(column, (list, tuple)): columns = column else: columns = [column] if by is not None: result = _grouped_plot_by_column(plot_group, data, columns=columns, by=by, grid=grid, figsize=figsize, ax=ax, layout=layout, return_type=return_type, **kwds) else: if return_type is None: return_type = 'axes' if layout is not None: raise ValueError("The 'layout' keyword is not supported when 'by' is None") if ax is None: rc = {'figure.figsize': figsize} if figsize is not None else {} with mpl.rc_context(rc): ax = plt.gca() data = data._get_numeric_data() naxes = len(data.columns) if naxes == 0: raise ValueError('boxplot method requires numerical columns, nothing to plot.') if columns is None: columns = data.columns else: data = data[columns] result = plot_group(columns, data.values.T, ax, **kwds) ax.grid(grid) return result def boxplot_frame(self, column=None, by=None, ax=None, fontsize: int | None=None, rot: int=0, grid: bool=True, figsize: tuple[float, float] | None=None, layout=None, return_type=None, **kwds): import 
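# --- Illustrative usage sketch (added; not part of the pandas source above) ---
# Shows, via the public API, the 'color' dict keys validated by
# BoxPlot._validate_color_args ('boxes', 'whiskers', 'medians', 'caps') and the
# return_type values accepted by boxplot() ({None, 'axes', 'dict', 'both'}).
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.default_rng(0).normal(size=(40, 3)), columns=["a", "b", "c"])
ax = df.plot.box(color={"boxes": "C0", "whiskers": "C1", "medians": "C2", "caps": "k"})
bp_dict = df.boxplot(return_type="dict")  # maps 'boxes', 'whiskers', ... to lists of artists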
matplotlib.pyplot as plt ax = boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize, grid=grid, rot=rot, figsize=figsize, layout=layout, return_type=return_type, **kwds) plt.draw_if_interactive() return ax def boxplot_frame_groupby(grouped, subplots: bool=True, column=None, fontsize: int | None=None, rot: int=0, grid: bool=True, ax=None, figsize: tuple[float, float] | None=None, layout=None, sharex: bool=False, sharey: bool=True, **kwds): if subplots is True: naxes = len(grouped) (fig, axes) = create_subplots(naxes=naxes, squeeze=False, ax=ax, sharex=sharex, sharey=sharey, figsize=figsize, layout=layout) data = {} for ((key, group), ax) in zip(grouped, flatten_axes(axes)): d = group.boxplot(ax=ax, column=column, fontsize=fontsize, rot=rot, grid=grid, **kwds) ax.set_title(pprint_thing(key)) data[key] = d ret = pd.Series(data) maybe_adjust_figure(fig, bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2) else: (keys, frames) = zip(*grouped) df = pd.concat(frames, keys=keys, axis=1) if column is not None: column = com.convert_to_list_like(column) multi_key = pd.MultiIndex.from_product([keys, column]) column = list(multi_key.values) ret = df.boxplot(column=column, fontsize=fontsize, rot=rot, grid=grid, ax=ax, figsize=figsize, layout=layout, **kwds) return ret # File: pandas-main/pandas/plotting/_matplotlib/converter.py from __future__ import annotations import contextlib import datetime as pydt from datetime import datetime, tzinfo import functools from typing import TYPE_CHECKING, Any, cast import warnings import matplotlib as mpl import matplotlib.dates as mdates import matplotlib.units as munits import numpy as np from pandas._libs import lib from pandas._libs.tslibs import Timestamp, to_offset from pandas._libs.tslibs.dtypes import FreqGroup, periods_per_day from pandas._typing import F, npt from pandas.core.dtypes.common import is_float, is_float_dtype, is_integer, is_integer_dtype, is_nested_list_like from pandas import Index, Series, get_option import pandas.core.common as com from pandas.core.indexes.datetimes import date_range from pandas.core.indexes.period import Period, PeriodIndex, period_range import pandas.core.tools.datetimes as tools if TYPE_CHECKING: from collections.abc import Generator from matplotlib.axis import Axis from pandas._libs.tslibs.offsets import BaseOffset _mpl_units = {} def get_pairs(): pairs = [(Timestamp, DatetimeConverter), (Period, PeriodConverter), (pydt.datetime, DatetimeConverter), (pydt.date, DatetimeConverter), (pydt.time, TimeConverter), (np.datetime64, DatetimeConverter)] return pairs def register_pandas_matplotlib_converters(func: F) -> F: @functools.wraps(func) def wrapper(*args, **kwargs): with pandas_converters(): return func(*args, **kwargs) return cast(F, wrapper) @contextlib.contextmanager def pandas_converters() -> Generator[None, None, None]: value = get_option('plotting.matplotlib.register_converters') if value: register() try: yield finally: if value == 'auto': deregister() def register() -> None: pairs = get_pairs() for (type_, cls) in pairs: if type_ in munits.registry and (not isinstance(munits.registry[type_], cls)): previous = munits.registry[type_] _mpl_units[type_] = previous munits.registry[type_] = cls() def deregister() -> None: for (type_, cls) in get_pairs(): if type(munits.registry.get(type_)) is cls: munits.registry.pop(type_) for (unit, formatter) in _mpl_units.items(): if type(formatter) not in {DatetimeConverter, PeriodConverter, TimeConverter}: munits.registry[unit] = formatter def _to_ordinalf(tm: pydt.time) -> 
float: tot_sec = tm.hour * 3600 + tm.minute * 60 + tm.second + tm.microsecond / 10 ** 6 return tot_sec def time2num(d): if isinstance(d, str): parsed = Timestamp(d) return _to_ordinalf(parsed.time()) if isinstance(d, pydt.time): return _to_ordinalf(d) return d class TimeConverter(munits.ConversionInterface): @staticmethod def convert(value, unit, axis): valid_types = (str, pydt.time) if isinstance(value, valid_types) or is_integer(value) or is_float(value): return time2num(value) if isinstance(value, Index): return value.map(time2num) if isinstance(value, (list, tuple, np.ndarray, Index)): return [time2num(x) for x in value] return value @staticmethod def axisinfo(unit, axis) -> munits.AxisInfo | None: if unit != 'time': return None majloc = mpl.ticker.AutoLocator() majfmt = TimeFormatter(majloc) return munits.AxisInfo(majloc=majloc, majfmt=majfmt, label='time') @staticmethod def default_units(x, axis) -> str: return 'time' class TimeFormatter(mpl.ticker.Formatter): def __init__(self, locs) -> None: self.locs = locs def __call__(self, x, pos: int | None=0) -> str: fmt = '%H:%M:%S.%f' s = int(x) msus = round((x - s) * 10 ** 6) ms = msus // 1000 us = msus % 1000 (m, s) = divmod(s, 60) (h, m) = divmod(m, 60) (_, h) = divmod(h, 24) if us != 0: return pydt.time(h, m, s, msus).strftime(fmt) elif ms != 0: return pydt.time(h, m, s, msus).strftime(fmt)[:-3] elif s != 0: return pydt.time(h, m, s).strftime('%H:%M:%S') return pydt.time(h, m).strftime('%H:%M') class PeriodConverter(mdates.DateConverter): @staticmethod def convert(values, units, axis): if is_nested_list_like(values): values = [PeriodConverter._convert_1d(v, units, axis) for v in values] else: values = PeriodConverter._convert_1d(values, units, axis) return values @staticmethod def _convert_1d(values, units, axis): if not hasattr(axis, 'freq'): raise TypeError('Axis must have `freq` set to convert to Periods') valid_types = (str, datetime, Period, pydt.date, pydt.time, np.datetime64) with warnings.catch_warnings(): warnings.filterwarnings('ignore', 'Period with BDay freq is deprecated', category=FutureWarning) warnings.filterwarnings('ignore', 'PeriodDtype\\[B\\] is deprecated', category=FutureWarning) if isinstance(values, valid_types) or is_integer(values) or is_float(values): return get_datevalue(values, axis.freq) elif isinstance(values, PeriodIndex): return values.asfreq(axis.freq).asi8 elif isinstance(values, Index): return values.map(lambda x: get_datevalue(x, axis.freq)) elif lib.infer_dtype(values, skipna=False) == 'period': return PeriodIndex(values, freq=axis.freq).asi8 elif isinstance(values, (list, tuple, np.ndarray, Index)): return [get_datevalue(x, axis.freq) for x in values] return values def get_datevalue(date, freq): if isinstance(date, Period): return date.asfreq(freq).ordinal elif isinstance(date, (str, datetime, pydt.date, pydt.time, np.datetime64)): return Period(date, freq).ordinal elif is_integer(date) or is_float(date) or (isinstance(date, (np.ndarray, Index)) and date.size == 1): return date elif date is None: return None raise ValueError(f"Unrecognizable date '{date}'") class DatetimeConverter(mdates.DateConverter): @staticmethod def convert(values, unit, axis): if is_nested_list_like(values): values = [DatetimeConverter._convert_1d(v, unit, axis) for v in values] else: values = DatetimeConverter._convert_1d(values, unit, axis) return values @staticmethod def _convert_1d(values, unit, axis): def try_parse(values): try: return mdates.date2num(tools.to_datetime(values)) except Exception: return values if 
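# --- Illustrative usage sketch (added; assumes the public helper
# pandas.plotting.register_matplotlib_converters delegates to register() above) ---
# register()/deregister() swap the Datetime/Period/Time converters in and out of
# matplotlib's unit registry; explicit registration looks roughly like this.
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters

register_matplotlib_converters()
s = pd.Series(range(3), index=pd.date_range("2024-01-01", periods=3))
fig, ax = plt.subplots()
ax.plot(s.index, s.to_numpy())  # Timestamp x-values are handled by the registered converter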
isinstance(values, (datetime, pydt.date, np.datetime64, pydt.time)): return mdates.date2num(values) elif is_integer(values) or is_float(values): return values elif isinstance(values, str): return try_parse(values) elif isinstance(values, (list, tuple, np.ndarray, Index, Series)): if isinstance(values, Series): values = Index(values) if isinstance(values, Index): values = values.values if not isinstance(values, np.ndarray): values = com.asarray_tuplesafe(values) if is_integer_dtype(values) or is_float_dtype(values): return values try: values = tools.to_datetime(values) except Exception: pass values = mdates.date2num(values) return values @staticmethod def axisinfo(unit: tzinfo | None, axis) -> munits.AxisInfo: tz = unit majloc = PandasAutoDateLocator(tz=tz) majfmt = PandasAutoDateFormatter(majloc, tz=tz) datemin = pydt.date(2000, 1, 1) datemax = pydt.date(2010, 1, 1) return munits.AxisInfo(majloc=majloc, majfmt=majfmt, label='', default_limits=(datemin, datemax)) class PandasAutoDateFormatter(mdates.AutoDateFormatter): def __init__(self, locator, tz=None, defaultfmt: str='%Y-%m-%d') -> None: mdates.AutoDateFormatter.__init__(self, locator, tz, defaultfmt) class PandasAutoDateLocator(mdates.AutoDateLocator): def get_locator(self, dmin, dmax): tot_sec = (dmax - dmin).total_seconds() if abs(tot_sec) < self.minticks: self._freq = -1 locator = MilliSecondLocator(self.tz) locator.set_axis(self.axis) locator.axis.set_view_interval(*self.axis.get_view_interval()) locator.axis.set_data_interval(*self.axis.get_data_interval()) return locator return mdates.AutoDateLocator.get_locator(self, dmin, dmax) def _get_unit(self): return MilliSecondLocator.get_unit_generic(self._freq) class MilliSecondLocator(mdates.DateLocator): UNIT = 1.0 / (24 * 3600 * 1000) def __init__(self, tz) -> None: mdates.DateLocator.__init__(self, tz) self._interval = 1.0 def _get_unit(self): return self.get_unit_generic(-1) @staticmethod def get_unit_generic(freq): unit = mdates.RRuleLocator.get_unit_generic(freq) if unit < 0: return MilliSecondLocator.UNIT return unit def __call__(self): try: (dmin, dmax) = self.viewlim_to_dt() except ValueError: return [] (nmax, nmin) = mdates.date2num((dmax, dmin)) num = (nmax - nmin) * 86400 * 1000 max_millis_ticks = 6 for interval in [1, 10, 50, 100, 200, 500]: if num <= interval * (max_millis_ticks - 1): self._interval = interval break self._interval = 1000.0 estimate = (nmax - nmin) / (self._get_unit() * self._get_interval()) if estimate > self.MAXTICKS * 2: raise RuntimeError(f'MillisecondLocator estimated to generate {estimate:d} ticks from {dmin} to {dmax}: exceeds Locator.MAXTICKS* 2 ({self.MAXTICKS * 2:d}) ') interval = self._get_interval() freq = f'{interval}ms' tz = self.tz.tzname(None) st = dmin.replace(tzinfo=None) ed = dmax.replace(tzinfo=None) all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).astype(object) try: if len(all_dates) > 0: locs = self.raise_if_exceeds(mdates.date2num(all_dates)) return locs except Exception: pass lims = mdates.date2num([dmin, dmax]) return lims def _get_interval(self): return self._interval def autoscale(self): (dmin, dmax) = self.datalim_to_dt() vmin = mdates.date2num(dmin) vmax = mdates.date2num(dmax) return self.nonsingular(vmin, vmax) def _get_default_annual_spacing(nyears) -> tuple[int, int]: if nyears < 11: (min_spacing, maj_spacing) = (1, 1) elif nyears < 20: (min_spacing, maj_spacing) = (1, 2) elif nyears < 50: (min_spacing, maj_spacing) = (1, 5) elif nyears < 100: (min_spacing, maj_spacing) = (5, 10) elif nyears < 200: (min_spacing, 
maj_spacing) = (5, 25) elif nyears < 600: (min_spacing, maj_spacing) = (10, 50) else: factor = nyears // 1000 + 1 (min_spacing, maj_spacing) = (factor * 20, factor * 100) return (min_spacing, maj_spacing) def _period_break(dates: PeriodIndex, period: str) -> npt.NDArray[np.intp]: mask = _period_break_mask(dates, period) return np.nonzero(mask)[0] def _period_break_mask(dates: PeriodIndex, period: str) -> npt.NDArray[np.bool_]: current = getattr(dates, period) previous = getattr(dates - 1 * dates.freq, period) return current != previous def has_level_label(label_flags: npt.NDArray[np.intp], vmin: float) -> bool: if label_flags.size == 0 or (label_flags.size == 1 and label_flags[0] == 0 and (vmin % 1 > 0.0)): return False else: return True def _get_periods_per_ymd(freq: BaseOffset) -> tuple[int, int, int]: dtype_code = freq._period_dtype_code freq_group = FreqGroup.from_period_dtype_code(dtype_code) ppd = -1 if dtype_code >= FreqGroup.FR_HR.value: ppd = periods_per_day(freq._creso) ppm = 28 * ppd ppy = 365 * ppd elif freq_group == FreqGroup.FR_BUS: ppm = 19 ppy = 261 elif freq_group == FreqGroup.FR_DAY: ppm = 28 ppy = 365 elif freq_group == FreqGroup.FR_WK: ppm = 3 ppy = 52 elif freq_group == FreqGroup.FR_MTH: ppm = 1 ppy = 12 elif freq_group == FreqGroup.FR_QTR: ppm = -1 ppy = 4 elif freq_group == FreqGroup.FR_ANN: ppm = -1 ppy = 1 else: raise NotImplementedError(f'Unsupported frequency: {dtype_code}') return (ppd, ppm, ppy) @functools.cache def _daily_finder(vmin: float, vmax: float, freq: BaseOffset) -> np.ndarray: dtype_code = freq._period_dtype_code (periodsperday, periodspermonth, periodsperyear) = _get_periods_per_ymd(freq) vmin_orig = vmin (vmin, vmax) = (int(vmin), int(vmax)) span = vmax - vmin + 1 with warnings.catch_warnings(): warnings.filterwarnings('ignore', 'Period with BDay freq is deprecated', category=FutureWarning) warnings.filterwarnings('ignore', 'PeriodDtype\\[B\\] is deprecated', category=FutureWarning) dates_ = period_range(start=Period(ordinal=vmin, freq=freq), end=Period(ordinal=vmax, freq=freq), freq=freq) info = np.zeros(span, dtype=[('val', np.int64), ('maj', bool), ('min', bool), ('fmt', '|S20')]) info['val'][:] = dates_.asi8 info['fmt'][:] = '' info['maj'][[0, -1]] = True info_maj = info['maj'] info_min = info['min'] info_fmt = info['fmt'] def first_label(label_flags): if label_flags[0] == 0 and label_flags.size > 1 and (vmin_orig % 1 > 0.0): return label_flags[1] else: return label_flags[0] if span <= periodspermonth: day_start = _period_break(dates_, 'day') month_start = _period_break(dates_, 'month') year_start = _period_break(dates_, 'year') def _hour_finder(label_interval: int, force_year_start: bool) -> None: target = dates_.hour mask = _period_break_mask(dates_, 'hour') info_maj[day_start] = True info_min[mask & (target % label_interval == 0)] = True info_fmt[mask & (target % label_interval == 0)] = '%H:%M' info_fmt[day_start] = '%H:%M\n%d-%b' info_fmt[year_start] = '%H:%M\n%d-%b\n%Y' if force_year_start and (not has_level_label(year_start, vmin_orig)): info_fmt[first_label(day_start)] = '%H:%M\n%d-%b\n%Y' def _minute_finder(label_interval: int) -> None: target = dates_.minute hour_start = _period_break(dates_, 'hour') mask = _period_break_mask(dates_, 'minute') info_maj[hour_start] = True info_min[mask & (target % label_interval == 0)] = True info_fmt[mask & (target % label_interval == 0)] = '%H:%M' info_fmt[day_start] = '%H:%M\n%d-%b' info_fmt[year_start] = '%H:%M\n%d-%b\n%Y' def _second_finder(label_interval: int) -> None: target = dates_.second 
minute_start = _period_break(dates_, 'minute') mask = _period_break_mask(dates_, 'second') info_maj[minute_start] = True info_min[mask & (target % label_interval == 0)] = True info_fmt[mask & (target % label_interval == 0)] = '%H:%M:%S' info_fmt[day_start] = '%H:%M:%S\n%d-%b' info_fmt[year_start] = '%H:%M:%S\n%d-%b\n%Y' if span < periodsperday / 12000: _second_finder(1) elif span < periodsperday / 6000: _second_finder(2) elif span < periodsperday / 2400: _second_finder(5) elif span < periodsperday / 1200: _second_finder(10) elif span < periodsperday / 800: _second_finder(15) elif span < periodsperday / 400: _second_finder(30) elif span < periodsperday / 150: _minute_finder(1) elif span < periodsperday / 70: _minute_finder(2) elif span < periodsperday / 24: _minute_finder(5) elif span < periodsperday / 12: _minute_finder(15) elif span < periodsperday / 6: _minute_finder(30) elif span < periodsperday / 2.5: _hour_finder(1, False) elif span < periodsperday / 1.5: _hour_finder(2, False) elif span < periodsperday * 1.25: _hour_finder(3, False) elif span < periodsperday * 2.5: _hour_finder(6, True) elif span < periodsperday * 4: _hour_finder(12, True) else: info_maj[month_start] = True info_min[day_start] = True info_fmt[day_start] = '%d' info_fmt[month_start] = '%d\n%b' info_fmt[year_start] = '%d\n%b\n%Y' if not has_level_label(year_start, vmin_orig): if not has_level_label(month_start, vmin_orig): info_fmt[first_label(day_start)] = '%d\n%b\n%Y' else: info_fmt[first_label(month_start)] = '%d\n%b\n%Y' elif span <= periodsperyear // 4: month_start = _period_break(dates_, 'month') info_maj[month_start] = True if dtype_code < FreqGroup.FR_HR.value: info['min'] = True else: day_start = _period_break(dates_, 'day') info['min'][day_start] = True week_start = _period_break(dates_, 'week') year_start = _period_break(dates_, 'year') info_fmt[week_start] = '%d' info_fmt[month_start] = '\n\n%b' info_fmt[year_start] = '\n\n%b\n%Y' if not has_level_label(year_start, vmin_orig): if not has_level_label(month_start, vmin_orig): info_fmt[first_label(week_start)] = '\n\n%b\n%Y' else: info_fmt[first_label(month_start)] = '\n\n%b\n%Y' elif span <= 1.15 * periodsperyear: year_start = _period_break(dates_, 'year') month_start = _period_break(dates_, 'month') week_start = _period_break(dates_, 'week') info_maj[month_start] = True info_min[week_start] = True info_min[year_start] = False info_min[month_start] = False info_fmt[month_start] = '%b' info_fmt[year_start] = '%b\n%Y' if not has_level_label(year_start, vmin_orig): info_fmt[first_label(month_start)] = '%b\n%Y' elif span <= 2.5 * periodsperyear: year_start = _period_break(dates_, 'year') quarter_start = _period_break(dates_, 'quarter') month_start = _period_break(dates_, 'month') info_maj[quarter_start] = True info_min[month_start] = True info_fmt[quarter_start] = '%b' info_fmt[year_start] = '%b\n%Y' elif span <= 4 * periodsperyear: year_start = _period_break(dates_, 'year') month_start = _period_break(dates_, 'month') info_maj[year_start] = True info_min[month_start] = True info_min[year_start] = False month_break = dates_[month_start].month jan_or_jul = month_start[(month_break == 1) | (month_break == 7)] info_fmt[jan_or_jul] = '%b' info_fmt[year_start] = '%b\n%Y' elif span <= 11 * periodsperyear: year_start = _period_break(dates_, 'year') quarter_start = _period_break(dates_, 'quarter') info_maj[year_start] = True info_min[quarter_start] = True info_min[year_start] = False info_fmt[year_start] = '%Y' else: year_start = _period_break(dates_, 'year') year_break 
= dates_[year_start].year nyears = span / periodsperyear (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears) major_idx = year_start[year_break % maj_anndef == 0] info_maj[major_idx] = True minor_idx = year_start[year_break % min_anndef == 0] info_min[minor_idx] = True info_fmt[major_idx] = '%Y' return info @functools.cache def _monthly_finder(vmin: float, vmax: float, freq: BaseOffset) -> np.ndarray: (_, _, periodsperyear) = _get_periods_per_ymd(freq) vmin_orig = vmin (vmin, vmax) = (int(vmin), int(vmax)) span = vmax - vmin + 1 info = np.zeros(span, dtype=[('val', int), ('maj', bool), ('min', bool), ('fmt', '|S8')]) info['val'] = np.arange(vmin, vmax + 1) dates_ = info['val'] info['fmt'] = '' year_start = (dates_ % 12 == 0).nonzero()[0] info_maj = info['maj'] info_fmt = info['fmt'] if span <= 1.15 * periodsperyear: info_maj[year_start] = True info['min'] = True info_fmt[:] = '%b' info_fmt[year_start] = '%b\n%Y' if not has_level_label(year_start, vmin_orig): if dates_.size > 1: idx = 1 else: idx = 0 info_fmt[idx] = '%b\n%Y' elif span <= 2.5 * periodsperyear: quarter_start = (dates_ % 3 == 0).nonzero() info_maj[year_start] = True info['fmt'][quarter_start] = True info['min'] = True info_fmt[quarter_start] = '%b' info_fmt[year_start] = '%b\n%Y' elif span <= 4 * periodsperyear: info_maj[year_start] = True info['min'] = True jan_or_jul = (dates_ % 12 == 0) | (dates_ % 12 == 6) info_fmt[jan_or_jul] = '%b' info_fmt[year_start] = '%b\n%Y' elif span <= 11 * periodsperyear: quarter_start = (dates_ % 3 == 0).nonzero() info_maj[year_start] = True info['min'][quarter_start] = True info_fmt[year_start] = '%Y' else: nyears = span / periodsperyear (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears) years = dates_[year_start] // 12 + 1 major_idx = year_start[years % maj_anndef == 0] info_maj[major_idx] = True info['min'][year_start[years % min_anndef == 0]] = True info_fmt[major_idx] = '%Y' return info @functools.cache def _quarterly_finder(vmin: float, vmax: float, freq: BaseOffset) -> np.ndarray: (_, _, periodsperyear) = _get_periods_per_ymd(freq) vmin_orig = vmin (vmin, vmax) = (int(vmin), int(vmax)) span = vmax - vmin + 1 info = np.zeros(span, dtype=[('val', int), ('maj', bool), ('min', bool), ('fmt', '|S8')]) info['val'] = np.arange(vmin, vmax + 1) info['fmt'] = '' dates_ = info['val'] info_maj = info['maj'] info_fmt = info['fmt'] year_start = (dates_ % 4 == 0).nonzero()[0] if span <= 3.5 * periodsperyear: info_maj[year_start] = True info['min'] = True info_fmt[:] = 'Q%q' info_fmt[year_start] = 'Q%q\n%F' if not has_level_label(year_start, vmin_orig): if dates_.size > 1: idx = 1 else: idx = 0 info_fmt[idx] = 'Q%q\n%F' elif span <= 11 * periodsperyear: info_maj[year_start] = True info['min'] = True info_fmt[year_start] = '%F' else: years = dates_[year_start] // 4 + 1970 nyears = span / periodsperyear (min_anndef, maj_anndef) = _get_default_annual_spacing(nyears) major_idx = year_start[years % maj_anndef == 0] info_maj[major_idx] = True info['min'][year_start[years % min_anndef == 0]] = True info_fmt[major_idx] = '%F' return info @functools.cache def _annual_finder(vmin: float, vmax: float, freq: BaseOffset) -> np.ndarray: (vmin, vmax) = (int(vmin), int(vmax + 1)) span = vmax - vmin + 1 info = np.zeros(span, dtype=[('val', int), ('maj', bool), ('min', bool), ('fmt', '|S8')]) info['val'] = np.arange(vmin, vmax + 1) info['fmt'] = '' dates_ = info['val'] (min_anndef, maj_anndef) = _get_default_annual_spacing(span) major_idx = dates_ % maj_anndef == 0 minor_idx = dates_ % min_anndef == 
0 info['maj'][major_idx] = True info['min'][minor_idx] = True info['fmt'][major_idx] = '%Y' return info def get_finder(freq: BaseOffset): dtype_code = freq._period_dtype_code fgroup = FreqGroup.from_period_dtype_code(dtype_code) if fgroup == FreqGroup.FR_ANN: return _annual_finder elif fgroup == FreqGroup.FR_QTR: return _quarterly_finder elif fgroup == FreqGroup.FR_MTH: return _monthly_finder elif dtype_code >= FreqGroup.FR_BUS.value or fgroup == FreqGroup.FR_WK: return _daily_finder else: raise NotImplementedError(f'Unsupported frequency: {dtype_code}') class TimeSeries_DateLocator(mpl.ticker.Locator): axis: Axis def __init__(self, freq: BaseOffset, minor_locator: bool=False, dynamic_mode: bool=True, base: int=1, quarter: int=1, month: int=1, day: int=1, plot_obj=None) -> None: freq = to_offset(freq, is_period=True) self.freq = freq self.base = base (self.quarter, self.month, self.day) = (quarter, month, day) self.isminor = minor_locator self.isdynamic = dynamic_mode self.offset = 0 self.plot_obj = plot_obj self.finder = get_finder(freq) def _get_default_locs(self, vmin, vmax): locator = self.finder(vmin, vmax, self.freq) if self.isminor: return np.compress(locator['min'], locator['val']) return np.compress(locator['maj'], locator['val']) def __call__(self): vi = tuple(self.axis.get_view_interval()) (vmin, vmax) = vi if vmax < vmin: (vmin, vmax) = (vmax, vmin) if self.isdynamic: locs = self._get_default_locs(vmin, vmax) else: base = self.base (d, m) = divmod(vmin, base) vmin = (d + 1) * base locs = list(range(vmin, vmax + 1, base)) return locs def autoscale(self): (vmin, vmax) = self.axis.get_data_interval() locs = self._get_default_locs(vmin, vmax) (vmin, vmax) = locs[[0, -1]] if vmin == vmax: vmin -= 1 vmax += 1 return mpl.transforms.nonsingular(vmin, vmax) class TimeSeries_DateFormatter(mpl.ticker.Formatter): axis: Axis def __init__(self, freq: BaseOffset, minor_locator: bool=False, dynamic_mode: bool=True, plot_obj=None) -> None: freq = to_offset(freq, is_period=True) self.format = None self.freq = freq self.locs: list[Any] = [] self.formatdict: dict[Any, Any] | None = None self.isminor = minor_locator self.isdynamic = dynamic_mode self.offset = 0 self.plot_obj = plot_obj self.finder = get_finder(freq) def _set_default_format(self, vmin, vmax): info = self.finder(vmin, vmax, self.freq) if self.isminor: format = np.compress(info['min'] & np.logical_not(info['maj']), info) else: format = np.compress(info['maj'], info) self.formatdict = {x: f for (x, _, _, f) in format} return self.formatdict def set_locs(self, locs) -> None: self.locs = locs (vmin, vmax) = tuple(self.axis.get_view_interval()) if vmax < vmin: (vmin, vmax) = (vmax, vmin) self._set_default_format(vmin, vmax) def __call__(self, x, pos: int | None=0) -> str: if self.formatdict is None: return '' else: fmt = self.formatdict.pop(x, '') if isinstance(fmt, np.bytes_): fmt = fmt.decode('utf-8') with warnings.catch_warnings(): warnings.filterwarnings('ignore', 'Period with BDay freq is deprecated', category=FutureWarning) period = Period(ordinal=int(x), freq=self.freq) assert isinstance(period, Period) return period.strftime(fmt) class TimeSeries_TimedeltaFormatter(mpl.ticker.Formatter): axis: Axis @staticmethod def format_timedelta_ticks(x, pos, n_decimals: int) -> str: (s, ns) = divmod(x, 10 ** 9) (m, s) = divmod(s, 60) (h, m) = divmod(m, 60) (d, h) = divmod(h, 24) decimals = int(ns * 10 ** (n_decimals - 9)) s = f'{int(h):02d}:{int(m):02d}:{int(s):02d}' if n_decimals > 0: s += f'.{decimals:0{n_decimals}d}' if d != 0: s = 
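# --- Illustrative sketch (added; assumed to mirror the wiring that format_dateaxis in
# pandas.plotting._matplotlib.timeseries performs, not the source itself) ---
# TimeSeries_DateLocator/TimeSeries_DateFormatter pick and label ticks for axes whose
# x-values are Period ordinals, using the freq-aware finder functions above.
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting._matplotlib.converter import (
    TimeSeries_DateFormatter,
    TimeSeries_DateLocator,
)

idx = pd.period_range("2020-01", periods=36, freq="M")
fig, ax = plt.subplots()
ax.plot(idx.asi8, range(len(idx)))  # x-values are monthly Period ordinals
ax.xaxis.set_major_locator(TimeSeries_DateLocator("M", dynamic_mode=True))
ax.xaxis.set_major_formatter(TimeSeries_DateFormatter("M", dynamic_mode=True))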
f'{int(d):d} days {s}' return s def __call__(self, x, pos: int | None=0) -> str: (vmin, vmax) = tuple(self.axis.get_view_interval()) n_decimals = min(int(np.ceil(np.log10(100 * 10 ** 9 / abs(vmax - vmin)))), 9) return self.format_timedelta_ticks(x, pos, n_decimals) # File: pandas-main/pandas/plotting/_matplotlib/core.py from __future__ import annotations from abc import ABC, abstractmethod from collections.abc import Hashable, Iterable, Iterator, Sequence from typing import TYPE_CHECKING, Any, Literal, cast, final import warnings import matplotlib as mpl import numpy as np from pandas._libs import lib from pandas.errors import AbstractMethodError from pandas.util._decorators import cache_readonly from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_any_real_numeric_dtype, is_bool, is_float, is_float_dtype, is_hashable, is_integer, is_integer_dtype, is_iterator, is_list_like, is_number, is_numeric_dtype from pandas.core.dtypes.dtypes import CategoricalDtype, ExtensionDtype from pandas.core.dtypes.generic import ABCDataFrame, ABCDatetimeIndex, ABCIndex, ABCMultiIndex, ABCPeriodIndex, ABCSeries from pandas.core.dtypes.missing import isna import pandas.core.common as com from pandas.util.version import Version from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib import tools from pandas.plotting._matplotlib.converter import register_pandas_matplotlib_converters from pandas.plotting._matplotlib.groupby import reconstruct_data_with_by from pandas.plotting._matplotlib.misc import unpack_single_str_list from pandas.plotting._matplotlib.style import get_standard_colors from pandas.plotting._matplotlib.timeseries import decorate_axes, format_dateaxis, maybe_convert_index, maybe_resample, use_dynamic_x from pandas.plotting._matplotlib.tools import create_subplots, flatten_axes, format_date_labels, get_all_lines, get_xlim, handle_shared_axes if TYPE_CHECKING: from matplotlib.artist import Artist from matplotlib.axes import Axes from matplotlib.axis import Axis from matplotlib.figure import Figure from pandas._typing import IndexLabel, NDFrameT, PlottingOrientation, npt from pandas import DataFrame, Index, Series def holds_integer(column: Index) -> bool: return column.inferred_type in {'integer', 'mixed-integer'} def _color_in_style(style: str) -> bool: return not set(mpl.colors.BASE_COLORS).isdisjoint(style) class MPLPlot(ABC): @property @abstractmethod def _kind(self) -> str: raise NotImplementedError _layout_type = 'vertical' _default_rot = 0 @property def orientation(self) -> str | None: return None data: DataFrame def __init__(self, data, kind=None, by: IndexLabel | None=None, subplots: bool | Sequence[Sequence[str]]=False, sharex: bool | None=None, sharey: bool=False, use_index: bool=True, figsize: tuple[float, float] | None=None, grid=None, legend: bool | str=True, rot=None, ax=None, fig=None, title=None, xlim=None, ylim=None, xticks=None, yticks=None, xlabel: Hashable | None=None, ylabel: Hashable | None=None, fontsize: int | None=None, secondary_y: bool | tuple | list | np.ndarray=False, colormap=None, table: bool=False, layout=None, include_bool: bool=False, column: IndexLabel | None=None, *, logx: bool | None | Literal['sym']=False, logy: bool | None | Literal['sym']=False, loglog: bool | None | Literal['sym']=False, mark_right: bool=True, stacked: bool=False, label: Hashable | None=None, style=None, **kwds) -> None: if by in ([], ()): raise ValueError('No group keys passed!') self.by = com.maybe_make_list(by) if 
isinstance(data, ABCDataFrame): if column: self.columns = com.maybe_make_list(column) elif self.by is None: self.columns = [col for col in data.columns if is_numeric_dtype(data[col])] else: self.columns = [col for col in data.columns if col not in self.by and is_numeric_dtype(data[col])] if self.by is not None and self._kind == 'hist': self._grouped = data.groupby(unpack_single_str_list(self.by)) self.kind = kind self.subplots = type(self)._validate_subplots_kwarg(subplots, data, kind=self._kind) self.sharex = type(self)._validate_sharex(sharex, ax, by) self.sharey = sharey self.figsize = figsize self.layout = layout self.xticks = xticks self.yticks = yticks self.xlim = xlim self.ylim = ylim self.title = title self.use_index = use_index self.xlabel = xlabel self.ylabel = ylabel self.fontsize = fontsize if rot is not None: self.rot = rot self._rot_set = True else: self._rot_set = False self.rot = self._default_rot if grid is None: grid = False if secondary_y else mpl.rcParams['axes.grid'] self.grid = grid self.legend = legend self.legend_handles: list[Artist] = [] self.legend_labels: list[Hashable] = [] self.logx = type(self)._validate_log_kwd('logx', logx) self.logy = type(self)._validate_log_kwd('logy', logy) self.loglog = type(self)._validate_log_kwd('loglog', loglog) self.label = label self.style = style self.mark_right = mark_right self.stacked = stacked self.ax = ax xerr = kwds.pop('xerr', None) yerr = kwds.pop('yerr', None) nseries = self._get_nseries(data) (xerr, data) = type(self)._parse_errorbars('xerr', xerr, data, nseries) (yerr, data) = type(self)._parse_errorbars('yerr', yerr, data, nseries) self.errors = {'xerr': xerr, 'yerr': yerr} self.data = data if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, ABCIndex)): secondary_y = [secondary_y] self.secondary_y = secondary_y if 'cmap' in kwds and colormap: raise TypeError('Only specify one of `cmap` and `colormap`.') if 'cmap' in kwds: self.colormap = kwds.pop('cmap') else: self.colormap = colormap self.table = table self.include_bool = include_bool self.kwds = kwds color = kwds.pop('color', lib.no_default) self.color = self._validate_color_args(color, self.colormap) assert 'color' not in self.kwds self.data = self._ensure_frame(self.data) @final @staticmethod def _validate_sharex(sharex: bool | None, ax, by) -> bool: if sharex is None: if ax is None and by is None: sharex = True else: sharex = False elif not is_bool(sharex): raise TypeError('sharex must be a bool or None') return bool(sharex) @classmethod def _validate_log_kwd(cls, kwd: str, value: bool | None | Literal['sym']) -> bool | None | Literal['sym']: if value is None or isinstance(value, bool) or (isinstance(value, str) and value == 'sym'): return value raise ValueError(f"keyword '{kwd}' should be bool, None, or 'sym', not '{value}'") @final @staticmethod def _validate_subplots_kwarg(subplots: bool | Sequence[Sequence[str]], data: Series | DataFrame, kind: str) -> bool | list[tuple[int, ...]]: if isinstance(subplots, bool): return subplots elif not isinstance(subplots, Iterable): raise ValueError('subplots should be a bool or an iterable') supported_kinds = ('line', 'bar', 'barh', 'hist', 'kde', 'density', 'area', 'pie') if kind not in supported_kinds: raise ValueError(f"When subplots is an iterable, kind must be one of {', '.join(supported_kinds)}. 
Got {kind}.") if isinstance(data, ABCSeries): raise NotImplementedError('An iterable subplots for a Series is not supported.') columns = data.columns if isinstance(columns, ABCMultiIndex): raise NotImplementedError('An iterable subplots for a DataFrame with a MultiIndex column is not supported.') if columns.nunique() != len(columns): raise NotImplementedError('An iterable subplots for a DataFrame with non-unique column labels is not supported.') out = [] seen_columns: set[Hashable] = set() for group in subplots: if not is_list_like(group): raise ValueError('When subplots is an iterable, each entry should be a list/tuple of column names.') idx_locs = columns.get_indexer_for(group) if (idx_locs == -1).any(): bad_labels = np.extract(idx_locs == -1, group) raise ValueError(f'Column label(s) {list(bad_labels)} not found in the DataFrame.') unique_columns = set(group) duplicates = seen_columns.intersection(unique_columns) if duplicates: raise ValueError(f'Each column should be in only one subplot. Columns {duplicates} were found in multiple subplots.') seen_columns = seen_columns.union(unique_columns) out.append(tuple(idx_locs)) unseen_columns = columns.difference(seen_columns) for column in unseen_columns: idx_loc = columns.get_loc(column) out.append((idx_loc,)) return out def _validate_color_args(self, color, colormap): if color is lib.no_default: if 'colors' in self.kwds and colormap is not None: warnings.warn("'color' and 'colormap' cannot be used simultaneously. Using 'color'", stacklevel=find_stack_level()) return None if self.nseries == 1 and color is not None and (not is_list_like(color)): color = [color] if isinstance(color, tuple) and self.nseries == 1 and (len(color) in (3, 4)): color = [color] if colormap is not None: warnings.warn("'color' and 'colormap' cannot be used simultaneously. Using 'color'", stacklevel=find_stack_level()) if self.style is not None: if isinstance(self.style, dict): styles = [self.style[col] for col in self.columns if col in self.style] elif is_list_like(self.style): styles = self.style else: styles = [self.style] for s in styles: if _color_in_style(s): raise ValueError("Cannot pass 'style' string with a color symbol and 'color' keyword argument. 
Please use one or the other or pass 'style' without a color symbol") return color @final @staticmethod def _iter_data(data: DataFrame | dict[Hashable, Series | DataFrame]) -> Iterator[tuple[Hashable, np.ndarray]]: for (col, values) in data.items(): yield (col, np.asarray(values.values)) def _get_nseries(self, data: Series | DataFrame) -> int: if data.ndim == 1: return 1 elif self.by is not None and self._kind == 'hist': return len(self._grouped) elif self.by is not None and self._kind == 'box': return len(self.columns) else: return data.shape[1] @final @property def nseries(self) -> int: return self._get_nseries(self.data) @final def generate(self) -> None: self._compute_plot_data() fig = self.fig self._make_plot(fig) self._add_table() self._make_legend() self._adorn_subplots(fig) for ax in self.axes: self._post_plot_logic_common(ax) self._post_plot_logic(ax, self.data) @final @staticmethod def _has_plotted_object(ax: Axes) -> bool: return len(ax.lines) != 0 or len(ax.artists) != 0 or len(ax.containers) != 0 @final def _maybe_right_yaxis(self, ax: Axes, axes_num: int) -> Axes: if not self.on_right(axes_num): return self._get_ax_layer(ax) if hasattr(ax, 'right_ax'): return ax.right_ax elif hasattr(ax, 'left_ax'): return ax else: (orig_ax, new_ax) = (ax, ax.twinx()) new_ax._get_lines = orig_ax._get_lines new_ax._get_patches_for_fill = orig_ax._get_patches_for_fill (orig_ax.right_ax, new_ax.left_ax) = (new_ax, orig_ax) if not self._has_plotted_object(orig_ax): orig_ax.get_yaxis().set_visible(False) if self.logy is True or self.loglog is True: new_ax.set_yscale('log') elif self.logy == 'sym' or self.loglog == 'sym': new_ax.set_yscale('symlog') return new_ax @final @cache_readonly def fig(self) -> Figure: return self._axes_and_fig[1] @final @cache_readonly def axes(self) -> Sequence[Axes]: return self._axes_and_fig[0] @final @cache_readonly def _axes_and_fig(self) -> tuple[Sequence[Axes], Figure]: import matplotlib.pyplot as plt if self.subplots: naxes = self.nseries if isinstance(self.subplots, bool) else len(self.subplots) (fig, axes) = create_subplots(naxes=naxes, sharex=self.sharex, sharey=self.sharey, figsize=self.figsize, ax=self.ax, layout=self.layout, layout_type=self._layout_type) elif self.ax is None: fig = plt.figure(figsize=self.figsize) axes = fig.add_subplot(111) else: fig = self.ax.get_figure() if self.figsize is not None: fig.set_size_inches(self.figsize) axes = self.ax axes = np.fromiter(flatten_axes(axes), dtype=object) if self.logx is True or self.loglog is True: [a.set_xscale('log') for a in axes] elif self.logx == 'sym' or self.loglog == 'sym': [a.set_xscale('symlog') for a in axes] if self.logy is True or self.loglog is True: [a.set_yscale('log') for a in axes] elif self.logy == 'sym' or self.loglog == 'sym': [a.set_yscale('symlog') for a in axes] axes_seq = cast(Sequence['Axes'], axes) return (axes_seq, fig) @property def result(self): if self.subplots: if self.layout is not None and (not is_list_like(self.ax)): return self.axes.reshape(*self.layout) else: return self.axes else: sec_true = isinstance(self.secondary_y, bool) and self.secondary_y all_sec = is_list_like(self.secondary_y) and len(self.secondary_y) == self.nseries if sec_true or all_sec: return self._get_ax_layer(self.axes[0], primary=False) else: return self.axes[0] @final @staticmethod def _convert_to_ndarray(data): if isinstance(data.dtype, CategoricalDtype): return data if (is_integer_dtype(data.dtype) or is_float_dtype(data.dtype)) and isinstance(data.dtype, ExtensionDtype): return 
data.to_numpy(dtype='float', na_value=np.nan) if len(data) > 0: return np.asarray(data) return data @final def _ensure_frame(self, data) -> DataFrame: if isinstance(data, ABCSeries): label = self.label if label is None and data.name is None: label = '' if label is None: data = data.to_frame() else: data = data.to_frame(name=label) elif self._kind in ('hist', 'box'): cols = self.columns if self.by is None else self.columns + self.by data = data.loc[:, cols] return data @final def _compute_plot_data(self) -> None: data = self.data if self.by is not None: self.subplots = True data = reconstruct_data_with_by(self.data, by=self.by, cols=self.columns) data = data.infer_objects() include_type = [np.number, 'datetime', 'datetimetz', 'timedelta'] if self.include_bool is True: include_type.append(np.bool_) exclude_type = None if self._kind == 'box': include_type = [np.number] exclude_type = ['timedelta'] if self._kind == 'scatter': include_type.extend(['object', 'category', 'string']) numeric_data = data.select_dtypes(include=include_type, exclude=exclude_type) is_empty = numeric_data.shape[-1] == 0 if is_empty: raise TypeError('no numeric data to plot') self.data = numeric_data.apply(type(self)._convert_to_ndarray) def _make_plot(self, fig: Figure) -> None: raise AbstractMethodError(self) @final def _add_table(self) -> None: if self.table is False: return elif self.table is True: data = self.data.transpose() else: data = self.table ax = self._get_ax(0) tools.table(ax, data) @final def _post_plot_logic_common(self, ax: Axes) -> None: if self.orientation == 'vertical' or self.orientation is None: type(self)._apply_axis_properties(ax.xaxis, rot=self.rot, fontsize=self.fontsize) type(self)._apply_axis_properties(ax.yaxis, fontsize=self.fontsize) if hasattr(ax, 'right_ax'): type(self)._apply_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize) elif self.orientation == 'horizontal': type(self)._apply_axis_properties(ax.yaxis, rot=self.rot, fontsize=self.fontsize) type(self)._apply_axis_properties(ax.xaxis, fontsize=self.fontsize) if hasattr(ax, 'right_ax'): type(self)._apply_axis_properties(ax.right_ax.yaxis, fontsize=self.fontsize) else: raise ValueError @abstractmethod def _post_plot_logic(self, ax: Axes, data) -> None: @final def _adorn_subplots(self, fig: Figure) -> None: if len(self.axes) > 0: all_axes = self._get_subplots(fig) (nrows, ncols) = self._get_axes_layout(fig) handle_shared_axes(axarr=all_axes, nplots=len(all_axes), naxes=nrows * ncols, nrows=nrows, ncols=ncols, sharex=self.sharex, sharey=self.sharey) for ax in self.axes: ax = getattr(ax, 'right_ax', ax) if self.yticks is not None: ax.set_yticks(self.yticks) if self.xticks is not None: ax.set_xticks(self.xticks) if self.ylim is not None: ax.set_ylim(self.ylim) if self.xlim is not None: ax.set_xlim(self.xlim) if self.ylabel is not None: ax.set_ylabel(pprint_thing(self.ylabel)) ax.grid(self.grid) if self.title: if self.subplots: if is_list_like(self.title): if len(self.title) != self.nseries: raise ValueError(f'The length of `title` must equal the number of columns if using `title` of type `list` and `subplots=True`.\nlength of title = {len(self.title)}\nnumber of columns = {self.nseries}') for (ax, title) in zip(self.axes, self.title): ax.set_title(title) else: fig.suptitle(self.title) else: if is_list_like(self.title): msg = 'Using `title` of type `list` is not supported unless `subplots=True` is passed' raise ValueError(msg) self.axes[0].set_title(self.title) @final @staticmethod def _apply_axis_properties(axis: Axis, rot=None, 
fontsize: int | None=None) -> None: if rot is not None or fontsize is not None: labels = axis.get_majorticklabels() + axis.get_minorticklabels() for label in labels: if rot is not None: label.set_rotation(rot) if fontsize is not None: label.set_fontsize(fontsize) @final @property def legend_title(self) -> str | None: if not isinstance(self.data.columns, ABCMultiIndex): name = self.data.columns.name if name is not None: name = pprint_thing(name) return name else: stringified = map(pprint_thing, self.data.columns.names) return ','.join(stringified) @final def _mark_right_label(self, label: str, index: int) -> str: if not self.subplots and self.mark_right and self.on_right(index): label += ' (right)' return label @final def _append_legend_handles_labels(self, handle: Artist, label: str) -> None: self.legend_handles.append(handle) self.legend_labels.append(label) def _make_legend(self) -> None: (ax, leg) = self._get_ax_legend(self.axes[0]) handles = [] labels = [] title = '' if not self.subplots: if leg is not None: title = leg.get_title().get_text() if Version(mpl.__version__) < Version('3.7'): handles = leg.legendHandles else: handles = leg.legend_handles labels = [x.get_text() for x in leg.get_texts()] if self.legend: if self.legend == 'reverse': handles += reversed(self.legend_handles) labels += reversed(self.legend_labels) else: handles += self.legend_handles labels += self.legend_labels if self.legend_title is not None: title = self.legend_title if len(handles) > 0: ax.legend(handles, labels, loc='best', title=title) elif self.subplots and self.legend: for ax in self.axes: if ax.get_visible(): with warnings.catch_warnings(): warnings.filterwarnings('ignore', 'No artists with labels found to put in legend.', UserWarning) ax.legend(loc='best') @final @staticmethod def _get_ax_legend(ax: Axes): leg = ax.get_legend() other_ax = getattr(ax, 'left_ax', None) or getattr(ax, 'right_ax', None) other_leg = None if other_ax is not None: other_leg = other_ax.get_legend() if leg is None and other_leg is not None: leg = other_leg ax = other_ax return (ax, leg) _need_to_set_index = False @final def _get_xticks(self): index = self.data.index is_datetype = index.inferred_type in ('datetime', 'date', 'datetime64', 'time') x: list[int] | np.ndarray if self.use_index: if isinstance(index, ABCPeriodIndex): x = index.to_timestamp()._mpl_repr() elif is_any_real_numeric_dtype(index.dtype): x = index._mpl_repr() elif isinstance(index, ABCDatetimeIndex) or is_datetype: x = index._mpl_repr() else: self._need_to_set_index = True x = list(range(len(index))) else: x = list(range(len(index))) return x @classmethod @register_pandas_matplotlib_converters def _plot(cls, ax: Axes, x, y: np.ndarray, style=None, is_errorbar: bool=False, **kwds): mask = isna(y) if mask.any(): y = np.ma.array(y) y = np.ma.masked_where(mask, y) if isinstance(x, ABCIndex): x = x._mpl_repr() if is_errorbar: if 'xerr' in kwds: kwds['xerr'] = np.array(kwds.get('xerr')) if 'yerr' in kwds: kwds['yerr'] = np.array(kwds.get('yerr')) return ax.errorbar(x, y, **kwds) else: args = (x, y, style) if style is not None else (x, y) return ax.plot(*args, **kwds) def _get_custom_index_name(self): return self.xlabel @final def _get_index_name(self) -> str | None: if isinstance(self.data.index, ABCMultiIndex): name = self.data.index.names if com.any_not_none(*name): name = ','.join([pprint_thing(x) for x in name]) else: name = None else: name = self.data.index.name if name is not None: name = pprint_thing(name) index_name = self._get_custom_index_name() if 
index_name is not None: name = pprint_thing(index_name) return name @final @classmethod def _get_ax_layer(cls, ax, primary: bool=True): if primary: return getattr(ax, 'left_ax', ax) else: return getattr(ax, 'right_ax', ax) @final def _col_idx_to_axis_idx(self, col_idx: int) -> int: if isinstance(self.subplots, list): return next((group_idx for (group_idx, group) in enumerate(self.subplots) if col_idx in group)) else: return col_idx @final def _get_ax(self, i: int) -> Axes: if self.subplots: i = self._col_idx_to_axis_idx(i) ax = self.axes[i] ax = self._maybe_right_yaxis(ax, i) self.axes[i] = ax else: ax = self.axes[0] ax = self._maybe_right_yaxis(ax, i) ax.get_yaxis().set_visible(True) return ax @final def on_right(self, i: int) -> bool: if isinstance(self.secondary_y, bool): return self.secondary_y if isinstance(self.secondary_y, (tuple, list, np.ndarray, ABCIndex)): return self.data.columns[i] in self.secondary_y @final def _apply_style_colors(self, colors, kwds: dict[str, Any], col_num: int, label: str): style = None if self.style is not None: if isinstance(self.style, list): try: style = self.style[col_num] except IndexError: pass elif isinstance(self.style, dict): style = self.style.get(label, style) else: style = self.style has_color = 'color' in kwds or self.colormap is not None nocolor_style = style is None or not _color_in_style(style) if (has_color or self.subplots) and nocolor_style: if isinstance(colors, dict): kwds['color'] = colors[label] else: kwds['color'] = colors[col_num % len(colors)] return (style, kwds) def _get_colors(self, num_colors: int | None=None, color_kwds: str='color'): if num_colors is None: num_colors = self.nseries if color_kwds == 'color': color = self.color else: color = self.kwds.get(color_kwds) return get_standard_colors(num_colors=num_colors, colormap=self.colormap, color=color) @final @staticmethod def _parse_errorbars(label: str, err, data: NDFrameT, nseries: int) -> tuple[Any, NDFrameT]: if err is None: return (None, data) def match_labels(data, e): e = e.reindex(data.index) return e if isinstance(err, ABCDataFrame): err = match_labels(data, err) elif isinstance(err, dict): pass elif isinstance(err, ABCSeries): err = match_labels(data, err) err = np.atleast_2d(err) err = np.tile(err, (nseries, 1)) elif isinstance(err, str): evalues = data[err].values data = data[data.columns.drop(err)] err = np.atleast_2d(evalues) err = np.tile(err, (nseries, 1)) elif is_list_like(err): if is_iterator(err): err = np.atleast_2d(list(err)) else: err = np.atleast_2d(err) err_shape = err.shape if isinstance(data, ABCSeries) and err_shape[0] == 2: err = np.expand_dims(err, 0) err_shape = err.shape if err_shape[2] != len(data): raise ValueError(f'Asymmetrical error bars should be provided with the shape (2, {len(data)})') elif isinstance(data, ABCDataFrame) and err.ndim == 3: if err_shape[0] != nseries or err_shape[1] != 2 or err_shape[2] != len(data): raise ValueError(f'Asymmetrical error bars should be provided with the shape ({nseries}, 2, {len(data)})') if len(err) == 1: err = np.tile(err, (nseries, 1)) elif is_number(err): err = np.tile([err], (nseries, len(data))) else: msg = f'No valid {label} detected' raise ValueError(msg) return (err, data) @final def _get_errorbars(self, label=None, index=None, xerr: bool=True, yerr: bool=True) -> dict[str, Any]: errors = {} for (kw, flag) in zip(['xerr', 'yerr'], [xerr, yerr]): if flag: err = self.errors[kw] if isinstance(err, (ABCDataFrame, dict)): if label is not None and label in err.keys(): err = err[label] else: err = None 
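# --- Illustrative usage sketch (added; not part of the source) ---
# Error-bar inputs handled by _parse_errorbars above: a scalar, a column name, a dict,
# an index-aligned Series/DataFrame, or an array; asymmetric errors are (2, N) for a
# Series and (nseries, 2, N) for a DataFrame.
import numpy as np
import pandas as pd

s = pd.Series([1.0, 2.0, 3.0])
asym_err = np.array([[0.1, 0.2, 0.1],   # lower errors
                     [0.3, 0.1, 0.2]])  # upper errors, shape (2, len(s))
ax = s.plot(yerr=asym_err, capsize=3)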
elif index is not None and err is not None: err = err[index] if err is not None: errors[kw] = err return errors @final def _get_subplots(self, fig: Figure) -> list[Axes]: if Version(mpl.__version__) < Version('3.8'): Klass = mpl.axes.Subplot else: Klass = mpl.axes.Axes return [ax for ax in fig.get_axes() if isinstance(ax, Klass) and ax.get_subplotspec() is not None] @final def _get_axes_layout(self, fig: Figure) -> tuple[int, int]: axes = self._get_subplots(fig) x_set = set() y_set = set() for ax in axes: points = ax.get_position().get_points() x_set.add(points[0][0]) y_set.add(points[0][1]) return (len(y_set), len(x_set)) class PlanePlot(MPLPlot, ABC): _layout_type = 'single' def __init__(self, data, x, y, **kwargs) -> None: MPLPlot.__init__(self, data, **kwargs) if x is None or y is None: raise ValueError(self._kind + ' requires an x and y column') if is_integer(x) and (not holds_integer(self.data.columns)): x = self.data.columns[x] if is_integer(y) and (not holds_integer(self.data.columns)): y = self.data.columns[y] self.x = x self.y = y @final def _get_nseries(self, data: Series | DataFrame) -> int: return 1 @final def _post_plot_logic(self, ax: Axes, data) -> None: (x, y) = (self.x, self.y) xlabel = self.xlabel if self.xlabel is not None else pprint_thing(x) ylabel = self.ylabel if self.ylabel is not None else pprint_thing(y) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) @final def _plot_colorbar(self, ax: Axes, *, fig: Figure, **kwds): img = ax.collections[-1] return fig.colorbar(img, ax=ax, **kwds) class ScatterPlot(PlanePlot): @property def _kind(self) -> Literal['scatter']: return 'scatter' def __init__(self, data, x, y, s=None, c=None, *, colorbar: bool | lib.NoDefault=lib.no_default, norm=None, **kwargs) -> None: if s is None: s = 20 elif is_hashable(s) and s in data.columns: s = data[s] self.s = s self.colorbar = colorbar self.norm = norm super().__init__(data, x, y, **kwargs) if is_integer(c) and (not holds_integer(self.data.columns)): c = self.data.columns[c] self.c = c def _make_plot(self, fig: Figure) -> None: (x, y, c, data) = (self.x, self.y, self.c, self.data) ax = self.axes[0] c_is_column = is_hashable(c) and c in self.data.columns color_by_categorical = c_is_column and isinstance(self.data[c].dtype, CategoricalDtype) color = self.color c_values = self._get_c_values(color, color_by_categorical, c_is_column) (norm, cmap) = self._get_norm_and_cmap(c_values, color_by_categorical) cb = self._get_colorbar(c_values, c_is_column) if self.legend: label = self.label else: label = None create_colors = not self._are_valid_colors(c_values) if create_colors: color_mapping = self._get_color_mapping(c_values) c_values = [color_mapping[s] for s in c_values] ax.legend(handles=[mpl.patches.Circle((0, 0), facecolor=c, label=s) for (s, c) in color_mapping.items()]) scatter = ax.scatter(data[x].values, data[y].values, c=c_values, label=label, cmap=cmap, norm=norm, s=self.s, **self.kwds) if cb: cbar_label = c if c_is_column else '' cbar = self._plot_colorbar(ax, fig=fig, label=cbar_label) if color_by_categorical: n_cats = len(self.data[c].cat.categories) cbar.set_ticks(np.linspace(0.5, n_cats - 0.5, n_cats)) cbar.ax.set_yticklabels(self.data[c].cat.categories) if label is not None: self._append_legend_handles_labels(scatter, label) errors_x = self._get_errorbars(label=x, index=0, yerr=False) errors_y = self._get_errorbars(label=y, index=0, xerr=False) if len(errors_x) > 0 or len(errors_y) > 0: err_kwds = dict(errors_x, **errors_y) err_kwds['ecolor'] = scatter.get_facecolor()[0] 
ax.errorbar(data[x].values, data[y].values, linestyle='none', **err_kwds) def _get_c_values(self, color, color_by_categorical: bool, c_is_column: bool): c = self.c if c is not None and color is not None: raise TypeError('Specify exactly one of `c` and `color`') if c is None and color is None: c_values = mpl.rcParams['patch.facecolor'] elif color is not None: c_values = color elif color_by_categorical: c_values = self.data[c].cat.codes elif c_is_column: c_values = self.data[c].values else: c_values = c return c_values def _are_valid_colors(self, c_values: Series) -> bool: unique = np.unique(c_values) try: if len(c_values) and all((isinstance(c, str) for c in unique)): mpl.colors.to_rgba_array(unique) return True except (TypeError, ValueError) as _: return False def _get_color_mapping(self, c_values: Series) -> dict[str, np.ndarray]: unique = np.unique(c_values) n_colors = len(unique) cmap = mpl.colormaps.get_cmap(self.colormap) colors = cmap(np.linspace(0, 1, n_colors)) return dict(zip(unique, colors)) def _get_norm_and_cmap(self, c_values, color_by_categorical: bool): c = self.c if self.colormap is not None: cmap = mpl.colormaps.get_cmap(self.colormap) elif not isinstance(c_values, str) and is_integer_dtype(c_values): cmap = mpl.colormaps['Greys'] else: cmap = None if color_by_categorical and cmap is not None: n_cats = len(self.data[c].cat.categories) cmap = mpl.colors.ListedColormap([cmap(i) for i in range(cmap.N)]) bounds = np.linspace(0, n_cats, n_cats + 1) norm = mpl.colors.BoundaryNorm(bounds, cmap.N) else: norm = self.norm return (norm, cmap) def _get_colorbar(self, c_values, c_is_column: bool) -> bool: plot_colorbar = self.colormap or c_is_column cb = self.colorbar if cb is lib.no_default: return is_numeric_dtype(c_values) and plot_colorbar return cb class HexBinPlot(PlanePlot): @property def _kind(self) -> Literal['hexbin']: return 'hexbin' def __init__(self, data, x, y, C=None, *, colorbar: bool=True, **kwargs) -> None: super().__init__(data, x, y, **kwargs) if is_integer(C) and (not holds_integer(self.data.columns)): C = self.data.columns[C] self.C = C self.colorbar = colorbar if len(self.data[self.x]._get_numeric_data()) == 0: raise ValueError(self._kind + ' requires x column to be numeric') if len(self.data[self.y]._get_numeric_data()) == 0: raise ValueError(self._kind + ' requires y column to be numeric') def _make_plot(self, fig: Figure) -> None: (x, y, data, C) = (self.x, self.y, self.data, self.C) ax = self.axes[0] cmap = self.colormap or 'BuGn' cmap = mpl.colormaps.get_cmap(cmap) cb = self.colorbar if C is None: c_values = None else: c_values = data[C].values ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap, **self.kwds) if cb: self._plot_colorbar(ax, fig=fig) def _make_legend(self) -> None: pass class LinePlot(MPLPlot): _default_rot = 0 @property def orientation(self) -> PlottingOrientation: return 'vertical' @property def _kind(self) -> Literal['line', 'area', 'hist', 'kde', 'box']: return 'line' def __init__(self, data, **kwargs) -> None: from pandas.plotting import plot_params MPLPlot.__init__(self, data, **kwargs) if self.stacked: self.data = self.data.fillna(value=0) self.x_compat = plot_params['x_compat'] if 'x_compat' in self.kwds: self.x_compat = bool(self.kwds.pop('x_compat')) @final def _is_ts_plot(self) -> bool: return not self.x_compat and self.use_index and self._use_dynamic_x() @final def _use_dynamic_x(self) -> bool: return use_dynamic_x(self._get_ax(0), self.data) def _make_plot(self, fig: Figure) -> None: if self._is_ts_plot(): data = 
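# --- Illustrative usage sketch (added; not part of the source) ---
# Exercises ScatterPlot's c/colorbar handling above: 'c' may name a column; numeric
# c-values get a colorbar by default, while a categorical column is mapped to discrete
# colormap bins.
import numpy as np
import pandas as pd

rng = np.random.default_rng(2)
df = pd.DataFrame({"x": rng.normal(size=30), "y": rng.normal(size=30), "val": rng.uniform(size=30)})
ax = df.plot.scatter(x="x", y="y", c="val", colormap="viridis")  # colorbar added for numeric 'val'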
maybe_convert_index(self._get_ax(0), self.data) x = data.index plotf = self._ts_plot it = data.items() else: x = self._get_xticks() plotf = self._plot it = self._iter_data(data=self.data) stacking_id = self._get_stacking_id() is_errorbar = com.any_not_none(*self.errors.values()) colors = self._get_colors() for (i, (label, y)) in enumerate(it): ax = self._get_ax(i) kwds = self.kwds.copy() if self.color is not None: kwds['color'] = self.color (style, kwds) = self._apply_style_colors(colors, kwds, i, label) errors = self._get_errorbars(label=label, index=i) kwds = dict(kwds, **errors) label = pprint_thing(label) label = self._mark_right_label(label, index=i) kwds['label'] = label newlines = plotf(ax, x, y, style=style, column_num=i, stacking_id=stacking_id, is_errorbar=is_errorbar, **kwds) self._append_legend_handles_labels(newlines[0], label) if self._is_ts_plot(): lines = get_all_lines(ax) (left, right) = get_xlim(lines) ax.set_xlim(left, right) @classmethod def _plot(cls, ax: Axes, x, y: np.ndarray, style=None, column_num=None, stacking_id=None, **kwds): if column_num == 0: cls._initialize_stacker(ax, stacking_id, len(y)) y_values = cls._get_stacked_values(ax, stacking_id, y, kwds['label']) lines = MPLPlot._plot(ax, x, y_values, style=style, **kwds) cls._update_stacker(ax, stacking_id, y) return lines @final def _ts_plot(self, ax: Axes, x, data: Series, style=None, **kwds): (freq, data) = maybe_resample(data, ax, kwds) decorate_axes(ax, freq) if hasattr(ax, 'left_ax'): decorate_axes(ax.left_ax, freq) if hasattr(ax, 'right_ax'): decorate_axes(ax.right_ax, freq) ax._plot_data.append((data, self._kind, kwds)) lines = self._plot(ax, data.index, np.asarray(data.values), style=style, **kwds) format_dateaxis(ax, ax.freq, data.index) return lines @final def _get_stacking_id(self) -> int | None: if self.stacked: return id(self.data) else: return None @final @classmethod def _initialize_stacker(cls, ax: Axes, stacking_id, n: int) -> None: if stacking_id is None: return if not hasattr(ax, '_stacker_pos_prior'): ax._stacker_pos_prior = {} if not hasattr(ax, '_stacker_neg_prior'): ax._stacker_neg_prior = {} ax._stacker_pos_prior[stacking_id] = np.zeros(n) ax._stacker_neg_prior[stacking_id] = np.zeros(n) @final @classmethod def _get_stacked_values(cls, ax: Axes, stacking_id: int | None, values: np.ndarray, label) -> np.ndarray: if stacking_id is None: return values if not hasattr(ax, '_stacker_pos_prior'): cls._initialize_stacker(ax, stacking_id, len(values)) if (values >= 0).all(): return ax._stacker_pos_prior[stacking_id] + values elif (values <= 0).all(): return ax._stacker_neg_prior[stacking_id] + values raise ValueError(f"When stacked is True, each column must be either all positive or all negative. 
Column '{label}' contains both positive and negative values") @final @classmethod def _update_stacker(cls, ax: Axes, stacking_id: int | None, values) -> None: if stacking_id is None: return if (values >= 0).all(): ax._stacker_pos_prior[stacking_id] += values elif (values <= 0).all(): ax._stacker_neg_prior[stacking_id] += values def _post_plot_logic(self, ax: Axes, data) -> None: def get_label(i): if is_float(i) and i.is_integer(): i = int(i) try: return pprint_thing(data.index[i]) except Exception: return '' if self._need_to_set_index: xticks = ax.get_xticks() xticklabels = [get_label(x) for x in xticks] ax.xaxis.set_major_locator(mpl.ticker.FixedLocator(xticks)) ax.set_xticklabels(xticklabels) condition = not self._use_dynamic_x() and (data.index._is_all_dates and self.use_index) and (not self.subplots or (self.subplots and self.sharex)) index_name = self._get_index_name() if condition: if not self._rot_set: self.rot = 30 format_date_labels(ax, rot=self.rot) if index_name is not None and self.use_index: ax.set_xlabel(index_name) class AreaPlot(LinePlot): @property def _kind(self) -> Literal['area']: return 'area' def __init__(self, data, **kwargs) -> None: kwargs.setdefault('stacked', True) data = data.fillna(value=0) LinePlot.__init__(self, data, **kwargs) if not self.stacked: self.kwds.setdefault('alpha', 0.5) if self.logy or self.loglog: raise ValueError('Log-y scales are not supported in area plot') @classmethod def _plot(cls, ax: Axes, x, y: np.ndarray, style=None, column_num=None, stacking_id=None, is_errorbar: bool=False, **kwds): if column_num == 0: cls._initialize_stacker(ax, stacking_id, len(y)) y_values = cls._get_stacked_values(ax, stacking_id, y, kwds['label']) line_kwds = kwds.copy() line_kwds.pop('label') lines = MPLPlot._plot(ax, x, y_values, style=style, **line_kwds) (xdata, y_values) = lines[0].get_data(orig=False) if stacking_id is None: start = np.zeros(len(y)) elif (y >= 0).all(): start = ax._stacker_pos_prior[stacking_id] elif (y <= 0).all(): start = ax._stacker_neg_prior[stacking_id] else: start = np.zeros(len(y)) if 'color' not in kwds: kwds['color'] = lines[0].get_color() rect = ax.fill_between(xdata, start, y_values, **kwds) cls._update_stacker(ax, stacking_id, y) res = [rect] return res def _post_plot_logic(self, ax: Axes, data) -> None: LinePlot._post_plot_logic(self, ax, data) is_shared_y = len(list(ax.get_shared_y_axes())) > 0 if self.ylim is None and (not is_shared_y): if (data >= 0).all().all(): ax.set_ylim(0, None) elif (data <= 0).all().all(): ax.set_ylim(None, 0) class BarPlot(MPLPlot): @property def _kind(self) -> Literal['bar', 'barh']: return 'bar' _default_rot = 90 @property def orientation(self) -> PlottingOrientation: return 'vertical' def __init__(self, data, *, align='center', bottom=0, left=0, width=0.5, position=0.5, log=False, **kwargs) -> None: self._is_series = isinstance(data, ABCSeries) self.bar_width = width self._align = align self._position = position self.tick_pos = np.arange(len(data)) if is_list_like(bottom): bottom = np.array(bottom) if is_list_like(left): left = np.array(left) self.bottom = bottom self.left = left self.log = log MPLPlot.__init__(self, data, **kwargs) @cache_readonly def ax_pos(self) -> np.ndarray: return self.tick_pos - self.tickoffset @cache_readonly def tickoffset(self): if self.stacked or self.subplots: return self.bar_width * self._position elif self._align == 'edge': w = self.bar_width / self.nseries return self.bar_width * (self._position - 0.5) + w * 0.5 else: return self.bar_width * self._position 
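# Worked example of the bar-positioning arithmetic (tickoffset/ax_pos above,
# grouped-bar placement in _make_plot below); an illustrative sketch, not part
# of the pandas source. With the defaults width=0.5, position=0.5,
# align='center' and two unstacked series (nseries == 2):
#   tickoffset = 0.5 * 0.5 = 0.25, so ax_pos = tick_pos - 0.25
#   per-series width w = 0.5 / 2 = 0.25
#   series 0 is centred at ax_pos + 0.5 * w = tick_pos - 0.125
#   series 1 is centred at ax_pos + 1.5 * w = tick_pos + 0.125
# i.e. the grouped bars straddle each tick symmetrically and together span
# tick_pos +/- 0.25.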
@cache_readonly def lim_offset(self): if self.stacked or self.subplots: if self._align == 'edge': return self.bar_width / 2 else: return 0 elif self._align == 'edge': w = self.bar_width / self.nseries return w * 0.5 else: return 0 @classmethod def _plot(cls, ax: Axes, x, y: np.ndarray, w, start: int | npt.NDArray[np.intp]=0, log: bool=False, **kwds): return ax.bar(x, y, w, bottom=start, log=log, **kwds) @property def _start_base(self): return self.bottom def _make_plot(self, fig: Figure) -> None: colors = self._get_colors() ncolors = len(colors) pos_prior = neg_prior = np.zeros(len(self.data)) K = self.nseries data = self.data.fillna(0) for (i, (label, y)) in enumerate(self._iter_data(data=data)): ax = self._get_ax(i) kwds = self.kwds.copy() if self._is_series: kwds['color'] = colors elif isinstance(colors, dict): kwds['color'] = colors[label] else: kwds['color'] = colors[i % ncolors] errors = self._get_errorbars(label=label, index=i) kwds = dict(kwds, **errors) label = pprint_thing(label) label = self._mark_right_label(label, index=i) if ('yerr' in kwds or 'xerr' in kwds) and kwds.get('ecolor') is None: kwds['ecolor'] = mpl.rcParams['xtick.color'] start = 0 if self.log and (y >= 1).all(): start = 1 start = start + self._start_base kwds['align'] = self._align if self.subplots: w = self.bar_width / 2 rect = self._plot(ax, self.ax_pos + w, y, self.bar_width, start=start, label=label, log=self.log, **kwds) ax.set_title(label) elif self.stacked: mask = y > 0 start = np.where(mask, pos_prior, neg_prior) + self._start_base w = self.bar_width / 2 rect = self._plot(ax, self.ax_pos + w, y, self.bar_width, start=start, label=label, log=self.log, **kwds) pos_prior = pos_prior + np.where(mask, y, 0) neg_prior = neg_prior + np.where(mask, 0, y) else: w = self.bar_width / K rect = self._plot(ax, self.ax_pos + (i + 0.5) * w, y, w, start=start, label=label, log=self.log, **kwds) self._append_legend_handles_labels(rect, label) def _post_plot_logic(self, ax: Axes, data) -> None: if self.use_index: str_index = [pprint_thing(key) for key in data.index] else: str_index = [pprint_thing(key) for key in range(data.shape[0])] s_edge = self.ax_pos[0] - 0.25 + self.lim_offset e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset self._decorate_ticks(ax, self._get_index_name(), str_index, s_edge, e_edge) def _decorate_ticks(self, ax: Axes, name: str | None, ticklabels: list[str], start_edge: float, end_edge: float) -> None: ax.set_xlim((start_edge, end_edge)) if self.xticks is not None: ax.set_xticks(np.array(self.xticks)) else: ax.set_xticks(self.tick_pos) ax.set_xticklabels(ticklabels) if name is not None and self.use_index: ax.set_xlabel(name) class BarhPlot(BarPlot): @property def _kind(self) -> Literal['barh']: return 'barh' _default_rot = 0 @property def orientation(self) -> Literal['horizontal']: return 'horizontal' @property def _start_base(self): return self.left @classmethod def _plot(cls, ax: Axes, x, y: np.ndarray, w, start: int | npt.NDArray[np.intp]=0, log: bool=False, **kwds): return ax.barh(x, y, w, left=start, log=log, **kwds) def _get_custom_index_name(self): return self.ylabel def _decorate_ticks(self, ax: Axes, name: str | None, ticklabels: list[str], start_edge: float, end_edge: float) -> None: ax.set_ylim((start_edge, end_edge)) ax.set_yticks(self.tick_pos) ax.set_yticklabels(ticklabels) if name is not None and self.use_index: ax.set_ylabel(name) ax.set_xlabel(self.xlabel) class PiePlot(MPLPlot): @property def _kind(self) -> Literal['pie']: return 'pie' _layout_type = 'horizontal' 
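# PiePlot draws one pie per column (or a single pie for a Series), and the
# '_layout_type = horizontal' above lays those per-column subplots out in one
# row. Minimal usage sketch (illustrative only; assumes a small DataFrame `df`
# holding non-negative values):
#   >>> df.plot.pie(subplots=True, figsize=(8, 4))
# Negative values are rejected and NaNs are filled with 0 in __init__ below.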
def __init__(self, data: Series | DataFrame, kind=None, **kwargs) -> None: data = data.fillna(value=0) lt_zero = data < 0 if isinstance(data, ABCDataFrame) and lt_zero.any().any(): raise ValueError(f"{self._kind} plot doesn't allow negative values") elif isinstance(data, ABCSeries) and lt_zero.any(): raise ValueError(f"{self._kind} plot doesn't allow negative values") MPLPlot.__init__(self, data, kind=kind, **kwargs) @classmethod def _validate_log_kwd(cls, kwd: str, value: bool | None | Literal['sym']) -> bool | None | Literal['sym']: super()._validate_log_kwd(kwd=kwd, value=value) if value is not False: warnings.warn(f"PiePlot ignores the '{kwd}' keyword", UserWarning, stacklevel=find_stack_level()) return False def _validate_color_args(self, color, colormap) -> None: return None def _make_plot(self, fig: Figure) -> None: colors = self._get_colors(num_colors=len(self.data), color_kwds='colors') self.kwds.setdefault('colors', colors) for (i, (label, y)) in enumerate(self._iter_data(data=self.data)): ax = self._get_ax(i) kwds = self.kwds.copy() def blank_labeler(label, value): if value == 0: return '' else: return label idx = [pprint_thing(v) for v in self.data.index] labels = kwds.pop('labels', idx) if labels is not None: blabels = [blank_labeler(left, value) for (left, value) in zip(labels, y)] else: blabels = None results = ax.pie(y, labels=blabels, **kwds) if kwds.get('autopct', None) is not None: (patches, texts, autotexts) = results else: (patches, texts) = results autotexts = [] if self.fontsize is not None: for t in texts + autotexts: t.set_fontsize(self.fontsize) leglabels = labels if labels is not None else idx for (_patch, _leglabel) in zip(patches, leglabels): self._append_legend_handles_labels(_patch, _leglabel) def _post_plot_logic(self, ax: Axes, data) -> None: pass # File: pandas-main/pandas/plotting/_matplotlib/groupby.py from __future__ import annotations from typing import TYPE_CHECKING import numpy as np from pandas.core.dtypes.missing import remove_na_arraylike from pandas import MultiIndex, concat from pandas.plotting._matplotlib.misc import unpack_single_str_list if TYPE_CHECKING: from collections.abc import Hashable from pandas._typing import IndexLabel from pandas import DataFrame, Series def create_iter_data_given_by(data: DataFrame, kind: str='hist') -> dict[Hashable, DataFrame | Series]: if kind == 'hist': level = 0 else: level = 1 assert isinstance(data.columns, MultiIndex) return {col: data.loc[:, data.columns.get_level_values(level) == col] for col in data.columns.levels[level]} def reconstruct_data_with_by(data: DataFrame, by: IndexLabel, cols: IndexLabel) -> DataFrame: by_modified = unpack_single_str_list(by) grouped = data.groupby(by_modified) data_list = [] for (key, group) in grouped: columns = MultiIndex.from_product([[key], cols]) sub_group = group[cols] sub_group.columns = columns data_list.append(sub_group) data = concat(data_list, axis=1) return data def reformat_hist_y_given_by(y: np.ndarray, by: IndexLabel | None) -> np.ndarray: if by is not None and len(y.shape) > 1: return np.array([remove_na_arraylike(col) for col in y.T]).T return remove_na_arraylike(y) # File: pandas-main/pandas/plotting/_matplotlib/hist.py from __future__ import annotations from typing import TYPE_CHECKING, Any, Literal, final import numpy as np from pandas.core.dtypes.common import is_integer, is_list_like from pandas.core.dtypes.generic import ABCDataFrame, ABCIndex from pandas.core.dtypes.missing import isna, remove_na_arraylike from pandas.io.formats.printing import 
pprint_thing from pandas.plotting._matplotlib.core import LinePlot, MPLPlot from pandas.plotting._matplotlib.groupby import create_iter_data_given_by, reformat_hist_y_given_by from pandas.plotting._matplotlib.misc import unpack_single_str_list from pandas.plotting._matplotlib.tools import create_subplots, flatten_axes, maybe_adjust_figure, set_ticks_props if TYPE_CHECKING: from matplotlib.axes import Axes from matplotlib.container import BarContainer from matplotlib.figure import Figure from matplotlib.patches import Polygon from pandas._typing import PlottingOrientation from pandas import DataFrame, Series class HistPlot(LinePlot): @property def _kind(self) -> Literal['hist', 'kde']: return 'hist' def __init__(self, data, bins: int | np.ndarray | list[np.ndarray]=10, bottom: int | np.ndarray=0, *, range=None, weights=None, **kwargs) -> None: if is_list_like(bottom): bottom = np.array(bottom) self.bottom = bottom self._bin_range = range self.weights = weights self.xlabel = kwargs.get('xlabel') self.ylabel = kwargs.get('ylabel') MPLPlot.__init__(self, data, **kwargs) self.bins = self._adjust_bins(bins) def _adjust_bins(self, bins: int | np.ndarray | list[np.ndarray]): if is_integer(bins): if self.by is not None: by_modified = unpack_single_str_list(self.by) grouped = self.data.groupby(by_modified)[self.columns] bins = [self._calculate_bins(group, bins) for (key, group) in grouped] else: bins = self._calculate_bins(self.data, bins) return bins def _calculate_bins(self, data: Series | DataFrame, bins) -> np.ndarray: nd_values = data.infer_objects()._get_numeric_data() values = nd_values.values if nd_values.ndim == 2: values = values.reshape(-1) values = values[~isna(values)] return np.histogram_bin_edges(values, bins=bins, range=self._bin_range) @classmethod def _plot(cls, ax: Axes, y: np.ndarray, style=None, bottom: int | np.ndarray=0, column_num: int=0, stacking_id=None, *, bins, **kwds) -> BarContainer | Polygon | list[BarContainer | Polygon]: if column_num == 0: cls._initialize_stacker(ax, stacking_id, len(bins) - 1) base = np.zeros(len(bins) - 1) bottom = bottom + cls._get_stacked_values(ax, stacking_id, base, kwds['label']) (n, bins, patches) = ax.hist(y, bins=bins, bottom=bottom, **kwds) cls._update_stacker(ax, stacking_id, n) return patches def _make_plot(self, fig: Figure) -> None: colors = self._get_colors() stacking_id = self._get_stacking_id() data = create_iter_data_given_by(self.data, self._kind) if self.by is not None else self.data for (i, (label, y)) in enumerate(self._iter_data(data=data)): ax = self._get_ax(i) kwds = self.kwds.copy() if self.color is not None: kwds['color'] = self.color label = pprint_thing(label) label = self._mark_right_label(label, index=i) kwds['label'] = label (style, kwds) = self._apply_style_colors(colors, kwds, i, label) if style is not None: kwds['style'] = style self._make_plot_keywords(kwds, y) if self.by is not None: kwds['bins'] = kwds['bins'][i] kwds['label'] = self.columns kwds.pop('color') if self.weights is not None: kwds['weights'] = type(self)._get_column_weights(self.weights, i, y) y = reformat_hist_y_given_by(y, self.by) artists = self._plot(ax, y, column_num=i, stacking_id=stacking_id, **kwds) if self.by is not None: ax.set_title(pprint_thing(label)) self._append_legend_handles_labels(artists[0], label) def _make_plot_keywords(self, kwds: dict[str, Any], y: np.ndarray) -> None: kwds['bottom'] = self.bottom kwds['bins'] = self.bins @final @staticmethod def _get_column_weights(weights, i: int, y): if weights is not None: if 
np.ndim(weights) != 1 and np.shape(weights)[-1] != 1: try: weights = weights[:, i] except IndexError as err: raise ValueError('weights must have the same shape as data, or be a single column') from err weights = weights[~isna(y)] return weights def _post_plot_logic(self, ax: Axes, data) -> None: if self.orientation == 'horizontal': ax.set_xlabel('Frequency' if self.xlabel is None else self.xlabel) ax.set_ylabel(self.ylabel) else: ax.set_xlabel(self.xlabel) ax.set_ylabel('Frequency' if self.ylabel is None else self.ylabel) @property def orientation(self) -> PlottingOrientation: if self.kwds.get('orientation', None) == 'horizontal': return 'horizontal' else: return 'vertical' class KdePlot(HistPlot): @property def _kind(self) -> Literal['kde']: return 'kde' @property def orientation(self) -> Literal['vertical']: return 'vertical' def __init__(self, data, bw_method=None, ind=None, *, weights=None, **kwargs) -> None: MPLPlot.__init__(self, data, **kwargs) self.bw_method = bw_method self.ind = ind self.weights = weights @staticmethod def _get_ind(y: np.ndarray, ind): if ind is None: sample_range = np.nanmax(y) - np.nanmin(y) ind = np.linspace(np.nanmin(y) - 0.5 * sample_range, np.nanmax(y) + 0.5 * sample_range, 1000) elif is_integer(ind): sample_range = np.nanmax(y) - np.nanmin(y) ind = np.linspace(np.nanmin(y) - 0.5 * sample_range, np.nanmax(y) + 0.5 * sample_range, ind) return ind @classmethod def _plot(cls, ax: Axes, y: np.ndarray, style=None, bw_method=None, weights=None, ind=None, column_num=None, stacking_id: int | None=None, **kwds): from scipy.stats import gaussian_kde y = remove_na_arraylike(y) gkde = gaussian_kde(y, bw_method=bw_method, weights=weights) y = gkde.evaluate(ind) lines = MPLPlot._plot(ax, ind, y, style=style, **kwds) return lines def _make_plot_keywords(self, kwds: dict[str, Any], y: np.ndarray) -> None: kwds['bw_method'] = self.bw_method kwds['ind'] = type(self)._get_ind(y, ind=self.ind) def _post_plot_logic(self, ax: Axes, data) -> None: ax.set_ylabel('Density') def _grouped_plot(plotf, data: Series | DataFrame, column=None, by=None, numeric_only: bool=True, figsize: tuple[float, float] | None=None, sharex: bool=True, sharey: bool=True, layout=None, rot: float=0, ax=None, **kwargs): if figsize == 'default': raise ValueError("figsize='default' is no longer supported. 
Specify figure size by tuple instead") grouped = data.groupby(by) if column is not None: grouped = grouped[column] naxes = len(grouped) (fig, axes) = create_subplots(naxes=naxes, figsize=figsize, sharex=sharex, sharey=sharey, ax=ax, layout=layout) for (ax, (key, group)) in zip(flatten_axes(axes), grouped): if numeric_only and isinstance(group, ABCDataFrame): group = group._get_numeric_data() plotf(group, ax, **kwargs) ax.set_title(pprint_thing(key)) return (fig, axes) def _grouped_hist(data: Series | DataFrame, column=None, by=None, ax=None, bins: int=50, figsize: tuple[float, float] | None=None, layout=None, sharex: bool=False, sharey: bool=False, rot: float=90, grid: bool=True, xlabelsize: int | None=None, xrot=None, ylabelsize: int | None=None, yrot=None, legend: bool=False, **kwargs): if legend: assert 'label' not in kwargs if data.ndim == 1: kwargs['label'] = data.name elif column is None: kwargs['label'] = data.columns else: kwargs['label'] = column def plot_group(group, ax) -> None: ax.hist(group.dropna().values, bins=bins, **kwargs) if legend: ax.legend() if xrot is None: xrot = rot (fig, axes) = _grouped_plot(plot_group, data, column=column, by=by, sharex=sharex, sharey=sharey, ax=ax, figsize=figsize, layout=layout, rot=rot) set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot) maybe_adjust_figure(fig, bottom=0.15, top=0.9, left=0.1, right=0.9, hspace=0.5, wspace=0.3) return axes def hist_series(self: Series, by=None, ax=None, grid: bool=True, xlabelsize: int | None=None, xrot=None, ylabelsize: int | None=None, yrot=None, figsize: tuple[float, float] | None=None, bins: int=10, legend: bool=False, **kwds): import matplotlib.pyplot as plt if legend and 'label' in kwds: raise ValueError('Cannot use both legend and label') if by is None: if kwds.get('layout', None) is not None: raise ValueError("The 'layout' keyword is not supported when 'by' is None") fig = kwds.pop('figure', plt.gcf() if plt.get_fignums() else plt.figure(figsize=figsize)) if figsize is not None and tuple(figsize) != tuple(fig.get_size_inches()): fig.set_size_inches(*figsize, forward=True) if ax is None: ax = fig.gca() elif ax.get_figure() != fig: raise AssertionError('passed axis not bound to passed figure') values = self.dropna().values if legend: kwds['label'] = self.name ax.hist(values, bins=bins, **kwds) if legend: ax.legend() ax.grid(grid) axes = np.array([ax]) set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot) else: if 'figure' in kwds: raise ValueError("Cannot pass 'figure' when using the 'by' argument, since a new 'Figure' instance will be created") axes = _grouped_hist(self, by=by, ax=ax, grid=grid, figsize=figsize, bins=bins, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot, legend=legend, **kwds) if hasattr(axes, 'ndim'): if axes.ndim == 1 and len(axes) == 1: return axes[0] return axes def hist_frame(data: DataFrame, column=None, by=None, grid: bool=True, xlabelsize: int | None=None, xrot=None, ylabelsize: int | None=None, yrot=None, ax=None, sharex: bool=False, sharey: bool=False, figsize: tuple[float, float] | None=None, layout=None, bins: int=10, legend: bool=False, **kwds): if legend and 'label' in kwds: raise ValueError('Cannot use both legend and label') if by is not None: axes = _grouped_hist(data, column=column, by=by, ax=ax, grid=grid, figsize=figsize, sharex=sharex, sharey=sharey, layout=layout, bins=bins, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot, legend=legend, **kwds) return 
axes if column is not None: if not isinstance(column, (list, np.ndarray, ABCIndex)): column = [column] data = data[column] data = data.select_dtypes(include=(np.number, 'datetime64', 'datetimetz'), exclude='timedelta') naxes = len(data.columns) if naxes == 0: raise ValueError('hist method requires numerical or datetime columns, nothing to plot.') (fig, axes) = create_subplots(naxes=naxes, ax=ax, squeeze=False, sharex=sharex, sharey=sharey, figsize=figsize, layout=layout) can_set_label = 'label' not in kwds for (ax, col) in zip(flatten_axes(axes), data.columns): if legend and can_set_label: kwds['label'] = col ax.hist(data[col].dropna().values, bins=bins, **kwds) ax.set_title(col) ax.grid(grid) if legend: ax.legend() set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot) maybe_adjust_figure(fig, wspace=0.3, hspace=0.3) return axes # File: pandas-main/pandas/plotting/_matplotlib/misc.py from __future__ import annotations import random from typing import TYPE_CHECKING import matplotlib as mpl import numpy as np from pandas.core.dtypes.missing import notna from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.style import get_standard_colors from pandas.plotting._matplotlib.tools import create_subplots, do_adjust_figure, maybe_adjust_figure, set_ticks_props if TYPE_CHECKING: from collections.abc import Hashable from matplotlib.axes import Axes from matplotlib.figure import Figure from pandas import DataFrame, Index, Series def scatter_matrix(frame: DataFrame, alpha: float=0.5, figsize: tuple[float, float] | None=None, ax=None, grid: bool=False, diagonal: str='hist', marker: str='.', density_kwds=None, hist_kwds=None, range_padding: float=0.05, **kwds): df = frame._get_numeric_data() n = df.columns.size naxes = n * n (fig, axes) = create_subplots(naxes=naxes, figsize=figsize, ax=ax, squeeze=False) maybe_adjust_figure(fig, wspace=0, hspace=0) mask = notna(df) marker = _get_marker_compat(marker) hist_kwds = hist_kwds or {} density_kwds = density_kwds or {} kwds.setdefault('edgecolors', 'none') boundaries_list = [] for a in df.columns: values = df[a].values[mask[a].values] (rmin_, rmax_) = (np.min(values), np.max(values)) rdelta_ext = (rmax_ - rmin_) * range_padding / 2 boundaries_list.append((rmin_ - rdelta_ext, rmax_ + rdelta_ext)) for (i, a) in enumerate(df.columns): for (j, b) in enumerate(df.columns): ax = axes[i, j] if i == j: values = df[a].values[mask[a].values] if diagonal == 'hist': ax.hist(values, **hist_kwds) elif diagonal in ('kde', 'density'): from scipy.stats import gaussian_kde y = values gkde = gaussian_kde(y) ind = np.linspace(y.min(), y.max(), 1000) ax.plot(ind, gkde.evaluate(ind), **density_kwds) ax.set_xlim(boundaries_list[i]) else: common = (mask[a] & mask[b]).values ax.scatter(df[b][common], df[a][common], marker=marker, alpha=alpha, **kwds) ax.set_xlim(boundaries_list[j]) ax.set_ylim(boundaries_list[i]) ax.set_xlabel(b) ax.set_ylabel(a) if j != 0: ax.yaxis.set_visible(False) if i != n - 1: ax.xaxis.set_visible(False) if len(df.columns) > 1: lim1 = boundaries_list[0] locs = axes[0][1].yaxis.get_majorticklocs() locs = locs[(lim1[0] <= locs) & (locs <= lim1[1])] adj = (locs - lim1[0]) / (lim1[1] - lim1[0]) lim0 = axes[0][0].get_ylim() adj = adj * (lim0[1] - lim0[0]) + lim0[0] axes[0][0].yaxis.set_ticks(adj) if np.all(locs == locs.astype(int)): locs = locs.astype(int) axes[0][0].yaxis.set_ticklabels(locs) set_ticks_props(axes, xlabelsize=8, xrot=90, ylabelsize=8, yrot=0) return axes def 
_get_marker_compat(marker): if marker not in mpl.lines.lineMarkers: return 'o' return marker def radviz(frame: DataFrame, class_column, ax: Axes | None=None, color=None, colormap=None, **kwds) -> Axes: import matplotlib.pyplot as plt def normalize(series): a = min(series) b = max(series) return (series - a) / (b - a) n = len(frame) classes = frame[class_column].drop_duplicates() class_col = frame[class_column] df = frame.drop(class_column, axis=1).apply(normalize) if ax is None: ax = plt.gca() ax.set_xlim(-1, 1) ax.set_ylim(-1, 1) to_plot: dict[Hashable, list[list]] = {} colors = get_standard_colors(num_colors=len(classes), colormap=colormap, color_type='random', color=color) for kls in classes: to_plot[kls] = [[], []] m = len(frame.columns) - 1 s = np.array([(np.cos(t), np.sin(t)) for t in [2 * np.pi * (i / m) for i in range(m)]]) for i in range(n): row = df.iloc[i].values row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1) y = (s * row_).sum(axis=0) / row.sum() kls = class_col.iat[i] to_plot[kls][0].append(y[0]) to_plot[kls][1].append(y[1]) for (i, kls) in enumerate(classes): ax.scatter(to_plot[kls][0], to_plot[kls][1], color=colors[i], label=pprint_thing(kls), **kwds) ax.legend() ax.add_patch(mpl.patches.Circle((0.0, 0.0), radius=1.0, facecolor='none')) for (xy, name) in zip(s, df.columns): ax.add_patch(mpl.patches.Circle(xy, radius=0.025, facecolor='gray')) if xy[0] < 0.0 and xy[1] < 0.0: ax.text(xy[0] - 0.025, xy[1] - 0.025, name, ha='right', va='top', size='small') elif xy[0] < 0.0 <= xy[1]: ax.text(xy[0] - 0.025, xy[1] + 0.025, name, ha='right', va='bottom', size='small') elif xy[1] < 0.0 <= xy[0]: ax.text(xy[0] + 0.025, xy[1] - 0.025, name, ha='left', va='top', size='small') elif xy[0] >= 0.0 and xy[1] >= 0.0: ax.text(xy[0] + 0.025, xy[1] + 0.025, name, ha='left', va='bottom', size='small') ax.axis('equal') return ax def andrews_curves(frame: DataFrame, class_column, ax: Axes | None=None, samples: int=200, color=None, colormap=None, **kwds) -> Axes: import matplotlib.pyplot as plt def function(amplitudes): def f(t): x1 = amplitudes[0] result = x1 / np.sqrt(2.0) coeffs = np.delete(np.copy(amplitudes), 0) coeffs = np.resize(coeffs, (int((coeffs.size + 1) / 2), 2)) harmonics = np.arange(0, coeffs.shape[0]) + 1 trig_args = np.outer(harmonics, t) result += np.sum(coeffs[:, 0, np.newaxis] * np.sin(trig_args) + coeffs[:, 1, np.newaxis] * np.cos(trig_args), axis=0) return result return f n = len(frame) class_col = frame[class_column] classes = frame[class_column].drop_duplicates() df = frame.drop(class_column, axis=1) t = np.linspace(-np.pi, np.pi, samples) used_legends: set[str] = set() color_values = get_standard_colors(num_colors=len(classes), colormap=colormap, color_type='random', color=color) colors = dict(zip(classes, color_values)) if ax is None: ax = plt.gca() ax.set_xlim(-np.pi, np.pi) for i in range(n): row = df.iloc[i].values f = function(row) y = f(t) kls = class_col.iat[i] label = pprint_thing(kls) if label not in used_legends: used_legends.add(label) ax.plot(t, y, color=colors[kls], label=label, **kwds) else: ax.plot(t, y, color=colors[kls], **kwds) ax.legend(loc='upper right') ax.grid() return ax def bootstrap_plot(series: Series, fig: Figure | None=None, size: int=50, samples: int=500, **kwds) -> Figure: import matplotlib.pyplot as plt data = list(series.values) samplings = [random.sample(data, size) for _ in range(samples)] means = np.array([np.mean(sampling) for sampling in samplings]) medians = np.array([np.median(sampling) for sampling in samplings]) midranges = 
np.array([(min(sampling) + max(sampling)) * 0.5 for sampling in samplings]) if fig is None: fig = plt.figure() x = list(range(samples)) axes = [] ax1 = fig.add_subplot(2, 3, 1) ax1.set_xlabel('Sample') axes.append(ax1) ax1.plot(x, means, **kwds) ax2 = fig.add_subplot(2, 3, 2) ax2.set_xlabel('Sample') axes.append(ax2) ax2.plot(x, medians, **kwds) ax3 = fig.add_subplot(2, 3, 3) ax3.set_xlabel('Sample') axes.append(ax3) ax3.plot(x, midranges, **kwds) ax4 = fig.add_subplot(2, 3, 4) ax4.set_xlabel('Mean') axes.append(ax4) ax4.hist(means, **kwds) ax5 = fig.add_subplot(2, 3, 5) ax5.set_xlabel('Median') axes.append(ax5) ax5.hist(medians, **kwds) ax6 = fig.add_subplot(2, 3, 6) ax6.set_xlabel('Midrange') axes.append(ax6) ax6.hist(midranges, **kwds) for axis in axes: plt.setp(axis.get_xticklabels(), fontsize=8) plt.setp(axis.get_yticklabels(), fontsize=8) if do_adjust_figure(fig): plt.tight_layout() return fig def parallel_coordinates(frame: DataFrame, class_column, cols=None, ax: Axes | None=None, color=None, use_columns: bool=False, xticks=None, colormap=None, axvlines: bool=True, axvlines_kwds=None, sort_labels: bool=False, **kwds) -> Axes: import matplotlib.pyplot as plt if axvlines_kwds is None: axvlines_kwds = {'linewidth': 1, 'color': 'black'} n = len(frame) classes = frame[class_column].drop_duplicates() class_col = frame[class_column] if cols is None: df = frame.drop(class_column, axis=1) else: df = frame[cols] used_legends: set[str] = set() ncols = len(df.columns) x: list[int] | Index if use_columns is True: if not np.all(np.isreal(list(df.columns))): raise ValueError('Columns must be numeric to be used as xticks') x = df.columns elif xticks is not None: if not np.all(np.isreal(xticks)): raise ValueError('xticks specified must be numeric') if len(xticks) != ncols: raise ValueError('Length of xticks must match number of columns') x = xticks else: x = list(range(ncols)) if ax is None: ax = plt.gca() color_values = get_standard_colors(num_colors=len(classes), colormap=colormap, color_type='random', color=color) if sort_labels: classes = sorted(classes) color_values = sorted(color_values) colors = dict(zip(classes, color_values)) for i in range(n): y = df.iloc[i].values kls = class_col.iat[i] label = pprint_thing(kls) if label not in used_legends: used_legends.add(label) ax.plot(x, y, color=colors[kls], label=label, **kwds) else: ax.plot(x, y, color=colors[kls], **kwds) if axvlines: for i in x: ax.axvline(i, **axvlines_kwds) ax.set_xticks(x) ax.set_xticklabels(df.columns) ax.set_xlim(x[0], x[-1]) ax.legend(loc='upper right') ax.grid() return ax def lag_plot(series: Series, lag: int=1, ax: Axes | None=None, **kwds) -> Axes: import matplotlib.pyplot as plt kwds.setdefault('c', plt.rcParams['patch.facecolor']) data = series.values y1 = data[:-lag] y2 = data[lag:] if ax is None: ax = plt.gca() ax.set_xlabel('y(t)') ax.set_ylabel(f'y(t + {lag})') ax.scatter(y1, y2, **kwds) return ax def autocorrelation_plot(series: Series, ax: Axes | None=None, **kwds) -> Axes: import matplotlib.pyplot as plt n = len(series) data = np.asarray(series) if ax is None: ax = plt.gca() ax.set_xlim(1, n) ax.set_ylim(-1.0, 1.0) mean = np.mean(data) c0 = np.sum((data - mean) ** 2) / n def r(h): return ((data[:n - h] - mean) * (data[h:] - mean)).sum() / n / c0 x = np.arange(n) + 1 y = [r(loc) for loc in x] z95 = 1.959963984540054 z99 = 2.5758293035489004 ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey') ax.axhline(y=z95 / np.sqrt(n), color='grey') ax.axhline(y=0.0, color='black') ax.axhline(y=-z95 / np.sqrt(n), 
color='grey') ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey') ax.set_xlabel('Lag') ax.set_ylabel('Autocorrelation') ax.plot(x, y, **kwds) if 'label' in kwds: ax.legend() ax.grid() return ax def unpack_single_str_list(keys): if isinstance(keys, list) and len(keys) == 1: keys = keys[0] return keys # File: pandas-main/pandas/plotting/_matplotlib/style.py from __future__ import annotations from collections.abc import Collection, Iterator, Sequence import itertools from typing import TYPE_CHECKING, cast, overload import warnings import matplotlib as mpl import matplotlib.colors import numpy as np from pandas._typing import MatplotlibColor as Color from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_list_like import pandas.core.common as com if TYPE_CHECKING: from matplotlib.colors import Colormap @overload def get_standard_colors(num_colors: int, colormap: Colormap | None=..., color_type: str=..., *, color: dict[str, Color]) -> dict[str, Color]: ... @overload def get_standard_colors(num_colors: int, colormap: Colormap | None=..., color_type: str=..., *, color: Color | Sequence[Color] | None=...) -> list[Color]: ... @overload def get_standard_colors(num_colors: int, colormap: Colormap | None=..., color_type: str=..., *, color: dict[str, Color] | Color | Sequence[Color] | None=...) -> dict[str, Color] | list[Color]: ... def get_standard_colors(num_colors: int, colormap: Colormap | None=None, color_type: str='default', *, color: dict[str, Color] | Color | Sequence[Color] | None=None) -> dict[str, Color] | list[Color]: if isinstance(color, dict): return color colors = _derive_colors(color=color, colormap=colormap, color_type=color_type, num_colors=num_colors) return list(_cycle_colors(colors, num_colors=num_colors)) def _derive_colors(*, color: Color | Collection[Color] | None, colormap: str | Colormap | None, color_type: str, num_colors: int) -> list[Color]: if color is None and colormap is not None: return _get_colors_from_colormap(colormap, num_colors=num_colors) elif color is not None: if colormap is not None: warnings.warn("'color' and 'colormap' cannot be used simultaneously. 
Using 'color'", stacklevel=find_stack_level()) return _get_colors_from_color(color) else: return _get_colors_from_color_type(color_type, num_colors=num_colors) def _cycle_colors(colors: list[Color], num_colors: int) -> Iterator[Color]: max_colors = max(num_colors, len(colors)) yield from itertools.islice(itertools.cycle(colors), max_colors) def _get_colors_from_colormap(colormap: str | Colormap, num_colors: int) -> list[Color]: cmap = _get_cmap_instance(colormap) return [cmap(num) for num in np.linspace(0, 1, num=num_colors)] def _get_cmap_instance(colormap: str | Colormap) -> Colormap: if isinstance(colormap, str): cmap = colormap colormap = mpl.colormaps[colormap] if colormap is None: raise ValueError(f'Colormap {cmap} is not recognized') return colormap def _get_colors_from_color(color: Color | Collection[Color]) -> list[Color]: if len(color) == 0: raise ValueError(f'Invalid color argument: {color}') if _is_single_color(color): color = cast(Color, color) return [color] color = cast(Collection[Color], color) return list(_gen_list_of_colors_from_iterable(color)) def _is_single_color(color: Color | Collection[Color]) -> bool: if isinstance(color, str) and _is_single_string_color(color): return True if _is_floats_color(color): return True return False def _gen_list_of_colors_from_iterable(color: Collection[Color]) -> Iterator[Color]: for x in color: if _is_single_color(x): yield x else: raise ValueError(f'Invalid color {x}') def _is_floats_color(color: Color | Collection[Color]) -> bool: return bool(is_list_like(color) and (len(color) == 3 or len(color) == 4) and all((isinstance(x, (int, float)) for x in color))) def _get_colors_from_color_type(color_type: str, num_colors: int) -> list[Color]: if color_type == 'default': return _get_default_colors(num_colors) elif color_type == 'random': return _get_random_colors(num_colors) else: raise ValueError("color_type must be either 'default' or 'random'") def _get_default_colors(num_colors: int) -> list[Color]: colors = [c['color'] for c in mpl.rcParams['axes.prop_cycle']] return colors[0:num_colors] def _get_random_colors(num_colors: int) -> list[Color]: return [_random_color(num) for num in range(num_colors)] def _random_color(column: int) -> list[float]: rs = com.random_state(column) return rs.rand(3).tolist() def _is_single_string_color(color: Color) -> bool: conv = matplotlib.colors.ColorConverter() try: conv.to_rgba(color) except ValueError: return False else: return True # File: pandas-main/pandas/plotting/_matplotlib/timeseries.py from __future__ import annotations import functools from typing import TYPE_CHECKING, Any, cast import warnings import numpy as np from pandas._libs.tslibs import BaseOffset, Period, to_offset from pandas._libs.tslibs.dtypes import OFFSET_TO_PERIOD_FREQSTR, FreqGroup from pandas.core.dtypes.generic import ABCDatetimeIndex, ABCPeriodIndex, ABCTimedeltaIndex from pandas.io.formats.printing import pprint_thing from pandas.plotting._matplotlib.converter import TimeSeries_DateFormatter, TimeSeries_DateLocator, TimeSeries_TimedeltaFormatter from pandas.tseries.frequencies import get_period_alias, is_subperiod, is_superperiod if TYPE_CHECKING: from datetime import timedelta from matplotlib.axes import Axes from pandas._typing import NDFrameT from pandas import DataFrame, DatetimeIndex, Index, PeriodIndex, Series def maybe_resample(series: Series, ax: Axes, kwargs: dict[str, Any]): if 'how' in kwargs: raise ValueError("'how' is not a valid keyword for plotting functions. 
If plotting multiple objects on shared axes, resample manually first.") (freq, ax_freq) = _get_freq(ax, series) if freq is None: raise ValueError('Cannot use dynamic axis without frequency info') if isinstance(series.index, ABCDatetimeIndex): series = series.to_period(freq=freq) if ax_freq is not None and freq != ax_freq: if is_superperiod(freq, ax_freq): series = series.copy() series.index = series.index.asfreq(ax_freq, how='s') freq = ax_freq elif _is_sup(freq, ax_freq): ser_ts = series.to_timestamp() ser_d = ser_ts.resample('D').last().dropna() ser_freq = ser_d.resample(ax_freq).last().dropna() series = ser_freq.to_period(ax_freq) freq = ax_freq elif is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq): _upsample_others(ax, freq, kwargs) else: raise ValueError('Incompatible frequency conversion') return (freq, series) def _is_sub(f1: str, f2: str) -> bool: return f1.startswith('W') and is_subperiod('D', f2) or (f2.startswith('W') and is_subperiod(f1, 'D')) def _is_sup(f1: str, f2: str) -> bool: return f1.startswith('W') and is_superperiod('D', f2) or (f2.startswith('W') and is_superperiod(f1, 'D')) def _upsample_others(ax: Axes, freq: BaseOffset, kwargs: dict[str, Any]) -> None: legend = ax.get_legend() (lines, labels) = _replot_ax(ax, freq) _replot_ax(ax, freq) other_ax = None if hasattr(ax, 'left_ax'): other_ax = ax.left_ax if hasattr(ax, 'right_ax'): other_ax = ax.right_ax if other_ax is not None: (rlines, rlabels) = _replot_ax(other_ax, freq) lines.extend(rlines) labels.extend(rlabels) if legend is not None and kwargs.get('legend', True) and (len(lines) > 0): title: str | None = legend.get_title().get_text() if title == 'None': title = None ax.legend(lines, labels, loc='best', title=title) def _replot_ax(ax: Axes, freq: BaseOffset): data = getattr(ax, '_plot_data', None) ax._plot_data = [] ax.clear() decorate_axes(ax, freq) lines = [] labels = [] if data is not None: for (series, plotf, kwds) in data: series = series.copy() idx = series.index.asfreq(freq, how='S') series.index = idx ax._plot_data.append((series, plotf, kwds)) if isinstance(plotf, str): from pandas.plotting._matplotlib import PLOT_CLASSES plotf = PLOT_CLASSES[plotf]._plot lines.append(plotf(ax, series.index._mpl_repr(), series.values, **kwds)[0]) labels.append(pprint_thing(series.name)) return (lines, labels) def decorate_axes(ax: Axes, freq: BaseOffset) -> None: if not hasattr(ax, '_plot_data'): ax._plot_data = [] ax.freq = freq xaxis = ax.get_xaxis() xaxis.freq = freq def _get_ax_freq(ax: Axes): ax_freq = getattr(ax, 'freq', None) if ax_freq is None: if hasattr(ax, 'left_ax'): ax_freq = getattr(ax.left_ax, 'freq', None) elif hasattr(ax, 'right_ax'): ax_freq = getattr(ax.right_ax, 'freq', None) if ax_freq is None: shared_axes = ax.get_shared_x_axes().get_siblings(ax) if len(shared_axes) > 1: for shared_ax in shared_axes: ax_freq = getattr(shared_ax, 'freq', None) if ax_freq is not None: break return ax_freq def _get_period_alias(freq: timedelta | BaseOffset | str) -> str | None: if isinstance(freq, BaseOffset): freqstr = freq.name else: freqstr = to_offset(freq, is_period=True).rule_code return get_period_alias(freqstr) def _get_freq(ax: Axes, series: Series): freq = getattr(series.index, 'freq', None) if freq is None: freq = getattr(series.index, 'inferred_freq', None) freq = to_offset(freq, is_period=True) ax_freq = _get_ax_freq(ax) if freq is None: freq = ax_freq freq = _get_period_alias(freq) return (freq, ax_freq) def use_dynamic_x(ax: Axes, data: DataFrame | Series) -> bool: freq = _get_index_freq(data.index) 
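# The index's own frequency (explicit or inferred) is preferred; if it is
# missing, fall back to the frequency already attached to the axis, and give
# up on dynamic ticking when neither is available or when the axis already
# holds lines that were plotted without frequency information.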
ax_freq = _get_ax_freq(ax) if freq is None: freq = ax_freq elif ax_freq is None and len(ax.get_lines()) > 0: return False if freq is None: return False freq_str = _get_period_alias(freq) if freq_str is None: return False if isinstance(data.index, ABCDatetimeIndex): freq_str = OFFSET_TO_PERIOD_FREQSTR.get(freq_str, freq_str) base = to_offset(freq_str, is_period=True)._period_dtype_code x = data.index if base <= FreqGroup.FR_DAY.value: return x[:1].is_normalized period = Period(x[0], freq_str) assert isinstance(period, Period) return period.to_timestamp().tz_localize(x.tz) == x[0] return True def _get_index_freq(index: Index) -> BaseOffset | None: freq = getattr(index, 'freq', None) if freq is None: freq = getattr(index, 'inferred_freq', None) if freq == 'B': weekdays = np.unique(index.dayofweek) if 5 in weekdays or 6 in weekdays: freq = None freq = to_offset(freq) return freq def maybe_convert_index(ax: Axes, data: NDFrameT) -> NDFrameT: if isinstance(data.index, (ABCDatetimeIndex, ABCPeriodIndex)): freq: str | BaseOffset | None = data.index.freq if freq is None: data.index = cast('DatetimeIndex', data.index) freq = data.index.inferred_freq freq = to_offset(freq) if freq is None: freq = _get_ax_freq(ax) if freq is None: raise ValueError('Could not get frequency alias for plotting') freq_str = _get_period_alias(freq) with warnings.catch_warnings(): warnings.filterwarnings('ignore', 'PeriodDtype\\[B\\] is deprecated', category=FutureWarning) if isinstance(data.index, ABCDatetimeIndex): data = data.tz_localize(None).to_period(freq=freq_str) elif isinstance(data.index, ABCPeriodIndex): data.index = data.index.asfreq(freq=freq_str, how='start') return data def _format_coord(freq, t, y) -> str: time_period = Period(ordinal=int(t), freq=freq) return f't = {time_period} y = {y:8f}' def format_dateaxis(subplot, freq: BaseOffset, index: DatetimeIndex | PeriodIndex) -> None: import matplotlib.pyplot as plt if isinstance(index, ABCPeriodIndex): majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot) minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot) subplot.xaxis.set_major_locator(majlocator) subplot.xaxis.set_minor_locator(minlocator) majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot) minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot) subplot.xaxis.set_major_formatter(majformatter) subplot.xaxis.set_minor_formatter(minformatter) subplot.format_coord = functools.partial(_format_coord, freq) elif isinstance(index, ABCTimedeltaIndex): subplot.xaxis.set_major_formatter(TimeSeries_TimedeltaFormatter()) else: raise TypeError('index type not supported') plt.draw_if_interactive() # File: pandas-main/pandas/plotting/_matplotlib/tools.py from __future__ import annotations from math import ceil from typing import TYPE_CHECKING import warnings import matplotlib as mpl import numpy as np from pandas.util._exceptions import find_stack_level from pandas.core.dtypes.common import is_list_like from pandas.core.dtypes.generic import ABCDataFrame, ABCIndex, ABCSeries if TYPE_CHECKING: from collections.abc import Generator, Iterable from matplotlib.axes import Axes from matplotlib.axis import Axis from matplotlib.figure import Figure from matplotlib.lines import Line2D from matplotlib.table import Table from pandas import DataFrame, Series def do_adjust_figure(fig: Figure) -> bool: if not hasattr(fig, 'get_constrained_layout'): 
return False return not fig.get_constrained_layout() def maybe_adjust_figure(fig: Figure, *args, **kwargs) -> None: if do_adjust_figure(fig): fig.subplots_adjust(*args, **kwargs) def format_date_labels(ax: Axes, rot) -> None: for label in ax.get_xticklabels(): label.set_horizontalalignment('right') label.set_rotation(rot) fig = ax.get_figure() if fig is not None: maybe_adjust_figure(fig, bottom=0.2) def table(ax, data: DataFrame | Series, rowLabels=None, colLabels=None, **kwargs) -> Table: if isinstance(data, ABCSeries): data = data.to_frame() elif isinstance(data, ABCDataFrame): pass else: raise ValueError('Input data must be DataFrame or Series') if rowLabels is None: rowLabels = data.index if colLabels is None: colLabels = data.columns cellText = data.values return mpl.table.table(ax, cellText=cellText, rowLabels=rowLabels, colLabels=colLabels, **kwargs) def _get_layout(nplots: int, layout: tuple[int, int] | None=None, layout_type: str='box') -> tuple[int, int]: if layout is not None: if not isinstance(layout, (tuple, list)) or len(layout) != 2: raise ValueError('Layout must be a tuple of (rows, columns)') (nrows, ncols) = layout if nrows == -1 and ncols > 0: layout = (ceil(nplots / ncols), ncols) elif ncols == -1 and nrows > 0: layout = (nrows, ceil(nplots / nrows)) elif ncols <= 0 and nrows <= 0: msg = 'At least one dimension of layout must be positive' raise ValueError(msg) (nrows, ncols) = layout if nrows * ncols < nplots: raise ValueError(f'Layout of {nrows}x{ncols} must be larger than required size {nplots}') return layout if layout_type == 'single': return (1, 1) elif layout_type == 'horizontal': return (1, nplots) elif layout_type == 'vertical': return (nplots, 1) layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)} try: return layouts[nplots] except KeyError: k = 1 while k ** 2 < nplots: k += 1 if (k - 1) * k >= nplots: return (k, k - 1) else: return (k, k) def create_subplots(naxes: int, sharex: bool=False, sharey: bool=False, squeeze: bool=True, subplot_kw=None, ax=None, layout=None, layout_type: str='box', **fig_kw): import matplotlib.pyplot as plt if subplot_kw is None: subplot_kw = {} if ax is None: fig = plt.figure(**fig_kw) else: if is_list_like(ax): if squeeze: ax = np.fromiter(flatten_axes(ax), dtype=object) if layout is not None: warnings.warn('When passing multiple axes, layout keyword is ignored.', UserWarning, stacklevel=find_stack_level()) if sharex or sharey: warnings.warn('When passing multiple axes, sharex and sharey are ignored. 
These settings must be specified when creating axes.', UserWarning, stacklevel=find_stack_level()) if ax.size == naxes: fig = ax.flat[0].get_figure() return (fig, ax) else: raise ValueError(f'The number of passed axes must be {naxes}, the same as the output plot') fig = ax.get_figure() if naxes == 1: if squeeze: return (fig, ax) else: return (fig, np.fromiter(flatten_axes(ax), dtype=object)) else: warnings.warn('To output multiple subplots, the figure containing the passed axes is being cleared.', UserWarning, stacklevel=find_stack_level()) fig.clear() (nrows, ncols) = _get_layout(naxes, layout=layout, layout_type=layout_type) nplots = nrows * ncols axarr = np.empty(nplots, dtype=object) ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw) if sharex: subplot_kw['sharex'] = ax0 if sharey: subplot_kw['sharey'] = ax0 axarr[0] = ax0 for i in range(1, nplots): kwds = subplot_kw.copy() if i >= naxes: kwds['sharex'] = None kwds['sharey'] = None ax = fig.add_subplot(nrows, ncols, i + 1, **kwds) axarr[i] = ax if naxes != nplots: for ax in axarr[naxes:]: ax.set_visible(False) handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey) if squeeze: if nplots == 1: axes = axarr[0] else: axes = axarr.reshape(nrows, ncols).squeeze() else: axes = axarr.reshape(nrows, ncols) return (fig, axes) def _remove_labels_from_axis(axis: Axis) -> None: for t in axis.get_majorticklabels(): t.set_visible(False) if isinstance(axis.get_minor_locator(), mpl.ticker.NullLocator): axis.set_minor_locator(mpl.ticker.AutoLocator()) if isinstance(axis.get_minor_formatter(), mpl.ticker.NullFormatter): axis.set_minor_formatter(mpl.ticker.FormatStrFormatter('')) for t in axis.get_minorticklabels(): t.set_visible(False) axis.get_label().set_visible(False) def _has_externally_shared_axis(ax1: Axes, compare_axis: str) -> bool: if compare_axis == 'x': axes = ax1.get_shared_x_axes() elif compare_axis == 'y': axes = ax1.get_shared_y_axes() else: raise ValueError("_has_externally_shared_axis() needs 'x' or 'y' as a second parameter") axes_siblings = axes.get_siblings(ax1) ax1_points = ax1.get_position().get_points() for ax2 in axes_siblings: if not np.array_equal(ax1_points, ax2.get_position().get_points()): return True return False def handle_shared_axes(axarr: Iterable[Axes], nplots: int, naxes: int, nrows: int, ncols: int, sharex: bool, sharey: bool) -> None: if nplots > 1: row_num = lambda x: x.get_subplotspec().rowspan.start col_num = lambda x: x.get_subplotspec().colspan.start is_first_col = lambda x: x.get_subplotspec().is_first_col() if nrows > 1: try: layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool_) for ax in axarr: layout[row_num(ax), col_num(ax)] = ax.get_visible() for ax in axarr: if not layout[row_num(ax) + 1, col_num(ax)]: continue if sharex or _has_externally_shared_axis(ax, 'x'): _remove_labels_from_axis(ax.xaxis) except IndexError: is_last_row = lambda x: x.get_subplotspec().is_last_row() for ax in axarr: if is_last_row(ax): continue if sharex or _has_externally_shared_axis(ax, 'x'): _remove_labels_from_axis(ax.xaxis) if ncols > 1: for ax in axarr: if is_first_col(ax): continue if sharey or _has_externally_shared_axis(ax, 'y'): _remove_labels_from_axis(ax.yaxis) def flatten_axes(axes: Axes | Iterable[Axes]) -> Generator[Axes, None, None]: if not is_list_like(axes): yield axes elif isinstance(axes, (np.ndarray, ABCIndex)): yield from np.asarray(axes).reshape(-1) else: yield from axes def set_ticks_props(axes: Axes | Iterable[Axes], xlabelsize: int | None=None, xrot=None, ylabelsize: int | None=None, 
yrot=None): for ax in flatten_axes(axes): if xlabelsize is not None: mpl.artist.setp(ax.get_xticklabels(), fontsize=xlabelsize) if xrot is not None: mpl.artist.setp(ax.get_xticklabels(), rotation=xrot) if ylabelsize is not None: mpl.artist.setp(ax.get_yticklabels(), fontsize=ylabelsize) if yrot is not None: mpl.artist.setp(ax.get_yticklabels(), rotation=yrot) return axes def get_all_lines(ax: Axes) -> list[Line2D]: lines = ax.get_lines() if hasattr(ax, 'right_ax'): lines += ax.right_ax.get_lines() if hasattr(ax, 'left_ax'): lines += ax.left_ax.get_lines() return lines def get_xlim(lines: Iterable[Line2D]) -> tuple[float, float]: (left, right) = (np.inf, -np.inf) for line in lines: x = line.get_xdata(orig=False) left = min(np.nanmin(x), left) right = max(np.nanmax(x), right) return (left, right) # File: pandas-main/pandas/plotting/_misc.py from __future__ import annotations from contextlib import contextmanager from typing import TYPE_CHECKING, Any from pandas.plotting._core import _get_plot_backend if TYPE_CHECKING: from collections.abc import Generator, Mapping from matplotlib.axes import Axes from matplotlib.colors import Colormap from matplotlib.figure import Figure from matplotlib.table import Table import numpy as np from pandas import DataFrame, Series def table(ax: Axes, data: DataFrame | Series, **kwargs) -> Table: plot_backend = _get_plot_backend('matplotlib') return plot_backend.table(ax=ax, data=data, rowLabels=None, colLabels=None, **kwargs) def register() -> None: plot_backend = _get_plot_backend('matplotlib') plot_backend.register() def deregister() -> None: plot_backend = _get_plot_backend('matplotlib') plot_backend.deregister() def scatter_matrix(frame: DataFrame, alpha: float=0.5, figsize: tuple[float, float] | None=None, ax: Axes | None=None, grid: bool=False, diagonal: str='hist', marker: str='.', density_kwds: Mapping[str, Any] | None=None, hist_kwds: Mapping[str, Any] | None=None, range_padding: float=0.05, **kwargs) -> np.ndarray: plot_backend = _get_plot_backend('matplotlib') return plot_backend.scatter_matrix(frame=frame, alpha=alpha, figsize=figsize, ax=ax, grid=grid, diagonal=diagonal, marker=marker, density_kwds=density_kwds, hist_kwds=hist_kwds, range_padding=range_padding, **kwargs) def radviz(frame: DataFrame, class_column: str, ax: Axes | None=None, color: list[str] | tuple[str, ...] | None=None, colormap: Colormap | str | None=None, **kwds) -> Axes: plot_backend = _get_plot_backend('matplotlib') return plot_backend.radviz(frame=frame, class_column=class_column, ax=ax, color=color, colormap=colormap, **kwds) def andrews_curves(frame: DataFrame, class_column: str, ax: Axes | None=None, samples: int=200, color: list[str] | tuple[str, ...] | None=None, colormap: Colormap | str | None=None, **kwargs) -> Axes: plot_backend = _get_plot_backend('matplotlib') return plot_backend.andrews_curves(frame=frame, class_column=class_column, ax=ax, samples=samples, color=color, colormap=colormap, **kwargs) def bootstrap_plot(series: Series, fig: Figure | None=None, size: int=50, samples: int=500, **kwds) -> Figure: plot_backend = _get_plot_backend('matplotlib') return plot_backend.bootstrap_plot(series=series, fig=fig, size=size, samples=samples, **kwds) def parallel_coordinates(frame: DataFrame, class_column: str, cols: list[str] | None=None, ax: Axes | None=None, color: list[str] | tuple[str, ...] 
| None=None, use_columns: bool=False, xticks: list | tuple | None=None, colormap: Colormap | str | None=None, axvlines: bool=True, axvlines_kwds: Mapping[str, Any] | None=None, sort_labels: bool=False, **kwargs) -> Axes: plot_backend = _get_plot_backend('matplotlib') return plot_backend.parallel_coordinates(frame=frame, class_column=class_column, cols=cols, ax=ax, color=color, use_columns=use_columns, xticks=xticks, colormap=colormap, axvlines=axvlines, axvlines_kwds=axvlines_kwds, sort_labels=sort_labels, **kwargs) def lag_plot(series: Series, lag: int=1, ax: Axes | None=None, **kwds) -> Axes: plot_backend = _get_plot_backend('matplotlib') return plot_backend.lag_plot(series=series, lag=lag, ax=ax, **kwds) def autocorrelation_plot(series: Series, ax: Axes | None=None, **kwargs) -> Axes: plot_backend = _get_plot_backend('matplotlib') return plot_backend.autocorrelation_plot(series=series, ax=ax, **kwargs) class _Options(dict): _ALIASES = {'x_compat': 'xaxis.compat'} _DEFAULT_KEYS = ['xaxis.compat'] def __init__(self) -> None: super().__setitem__('xaxis.compat', False) def __getitem__(self, key): key = self._get_canonical_key(key) if key not in self: raise ValueError(f'{key} is not a valid pandas plotting option') return super().__getitem__(key) def __setitem__(self, key, value) -> None: key = self._get_canonical_key(key) super().__setitem__(key, value) def __delitem__(self, key) -> None: key = self._get_canonical_key(key) if key in self._DEFAULT_KEYS: raise ValueError(f'Cannot remove default parameter {key}') super().__delitem__(key) def __contains__(self, key) -> bool: key = self._get_canonical_key(key) return super().__contains__(key) def reset(self) -> None: self.__init__() def _get_canonical_key(self, key: str) -> str: return self._ALIASES.get(key, key) @contextmanager def use(self, key, value) -> Generator[_Options, None, None]: old_value = self[key] try: self[key] = value yield self finally: self[key] = old_value plot_params = _Options() # File: pandas-main/pandas/tseries/frequencies.py from __future__ import annotations from typing import TYPE_CHECKING import numpy as np from pandas._libs import lib from pandas._libs.algos import unique_deltas from pandas._libs.tslibs import Timestamp, get_unit_from_dtype, periods_per_day, tz_convert_from_utc from pandas._libs.tslibs.ccalendar import DAYS, MONTH_ALIASES, MONTH_NUMBERS, MONTHS, int_to_weekday from pandas._libs.tslibs.dtypes import OFFSET_TO_PERIOD_FREQSTR from pandas._libs.tslibs.fields import build_field_sarray, month_position_check from pandas._libs.tslibs.offsets import DateOffset, Day, to_offset from pandas._libs.tslibs.parsing import get_rule_month from pandas.util._decorators import cache_readonly from pandas.core.dtypes.common import is_numeric_dtype from pandas.core.dtypes.dtypes import DatetimeTZDtype, PeriodDtype from pandas.core.dtypes.generic import ABCIndex, ABCSeries from pandas.core.algorithms import unique if TYPE_CHECKING: from pandas._typing import npt from pandas import DatetimeIndex, Series, TimedeltaIndex from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin _need_suffix = ['QS', 'BQE', 'BQS', 'YS', 'BYE', 'BYS'] for _prefix in _need_suffix: for _m in MONTHS: key = f'{_prefix}-{_m}' OFFSET_TO_PERIOD_FREQSTR[key] = OFFSET_TO_PERIOD_FREQSTR[_prefix] for _prefix in ['Y', 'Q']: for _m in MONTHS: _alias = f'{_prefix}-{_m}' OFFSET_TO_PERIOD_FREQSTR[_alias] = _alias for _d in DAYS: OFFSET_TO_PERIOD_FREQSTR[f'W-{_d}'] = f'W-{_d}' def get_period_alias(offset_str: str) -> str | None: return 
OFFSET_TO_PERIOD_FREQSTR.get(offset_str, None) def infer_freq(index: DatetimeIndex | TimedeltaIndex | Series | DatetimeLikeArrayMixin) -> str | None: from pandas.core.api import DatetimeIndex if isinstance(index, ABCSeries): values = index._values if not (lib.is_np_dtype(values.dtype, 'mM') or isinstance(values.dtype, DatetimeTZDtype) or values.dtype == object): raise TypeError(f'cannot infer freq from a non-convertible dtype on a Series of {index.dtype}') index = values inferer: _FrequencyInferer if not hasattr(index, 'dtype'): pass elif isinstance(index.dtype, PeriodDtype): raise TypeError('PeriodIndex given. Check the `freq` attribute instead of using infer_freq.') elif lib.is_np_dtype(index.dtype, 'm'): inferer = _TimedeltaFrequencyInferer(index) return inferer.get_freq() elif is_numeric_dtype(index.dtype): raise TypeError(f'cannot infer freq from a non-convertible index of dtype {index.dtype}') if not isinstance(index, DatetimeIndex): index = DatetimeIndex(index) inferer = _FrequencyInferer(index) return inferer.get_freq() class _FrequencyInferer: def __init__(self, index) -> None: self.index = index self.i8values = index.asi8 if isinstance(index, ABCIndex): self._creso = get_unit_from_dtype(index._data._ndarray.dtype) else: self._creso = get_unit_from_dtype(index._ndarray.dtype) if hasattr(index, 'tz'): if index.tz is not None: self.i8values = tz_convert_from_utc(self.i8values, index.tz, reso=self._creso) if len(index) < 3: raise ValueError('Need at least 3 dates to infer frequency') self.is_monotonic = self.index._is_monotonic_increasing or self.index._is_monotonic_decreasing @cache_readonly def deltas(self) -> npt.NDArray[np.int64]: return unique_deltas(self.i8values) @cache_readonly def deltas_asi8(self) -> npt.NDArray[np.int64]: return unique_deltas(self.index.asi8) @cache_readonly def is_unique(self) -> bool: return len(self.deltas) == 1 @cache_readonly def is_unique_asi8(self) -> bool: return len(self.deltas_asi8) == 1 def get_freq(self) -> str | None: if not self.is_monotonic or not self.index._is_unique: return None delta = self.deltas[0] ppd = periods_per_day(self._creso) if delta and _is_multiple(delta, ppd): return self._infer_daily_rule() if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]): return 'bh' if not self.is_unique_asi8: return None delta = self.deltas_asi8[0] pph = ppd // 24 ppm = pph // 60 pps = ppm // 60 if _is_multiple(delta, pph): return _maybe_add_count('h', delta / pph) elif _is_multiple(delta, ppm): return _maybe_add_count('min', delta / ppm) elif _is_multiple(delta, pps): return _maybe_add_count('s', delta / pps) elif _is_multiple(delta, pps // 1000): return _maybe_add_count('ms', delta / (pps // 1000)) elif _is_multiple(delta, pps // 1000000): return _maybe_add_count('us', delta / (pps // 1000000)) else: return _maybe_add_count('ns', delta) @cache_readonly def day_deltas(self) -> list[int]: ppd = periods_per_day(self._creso) return [x / ppd for x in self.deltas] @cache_readonly def hour_deltas(self) -> list[int]: pph = periods_per_day(self._creso) // 24 return [x / pph for x in self.deltas] @cache_readonly def fields(self) -> np.ndarray: return build_field_sarray(self.i8values, reso=self._creso) @cache_readonly def rep_stamp(self) -> Timestamp: return Timestamp(self.i8values[0], unit=self.index.unit) def month_position_check(self) -> str | None: return month_position_check(self.fields, self.index.dayofweek) @cache_readonly def mdiffs(self) -> npt.NDArray[np.int64]: nmonths = self.fields['Y'] * 12 + self.fields['M'] return 
unique_deltas(nmonths.astype('i8')) @cache_readonly def ydiffs(self) -> npt.NDArray[np.int64]: return unique_deltas(self.fields['Y'].astype('i8')) def _infer_daily_rule(self) -> str | None: annual_rule = self._get_annual_rule() if annual_rule: nyears = self.ydiffs[0] month = MONTH_ALIASES[self.rep_stamp.month] alias = f'{annual_rule}-{month}' return _maybe_add_count(alias, nyears) quarterly_rule = self._get_quarterly_rule() if quarterly_rule: nquarters = self.mdiffs[0] / 3 mod_dict = {0: 12, 2: 11, 1: 10} month = MONTH_ALIASES[mod_dict[self.rep_stamp.month % 3]] alias = f'{quarterly_rule}-{month}' return _maybe_add_count(alias, nquarters) monthly_rule = self._get_monthly_rule() if monthly_rule: return _maybe_add_count(monthly_rule, self.mdiffs[0]) if self.is_unique: return self._get_daily_rule() if self._is_business_daily(): return 'B' wom_rule = self._get_wom_rule() if wom_rule: return wom_rule return None def _get_daily_rule(self) -> str | None: ppd = periods_per_day(self._creso) days = self.deltas[0] / ppd if days % 7 == 0: wd = int_to_weekday[self.rep_stamp.weekday()] alias = f'W-{wd}' return _maybe_add_count(alias, days / 7) else: return _maybe_add_count('D', days) def _get_annual_rule(self) -> str | None: if len(self.ydiffs) > 1: return None if len(unique(self.fields['M'])) > 1: return None pos_check = self.month_position_check() if pos_check is None: return None else: return {'cs': 'YS', 'bs': 'BYS', 'ce': 'YE', 'be': 'BYE'}.get(pos_check) def _get_quarterly_rule(self) -> str | None: if len(self.mdiffs) > 1: return None if not self.mdiffs[0] % 3 == 0: return None pos_check = self.month_position_check() if pos_check is None: return None else: return {'cs': 'QS', 'bs': 'BQS', 'ce': 'QE', 'be': 'BQE'}.get(pos_check) def _get_monthly_rule(self) -> str | None: if len(self.mdiffs) > 1: return None pos_check = self.month_position_check() if pos_check is None: return None else: return {'cs': 'MS', 'bs': 'BMS', 'ce': 'ME', 'be': 'BME'}.get(pos_check) def _is_business_daily(self) -> bool: if self.day_deltas != [1, 3]: return False first_weekday = self.index[0].weekday() shifts = np.diff(self.i8values) ppd = periods_per_day(self._creso) shifts = np.floor_divide(shifts, ppd) weekdays = np.mod(first_weekday + np.cumsum(shifts), 7) return bool(np.all((weekdays == 0) & (shifts == 3) | (weekdays > 0) & (weekdays <= 4) & (shifts == 1))) def _get_wom_rule(self) -> str | None: weekdays = unique(self.index.weekday) if len(weekdays) > 1: return None week_of_months = unique((self.index.day - 1) // 7) week_of_months = week_of_months[week_of_months < 4] if len(week_of_months) == 0 or len(week_of_months) > 1: return None week = week_of_months[0] + 1 wd = int_to_weekday[weekdays[0]] return f'WOM-{week}{wd}' class _TimedeltaFrequencyInferer(_FrequencyInferer): def _infer_daily_rule(self): if self.is_unique: return self._get_daily_rule() def _is_multiple(us, mult: int) -> bool: return us % mult == 0 def _maybe_add_count(base: str, count: float) -> str: if count != 1: assert count == int(count) count = int(count) return f'{count}{base}' else: return base def is_subperiod(source, target) -> bool: if target is None or source is None: return False source = _maybe_coerce_freq(source) target = _maybe_coerce_freq(target) if _is_annual(target): if _is_quarterly(source): return _quarter_months_conform(get_rule_month(source), get_rule_month(target)) return source in {'D', 'C', 'B', 'M', 'h', 'min', 's', 'ms', 'us', 'ns'} elif _is_quarterly(target): return source in {'D', 'C', 'B', 'M', 'h', 'min', 's', 'ms', 'us', 'ns'} 
elif _is_monthly(target): return source in {'D', 'C', 'B', 'h', 'min', 's', 'ms', 'us', 'ns'} elif _is_weekly(target): return source in {target, 'D', 'C', 'B', 'h', 'min', 's', 'ms', 'us', 'ns'} elif target == 'B': return source in {'B', 'h', 'min', 's', 'ms', 'us', 'ns'} elif target == 'C': return source in {'C', 'h', 'min', 's', 'ms', 'us', 'ns'} elif target == 'D': return source in {'D', 'h', 'min', 's', 'ms', 'us', 'ns'} elif target == 'h': return source in {'h', 'min', 's', 'ms', 'us', 'ns'} elif target == 'min': return source in {'min', 's', 'ms', 'us', 'ns'} elif target == 's': return source in {'s', 'ms', 'us', 'ns'} elif target == 'ms': return source in {'ms', 'us', 'ns'} elif target == 'us': return source in {'us', 'ns'} elif target == 'ns': return source in {'ns'} else: return False def is_superperiod(source, target) -> bool: if target is None or source is None: return False source = _maybe_coerce_freq(source) target = _maybe_coerce_freq(target) if _is_annual(source): if _is_annual(target): return get_rule_month(source) == get_rule_month(target) if _is_quarterly(target): smonth = get_rule_month(source) tmonth = get_rule_month(target) return _quarter_months_conform(smonth, tmonth) return target in {'D', 'C', 'B', 'M', 'h', 'min', 's', 'ms', 'us', 'ns'} elif _is_quarterly(source): return target in {'D', 'C', 'B', 'M', 'h', 'min', 's', 'ms', 'us', 'ns'} elif _is_monthly(source): return target in {'D', 'C', 'B', 'h', 'min', 's', 'ms', 'us', 'ns'} elif _is_weekly(source): return target in {source, 'D', 'C', 'B', 'h', 'min', 's', 'ms', 'us', 'ns'} elif source == 'B': return target in {'D', 'C', 'B', 'h', 'min', 's', 'ms', 'us', 'ns'} elif source == 'C': return target in {'D', 'C', 'B', 'h', 'min', 's', 'ms', 'us', 'ns'} elif source == 'D': return target in {'D', 'C', 'B', 'h', 'min', 's', 'ms', 'us', 'ns'} elif source == 'h': return target in {'h', 'min', 's', 'ms', 'us', 'ns'} elif source == 'min': return target in {'min', 's', 'ms', 'us', 'ns'} elif source == 's': return target in {'s', 'ms', 'us', 'ns'} elif source == 'ms': return target in {'ms', 'us', 'ns'} elif source == 'us': return target in {'us', 'ns'} elif source == 'ns': return target in {'ns'} else: return False def _maybe_coerce_freq(code) -> str: assert code is not None if isinstance(code, DateOffset): code = PeriodDtype(to_offset(code.name))._freqstr if code in {'h', 'min', 's', 'ms', 'us', 'ns'}: return code else: return code.upper() def _quarter_months_conform(source: str, target: str) -> bool: snum = MONTH_NUMBERS[source] tnum = MONTH_NUMBERS[target] return snum % 3 == tnum % 3 def _is_annual(rule: str) -> bool: rule = rule.upper() return rule == 'Y' or rule.startswith('Y-') def _is_quarterly(rule: str) -> bool: rule = rule.upper() return rule == 'Q' or rule.startswith(('Q-', 'BQ')) def _is_monthly(rule: str) -> bool: rule = rule.upper() return rule in ('M', 'BM') def _is_weekly(rule: str) -> bool: rule = rule.upper() return rule == 'W' or rule.startswith('W-') __all__ = ['Day', 'get_period_alias', 'infer_freq', 'is_subperiod', 'is_superperiod', 'to_offset'] # File: pandas-main/pandas/tseries/holiday.py from __future__ import annotations from datetime import datetime, timedelta from typing import TYPE_CHECKING import warnings from dateutil.relativedelta import FR, MO, SA, SU, TH, TU, WE import numpy as np from pandas._libs.tslibs.offsets import BaseOffset from pandas.errors import PerformanceWarning from pandas import DateOffset, DatetimeIndex, Series, Timestamp, concat, date_range from pandas.tseries.offsets import 
Day, Easter if TYPE_CHECKING: from collections.abc import Callable def next_monday(dt: datetime) -> datetime: if dt.weekday() == 5: return dt + timedelta(2) elif dt.weekday() == 6: return dt + timedelta(1) return dt def next_monday_or_tuesday(dt: datetime) -> datetime: dow = dt.weekday() if dow in (5, 6): return dt + timedelta(2) if dow == 0: return dt + timedelta(1) return dt def previous_friday(dt: datetime) -> datetime: if dt.weekday() == 5: return dt - timedelta(1) elif dt.weekday() == 6: return dt - timedelta(2) return dt def sunday_to_monday(dt: datetime) -> datetime: if dt.weekday() == 6: return dt + timedelta(1) return dt def weekend_to_monday(dt: datetime) -> datetime: if dt.weekday() == 6: return dt + timedelta(1) elif dt.weekday() == 5: return dt + timedelta(2) return dt def nearest_workday(dt: datetime) -> datetime: if dt.weekday() == 5: return dt - timedelta(1) elif dt.weekday() == 6: return dt + timedelta(1) return dt def next_workday(dt: datetime) -> datetime: dt += timedelta(days=1) while dt.weekday() > 4: dt += timedelta(days=1) return dt def previous_workday(dt: datetime) -> datetime: dt -= timedelta(days=1) while dt.weekday() > 4: dt -= timedelta(days=1) return dt def before_nearest_workday(dt: datetime) -> datetime: return previous_workday(nearest_workday(dt)) def after_nearest_workday(dt: datetime) -> datetime: return next_workday(nearest_workday(dt)) class Holiday: start_date: Timestamp | None end_date: Timestamp | None days_of_week: tuple[int, ...] | None def __init__(self, name: str, year=None, month=None, day=None, offset: BaseOffset | list[BaseOffset] | None=None, observance: Callable | None=None, start_date=None, end_date=None, days_of_week: tuple | None=None) -> None: if offset is not None: if observance is not None: raise NotImplementedError('Cannot use both offset and observance.') if not (isinstance(offset, BaseOffset) or (isinstance(offset, list) and all((isinstance(off, BaseOffset) for off in offset)))): raise ValueError('Only BaseOffsets and flat lists of them are supported for offset.') self.name = name self.year = year self.month = month self.day = day self.offset = offset self.start_date = Timestamp(start_date) if start_date is not None else start_date self.end_date = Timestamp(end_date) if end_date is not None else end_date self.observance = observance assert days_of_week is None or type(days_of_week) == tuple self.days_of_week = days_of_week def __repr__(self) -> str: info = '' if self.year is not None: info += f'year={self.year}, ' info += f'month={self.month}, day={self.day}, ' if self.offset is not None: info += f'offset={self.offset}' if self.observance is not None: info += f'observance={self.observance}' repr = f'Holiday: {self.name} ({info})' return repr def dates(self, start_date, end_date, return_name: bool=False) -> Series | DatetimeIndex: start_date = Timestamp(start_date) end_date = Timestamp(end_date) filter_start_date = start_date filter_end_date = end_date if self.year is not None: dt = Timestamp(datetime(self.year, self.month, self.day)) dti = DatetimeIndex([dt]) if return_name: return Series(self.name, index=dti) else: return dti dates = self._reference_dates(start_date, end_date) holiday_dates = self._apply_rule(dates) if self.days_of_week is not None: holiday_dates = holiday_dates[np.isin(holiday_dates.dayofweek, self.days_of_week).ravel()] if self.start_date is not None: filter_start_date = max(self.start_date.tz_localize(filter_start_date.tz), filter_start_date) if self.end_date is not None: filter_end_date = 
min(self.end_date.tz_localize(filter_end_date.tz), filter_end_date) holiday_dates = holiday_dates[(holiday_dates >= filter_start_date) & (holiday_dates <= filter_end_date)] if return_name: return Series(self.name, index=holiday_dates) return holiday_dates def _reference_dates(self, start_date: Timestamp, end_date: Timestamp) -> DatetimeIndex: if self.start_date is not None: start_date = self.start_date.tz_localize(start_date.tz) if self.end_date is not None: end_date = self.end_date.tz_localize(start_date.tz) year_offset = DateOffset(years=1) reference_start_date = Timestamp(datetime(start_date.year - 1, self.month, self.day)) reference_end_date = Timestamp(datetime(end_date.year + 1, self.month, self.day)) dates = date_range(start=reference_start_date, end=reference_end_date, freq=year_offset, tz=start_date.tz) return dates def _apply_rule(self, dates: DatetimeIndex) -> DatetimeIndex: if dates.empty: return dates.copy() if self.observance is not None: return dates.map(lambda d: self.observance(d)) if self.offset is not None: if not isinstance(self.offset, list): offsets = [self.offset] else: offsets = self.offset for offset in offsets: with warnings.catch_warnings(): warnings.simplefilter('ignore', PerformanceWarning) dates += offset return dates holiday_calendars = {} def register(cls) -> None: try: name = cls.name except AttributeError: name = cls.__name__ holiday_calendars[name] = cls def get_calendar(name: str) -> AbstractHolidayCalendar: return holiday_calendars[name]() class HolidayCalendarMetaClass(type): def __new__(cls, clsname: str, bases, attrs): calendar_class = super().__new__(cls, clsname, bases, attrs) register(calendar_class) return calendar_class class AbstractHolidayCalendar(metaclass=HolidayCalendarMetaClass): rules: list[Holiday] = [] start_date = Timestamp(datetime(1970, 1, 1)) end_date = Timestamp(datetime(2200, 12, 31)) _cache = None def __init__(self, name: str='', rules=None) -> None: super().__init__() if not name: name = type(self).__name__ self.name = name if rules is not None: self.rules = rules def rule_from_name(self, name: str) -> Holiday | None: for rule in self.rules: if rule.name == name: return rule return None def holidays(self, start=None, end=None, return_name: bool=False): if self.rules is None: raise Exception(f'Holiday Calendar {self.name} does not have any rules specified') if start is None: start = AbstractHolidayCalendar.start_date if end is None: end = AbstractHolidayCalendar.end_date start = Timestamp(start) end = Timestamp(end) if self._cache is None or start < self._cache[0] or end > self._cache[1]: pre_holidays = [rule.dates(start, end, return_name=True) for rule in self.rules] if pre_holidays: holidays = concat(pre_holidays) else: holidays = Series(index=DatetimeIndex([]), dtype=object) self._cache = (start, end, holidays.sort_index()) holidays = self._cache[2] holidays = holidays[start:end] if return_name: return holidays else: return holidays.index @staticmethod def merge_class(base, other): try: other = other.rules except AttributeError: pass if not isinstance(other, list): other = [other] other_holidays = {holiday.name: holiday for holiday in other} try: base = base.rules except AttributeError: pass if not isinstance(base, list): base = [base] base_holidays = {holiday.name: holiday for holiday in base} other_holidays.update(base_holidays) return list(other_holidays.values()) def merge(self, other, inplace: bool=False): holidays = self.merge_class(self, other) if inplace: self.rules = holidays else: return holidays USMemorialDay = 
Holiday('Memorial Day', month=5, day=31, offset=DateOffset(weekday=MO(-1))) USLaborDay = Holiday('Labor Day', month=9, day=1, offset=DateOffset(weekday=MO(1))) USColumbusDay = Holiday('Columbus Day', month=10, day=1, offset=DateOffset(weekday=MO(2))) USThanksgivingDay = Holiday('Thanksgiving Day', month=11, day=1, offset=DateOffset(weekday=TH(4))) USMartinLutherKingJr = Holiday('Birthday of Martin Luther King, Jr.', start_date=datetime(1986, 1, 1), month=1, day=1, offset=DateOffset(weekday=MO(3))) USPresidentsDay = Holiday("Washington's Birthday", month=2, day=1, offset=DateOffset(weekday=MO(3))) GoodFriday = Holiday('Good Friday', month=1, day=1, offset=[Easter(), Day(-2)]) EasterMonday = Holiday('Easter Monday', month=1, day=1, offset=[Easter(), Day(1)]) class USFederalHolidayCalendar(AbstractHolidayCalendar): rules = [Holiday("New Year's Day", month=1, day=1, observance=nearest_workday), USMartinLutherKingJr, USPresidentsDay, USMemorialDay, Holiday('Juneteenth National Independence Day', month=6, day=19, start_date='2021-06-18', observance=nearest_workday), Holiday('Independence Day', month=7, day=4, observance=nearest_workday), USLaborDay, USColumbusDay, Holiday('Veterans Day', month=11, day=11, observance=nearest_workday), USThanksgivingDay, Holiday('Christmas Day', month=12, day=25, observance=nearest_workday)] def HolidayCalendarFactory(name: str, base, other, base_class=AbstractHolidayCalendar): rules = AbstractHolidayCalendar.merge_class(base, other) calendar_class = type(name, (base_class,), {'rules': rules, 'name': name}) return calendar_class __all__ = ['after_nearest_workday', 'before_nearest_workday', 'FR', 'get_calendar', 'HolidayCalendarFactory', 'MO', 'nearest_workday', 'next_monday', 'next_monday_or_tuesday', 'next_workday', 'previous_friday', 'previous_workday', 'register', 'SA', 'SU', 'sunday_to_monday', 'TH', 'TU', 'WE', 'weekend_to_monday'] # File: pandas-main/pandas/tseries/offsets.py from __future__ import annotations from pandas._libs.tslibs.offsets import FY5253, BaseOffset, BDay, BMonthBegin, BMonthEnd, BQuarterBegin, BQuarterEnd, BusinessDay, BusinessHour, BusinessMonthBegin, BusinessMonthEnd, BYearBegin, BYearEnd, CBMonthBegin, CBMonthEnd, CDay, CustomBusinessDay, CustomBusinessHour, CustomBusinessMonthBegin, CustomBusinessMonthEnd, DateOffset, Day, Easter, FY5253Quarter, Hour, LastWeekOfMonth, Micro, Milli, Minute, MonthBegin, MonthEnd, Nano, QuarterBegin, QuarterEnd, Second, SemiMonthBegin, SemiMonthEnd, Tick, Week, WeekOfMonth, YearBegin, YearEnd __all__ = ['Day', 'BaseOffset', 'BusinessDay', 'BusinessMonthBegin', 'BusinessMonthEnd', 'BDay', 'CustomBusinessDay', 'CustomBusinessMonthBegin', 'CustomBusinessMonthEnd', 'CDay', 'CBMonthEnd', 'CBMonthBegin', 'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd', 'SemiMonthEnd', 'SemiMonthBegin', 'BusinessHour', 'CustomBusinessHour', 'YearBegin', 'BYearBegin', 'YearEnd', 'BYearEnd', 'QuarterBegin', 'BQuarterBegin', 'QuarterEnd', 'BQuarterEnd', 'LastWeekOfMonth', 'FY5253Quarter', 'FY5253', 'Week', 'WeekOfMonth', 'Easter', 'Tick', 'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano', 'DateOffset'] # File: pandas-main/pandas/util/__init__.py def __getattr__(key: str): if key == 'hash_array': from pandas.core.util.hashing import hash_array return hash_array if key == 'hash_pandas_object': from pandas.core.util.hashing import hash_pandas_object return hash_pandas_object if key == 'Appender': from pandas.util._decorators import Appender return Appender if key == 'Substitution': from pandas.util._decorators import 
Substitution return Substitution if key == 'cache_readonly': from pandas.util._decorators import cache_readonly return cache_readonly raise AttributeError(f"module 'pandas.util' has no attribute '{key}'") def __dir__() -> list[str]: return list(globals().keys()) + ['hash_array', 'hash_pandas_object'] # File: pandas-main/pandas/util/_decorators.py from __future__ import annotations from functools import wraps import inspect from textwrap import dedent from typing import TYPE_CHECKING, Any, cast import warnings from pandas._libs.properties import cache_readonly from pandas._typing import F, T from pandas.util._exceptions import find_stack_level if TYPE_CHECKING: from collections.abc import Callable, Mapping def deprecate(name: str, alternative: Callable[..., Any], version: str, alt_name: str | None=None, klass: type[Warning] | None=None, stacklevel: int=2, msg: str | None=None) -> Callable[[F], F]: alt_name = alt_name or alternative.__name__ klass = klass or FutureWarning warning_msg = msg or f'{name} is deprecated, use {alt_name} instead.' @wraps(alternative) def wrapper(*args, **kwargs) -> Callable[..., Any]: warnings.warn(warning_msg, klass, stacklevel=stacklevel) return alternative(*args, **kwargs) msg = msg or f'Use `{alt_name}` instead.' doc_error_msg = f'deprecate needs a correctly formatted docstring in the target function (should have a one liner short summary, and opening quotes should be in their own line). Found:\n{alternative.__doc__}' if alternative.__doc__: if alternative.__doc__.count('\n') < 3: raise AssertionError(doc_error_msg) (empty1, summary, empty2, doc_string) = alternative.__doc__.split('\n', 3) if empty1 or (empty2 and (not summary)): raise AssertionError(doc_error_msg) wrapper.__doc__ = dedent(f'\n {summary.strip()}\n\n .. deprecated:: {version}\n {msg}\n\n {dedent(doc_string)}') return wrapper def deprecate_kwarg(old_arg_name: str, new_arg_name: str | None, mapping: Mapping[Any, Any] | Callable[[Any], Any] | None=None, stacklevel: int=2) -> Callable[[F], F]: if mapping is not None and (not hasattr(mapping, 'get')) and (not callable(mapping)): raise TypeError('mapping from old to new argument values must be dict or callable!') def _deprecate_kwarg(func: F) -> F: @wraps(func) def wrapper(*args, **kwargs) -> Callable[..., Any]: old_arg_value = kwargs.pop(old_arg_name, None) if old_arg_value is not None: if new_arg_name is None: msg = f'the {old_arg_name!r} keyword is deprecated and will be removed in a future version. Please take steps to stop the use of {old_arg_name!r}' warnings.warn(msg, FutureWarning, stacklevel=stacklevel) kwargs[old_arg_name] = old_arg_value return func(*args, **kwargs) elif mapping is not None: if callable(mapping): new_arg_value = mapping(old_arg_value) else: new_arg_value = mapping.get(old_arg_value, old_arg_value) msg = f'the {old_arg_name}={old_arg_value!r} keyword is deprecated, use {new_arg_name}={new_arg_value!r} instead.' else: new_arg_value = old_arg_value msg = f'the {old_arg_name!r} keyword is deprecated, use {new_arg_name!r} instead.' warnings.warn(msg, FutureWarning, stacklevel=stacklevel) if kwargs.get(new_arg_name) is not None: msg = f'Can only specify {old_arg_name!r} or {new_arg_name!r}, not both.' 
raise TypeError(msg) kwargs[new_arg_name] = new_arg_value return func(*args, **kwargs) return cast(F, wrapper) return _deprecate_kwarg def _format_argument_list(allow_args: list[str]) -> str: if 'self' in allow_args: allow_args.remove('self') if not allow_args: return '' elif len(allow_args) == 1: return f" except for the argument '{allow_args[0]}'" else: last = allow_args[-1] args = ', '.join(["'" + x + "'" for x in allow_args[:-1]]) return f" except for the arguments {args} and '{last}'" def future_version_msg(version: str | None) -> str: if version is None: return 'In a future version of pandas' else: return f'Starting with pandas version {version}' def deprecate_nonkeyword_arguments(version: str | None, allowed_args: list[str] | None=None, name: str | None=None) -> Callable[[F], F]: def decorate(func): old_sig = inspect.signature(func) if allowed_args is not None: allow_args = allowed_args else: allow_args = [p.name for p in old_sig.parameters.values() if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD) and p.default is p.empty] new_params = [p.replace(kind=p.KEYWORD_ONLY) if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD) and p.name not in allow_args else p for p in old_sig.parameters.values()] new_params.sort(key=lambda p: p.kind) new_sig = old_sig.replace(parameters=new_params) num_allow_args = len(allow_args) msg = f'{future_version_msg(version)} all arguments of {name or func.__qualname__}{{arguments}} will be keyword-only.' @wraps(func) def wrapper(*args, **kwargs): if len(args) > num_allow_args: warnings.warn(msg.format(arguments=_format_argument_list(allow_args)), FutureWarning, stacklevel=find_stack_level()) return func(*args, **kwargs) wrapper.__signature__ = new_sig return wrapper return decorate def doc(*docstrings: None | str | Callable, **params: object) -> Callable[[F], F]: def decorator(decorated: F) -> F: docstring_components: list[str | Callable] = [] if decorated.__doc__: docstring_components.append(dedent(decorated.__doc__)) for docstring in docstrings: if docstring is None: continue if hasattr(docstring, '_docstring_components'): docstring_components.extend(docstring._docstring_components) elif isinstance(docstring, str) or docstring.__doc__: docstring_components.append(docstring) params_applied = [component.format(**params) if isinstance(component, str) and len(params) > 0 else component for component in docstring_components] decorated.__doc__ = ''.join([component if isinstance(component, str) else dedent(component.__doc__ or '') for component in params_applied]) decorated._docstring_components = docstring_components return decorated return decorator class Substitution: def __init__(self, *args, **kwargs) -> None: if args and kwargs: raise AssertionError('Only positional or keyword args are allowed') self.params = args or kwargs def __call__(self, func: F) -> F: func.__doc__ = func.__doc__ and func.__doc__ % self.params return func def update(self, *args, **kwargs) -> None: if isinstance(self.params, dict): self.params.update(*args, **kwargs) class Appender: addendum: str | None def __init__(self, addendum: str | None, join: str='', indents: int=0) -> None: if indents > 0: self.addendum = indent(addendum, indents=indents) else: self.addendum = addendum self.join = join def __call__(self, func: T) -> T: func.__doc__ = func.__doc__ if func.__doc__ else '' self.addendum = self.addendum if self.addendum else '' docitems = [func.__doc__, self.addendum] func.__doc__ = dedent(self.join.join(docitems)) return func def indent(text: str | None, indents: int=1) 
-> str: if not text or not isinstance(text, str): return '' jointext = ''.join(['\n'] + [' '] * indents) return jointext.join(text.split('\n')) __all__ = ['Appender', 'cache_readonly', 'deprecate', 'deprecate_kwarg', 'deprecate_nonkeyword_arguments', 'doc', 'future_version_msg', 'Substitution'] def set_module(module) -> Callable[[F], F]: def decorator(func: F) -> F: if module is not None: func.__module__ = module return func return decorator # File: pandas-main/pandas/util/_doctools.py from __future__ import annotations from typing import TYPE_CHECKING import numpy as np import pandas as pd if TYPE_CHECKING: from collections.abc import Iterable from matplotlib.figure import Figure class TablePlotter: def __init__(self, cell_width: float=0.37, cell_height: float=0.25, font_size: float=7.5) -> None: self.cell_width = cell_width self.cell_height = cell_height self.font_size = font_size def _shape(self, df: pd.DataFrame) -> tuple[int, int]: (row, col) = df.shape return (row + df.columns.nlevels, col + df.index.nlevels) def _get_cells(self, left, right, vertical) -> tuple[int, int]: if vertical: vcells = max(sum((self._shape(df)[0] for df in left)), self._shape(right)[0]) hcells = max((self._shape(df)[1] for df in left)) + self._shape(right)[1] else: vcells = max([self._shape(df)[0] for df in left] + [self._shape(right)[0]]) hcells = sum([self._shape(df)[1] for df in left] + [self._shape(right)[1]]) return (hcells, vcells) def plot(self, left, right, labels: Iterable[str]=(), vertical: bool=True) -> Figure: from matplotlib import gridspec import matplotlib.pyplot as plt if not isinstance(left, list): left = [left] left = [self._conv(df) for df in left] right = self._conv(right) (hcells, vcells) = self._get_cells(left, right, vertical) if vertical: figsize = (self.cell_width * hcells, self.cell_height * vcells) else: figsize = (self.cell_width * hcells, self.cell_height * vcells) fig = plt.figure(figsize=figsize) if vertical: gs = gridspec.GridSpec(len(left), hcells) max_left_cols = max((self._shape(df)[1] for df in left)) max_left_rows = max((self._shape(df)[0] for df in left)) for (i, (_left, _label)) in enumerate(zip(left, labels)): ax = fig.add_subplot(gs[i, 0:max_left_cols]) self._make_table(ax, _left, title=_label, height=1.0 / max_left_rows) ax = plt.subplot(gs[:, max_left_cols:]) self._make_table(ax, right, title='Result', height=1.05 / vcells) fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95) else: max_rows = max((self._shape(df)[0] for df in left + [right])) height = 1.0 / np.max(max_rows) gs = gridspec.GridSpec(1, hcells) i = 0 for (df, _label) in zip(left, labels): sp = self._shape(df) ax = fig.add_subplot(gs[0, i:i + sp[1]]) self._make_table(ax, df, title=_label, height=height) i += sp[1] ax = plt.subplot(gs[0, i:]) self._make_table(ax, right, title='Result', height=height) fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95) return fig def _conv(self, data): if isinstance(data, pd.Series): if data.name is None: data = data.to_frame(name='') else: data = data.to_frame() data = data.fillna('NaN') return data def _insert_index(self, data): data = data.copy() idx_nlevels = data.index.nlevels if idx_nlevels == 1: data.insert(0, 'Index', data.index) else: for i in range(idx_nlevels): data.insert(i, f'Index{i}', data.index._get_level_values(i)) col_nlevels = data.columns.nlevels if col_nlevels > 1: col = data.columns._get_level_values(0) values = [data.columns._get_level_values(i)._values for i in range(1, col_nlevels)] col_df = pd.DataFrame(values) 
data.columns = col_df.columns data = pd.concat([col_df, data]) data.columns = col return data def _make_table(self, ax, df, title: str, height: float | None=None) -> None: if df is None: ax.set_visible(False) return from pandas import plotting idx_nlevels = df.index.nlevels col_nlevels = df.columns.nlevels df = self._insert_index(df) tb = plotting.table(ax, df, loc=9) tb.set_fontsize(self.font_size) if height is None: height = 1.0 / (len(df) + 1) props = tb.properties() for ((r, c), cell) in props['celld'].items(): if c == -1: cell.set_visible(False) elif r < col_nlevels and c < idx_nlevels: cell.set_visible(False) elif r < col_nlevels or c < idx_nlevels: cell.set_facecolor('#AAAAAA') cell.set_height(height) ax.set_title(title, size=self.font_size) ax.axis('off') def main() -> None: import matplotlib.pyplot as plt p = TablePlotter() df1 = pd.DataFrame({'A': [10, 11, 12], 'B': [20, 21, 22], 'C': [30, 31, 32]}) df2 = pd.DataFrame({'A': [10, 12], 'C': [30, 32]}) p.plot([df1, df2], pd.concat([df1, df2]), labels=['df1', 'df2'], vertical=True) plt.show() df3 = pd.DataFrame({'X': [10, 12], 'Z': [30, 32]}) p.plot([df1, df3], pd.concat([df1, df3], axis=1), labels=['df1', 'df2'], vertical=False) plt.show() idx = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B'), (1, 'C'), (2, 'A'), (2, 'B'), (2, 'C')]) column = pd.MultiIndex.from_tuples([(1, 'A'), (1, 'B')]) df3 = pd.DataFrame({'v1': [1, 2, 3, 4, 5, 6], 'v2': [5, 6, 7, 8, 9, 10]}, index=idx) df3.columns = column p.plot(df3, df3, labels=['df3']) plt.show() if __name__ == '__main__': main() # File: pandas-main/pandas/util/_exceptions.py from __future__ import annotations import contextlib import inspect import os import re from typing import TYPE_CHECKING import warnings if TYPE_CHECKING: from collections.abc import Generator from types import FrameType @contextlib.contextmanager def rewrite_exception(old_name: str, new_name: str) -> Generator[None, None, None]: try: yield except Exception as err: if not err.args: raise msg = str(err.args[0]) msg = msg.replace(old_name, new_name) args: tuple[str, ...] 
= (msg,) if len(err.args) > 1: args = args + err.args[1:] err.args = args raise def find_stack_level() -> int: import pandas as pd pkg_dir = os.path.dirname(pd.__file__) test_dir = os.path.join(pkg_dir, 'tests') frame: FrameType | None = inspect.currentframe() try: n = 0 while frame: filename = inspect.getfile(frame) if filename.startswith(pkg_dir) and (not filename.startswith(test_dir)): frame = frame.f_back n += 1 else: break finally: del frame return n @contextlib.contextmanager def rewrite_warning(target_message: str, target_category: type[Warning], new_message: str, new_category: type[Warning] | None=None) -> Generator[None, None, None]: if new_category is None: new_category = target_category with warnings.catch_warnings(record=True) as record: yield if len(record) > 0: match = re.compile(target_message) for warning in record: if warning.category is target_category and re.search(match, str(warning.message)): category = new_category message: Warning | str = new_message else: (category, message) = (warning.category, warning.message) warnings.warn_explicit(message=message, category=category, filename=warning.filename, lineno=warning.lineno) # File: pandas-main/pandas/util/_print_versions.py from __future__ import annotations import codecs import json import locale import os import platform import struct import sys from typing import TYPE_CHECKING if TYPE_CHECKING: from pandas._typing import JSONSerializable from pandas.compat._optional import VERSIONS, get_version, import_optional_dependency def _get_commit_hash() -> str | None: try: from pandas._version_meson import __git_version__ return __git_version__ except ImportError: from pandas._version import get_versions versions = get_versions() return versions['full-revisionid'] def _get_sys_info() -> dict[str, JSONSerializable]: uname_result = platform.uname() (language_code, encoding) = locale.getlocale() return {'commit': _get_commit_hash(), 'python': platform.python_version(), 'python-bits': struct.calcsize('P') * 8, 'OS': uname_result.system, 'OS-release': uname_result.release, 'Version': uname_result.version, 'machine': uname_result.machine, 'processor': uname_result.processor, 'byteorder': sys.byteorder, 'LC_ALL': os.environ.get('LC_ALL'), 'LANG': os.environ.get('LANG'), 'LOCALE': {'language-code': language_code, 'encoding': encoding}} def _get_dependency_info() -> dict[str, JSONSerializable]: deps = ['pandas', 'numpy', 'dateutil', 'pip', 'Cython', 'sphinx', 'IPython'] deps.extend(list(VERSIONS)) result: dict[str, JSONSerializable] = {} for modname in deps: try: mod = import_optional_dependency(modname, errors='ignore') except Exception: result[modname] = 'N/A' else: result[modname] = get_version(mod) if mod else None return result def show_versions(as_json: str | bool=False) -> None: sys_info = _get_sys_info() deps = _get_dependency_info() if as_json: j = {'system': sys_info, 'dependencies': deps} if as_json is True: sys.stdout.writelines(json.dumps(j, indent=2)) else: assert isinstance(as_json, str) with codecs.open(as_json, 'wb', encoding='utf8') as f: json.dump(j, f, indent=2) else: assert isinstance(sys_info['LOCALE'], dict) language_code = sys_info['LOCALE']['language-code'] encoding = sys_info['LOCALE']['encoding'] sys_info['LOCALE'] = f'{language_code}.{encoding}' maxlen = max((len(x) for x in deps)) print('\nINSTALLED VERSIONS') print('------------------') for (k, v) in sys_info.items(): print(f'{k:<{maxlen}}: {v}') print('') for (k, v) in deps.items(): print(f'{k:<{maxlen}}: {v}') # File: 
pandas-main/pandas/util/_validators.py """""" from __future__ import annotations from collections.abc import Iterable, Sequence from typing import TypeVar, overload import numpy as np from pandas._libs import lib from pandas.core.dtypes.common import is_bool, is_integer BoolishT = TypeVar('BoolishT', bool, int) BoolishNoneT = TypeVar('BoolishNoneT', bool, int, None) def _check_arg_length(fname, args, max_fname_arg_count, compat_args) -> None: if max_fname_arg_count < 0: raise ValueError("'max_fname_arg_count' must be non-negative") if len(args) > len(compat_args): max_arg_count = len(compat_args) + max_fname_arg_count actual_arg_count = len(args) + max_fname_arg_count argument = 'argument' if max_arg_count == 1 else 'arguments' raise TypeError(f'{fname}() takes at most {max_arg_count} {argument} ({actual_arg_count} given)') def _check_for_default_values(fname, arg_val_dict, compat_args) -> None: for key in arg_val_dict: try: v1 = arg_val_dict[key] v2 = compat_args[key] if v1 is not None and v2 is None or (v1 is None and v2 is not None): match = False else: match = v1 == v2 if not is_bool(match): raise ValueError("'match' is not a boolean") except ValueError: match = arg_val_dict[key] is compat_args[key] if not match: raise ValueError(f"the '{key}' parameter is not supported in the pandas implementation of {fname}()") def validate_args(fname, args, max_fname_arg_count, compat_args) -> None: _check_arg_length(fname, args, max_fname_arg_count, compat_args) kwargs = dict(zip(compat_args, args)) _check_for_default_values(fname, kwargs, compat_args) def _check_for_invalid_keys(fname, kwargs, compat_args) -> None: diff = set(kwargs) - set(compat_args) if diff: bad_arg = next(iter(diff)) raise TypeError(f"{fname}() got an unexpected keyword argument '{bad_arg}'") def validate_kwargs(fname, kwargs, compat_args) -> None: kwds = kwargs.copy() _check_for_invalid_keys(fname, kwargs, compat_args) _check_for_default_values(fname, kwds, compat_args) def validate_args_and_kwargs(fname, args, kwargs, max_fname_arg_count, compat_args) -> None: _check_arg_length(fname, args + tuple(kwargs.values()), max_fname_arg_count, compat_args) args_dict = dict(zip(compat_args, args)) for key in args_dict: if key in kwargs: raise TypeError(f"{fname}() got multiple values for keyword argument '{key}'") kwargs.update(args_dict) validate_kwargs(fname, kwargs, compat_args) def validate_bool_kwarg(value: BoolishNoneT, arg_name: str, none_allowed: bool=True, int_allowed: bool=False) -> BoolishNoneT: good_value = is_bool(value) if none_allowed: good_value = good_value or value is None if int_allowed: good_value = good_value or isinstance(value, int) if not good_value: raise ValueError(f'For argument "{arg_name}" expected type bool, received type {type(value).__name__}.') return value def validate_fillna_kwargs(value, method, validate_scalar_dict_value: bool=True): from pandas.core.missing import clean_fill_method if value is None and method is None: raise ValueError("Must specify a fill 'value' or 'method'.") if value is None and method is not None: method = clean_fill_method(method) elif value is not None and method is None: if validate_scalar_dict_value and isinstance(value, (list, tuple)): raise TypeError(f'"value" parameter must be a scalar or dict, but you passed a "{type(value).__name__}"') elif value is not None and method is not None: raise ValueError("Cannot specify both 'value' and 'method'.") return (value, method) def validate_percentile(q: float | Iterable[float]) -> np.ndarray: q_arr = np.asarray(q) msg = 
'percentiles should all be in the interval [0, 1]' if q_arr.ndim == 0: if not 0 <= q_arr <= 1: raise ValueError(msg) elif not all((0 <= qs <= 1 for qs in q_arr)): raise ValueError(msg) return q_arr @overload def validate_ascending(ascending: BoolishT) -> BoolishT: ... @overload def validate_ascending(ascending: Sequence[BoolishT]) -> list[BoolishT]: ... def validate_ascending(ascending: bool | int | Sequence[BoolishT]) -> bool | int | list[BoolishT]: kwargs = {'none_allowed': False, 'int_allowed': True} if not isinstance(ascending, Sequence): return validate_bool_kwarg(ascending, 'ascending', **kwargs) return [validate_bool_kwarg(item, 'ascending', **kwargs) for item in ascending] def validate_endpoints(closed: str | None) -> tuple[bool, bool]: left_closed = False right_closed = False if closed is None: left_closed = True right_closed = True elif closed == 'left': left_closed = True elif closed == 'right': right_closed = True else: raise ValueError("Closed has to be either 'left', 'right' or None") return (left_closed, right_closed) def validate_inclusive(inclusive: str | None) -> tuple[bool, bool]: left_right_inclusive: tuple[bool, bool] | None = None if isinstance(inclusive, str): left_right_inclusive = {'both': (True, True), 'left': (True, False), 'right': (False, True), 'neither': (False, False)}.get(inclusive) if left_right_inclusive is None: raise ValueError("Inclusive has to be either 'both', 'neither', 'left' or 'right'") return left_right_inclusive def validate_insert_loc(loc: int, length: int) -> int: if not is_integer(loc): raise TypeError(f'loc must be an integer between -{length} and {length}') if loc < 0: loc += length if not 0 <= loc <= length: raise IndexError(f'loc must be an integer between -{length} and {length}') return loc def check_dtype_backend(dtype_backend) -> None: if dtype_backend is not lib.no_default: if dtype_backend not in ['numpy_nullable', 'pyarrow']: raise ValueError(f"dtype_backend {dtype_backend} is invalid, only 'numpy_nullable' and 'pyarrow' are allowed.") # File: pandas-main/pandas/util/version/__init__.py from __future__ import annotations from collections.abc import Callable import itertools import re from typing import Any, NamedTuple, SupportsInt, Union __all__ = ['VERSION_PATTERN', 'InvalidVersion', 'Version', 'parse'] class InfinityType: def __repr__(self) -> str: return 'Infinity' def __hash__(self) -> int: return hash(repr(self)) def __lt__(self, other: object) -> bool: return False def __le__(self, other: object) -> bool: return False def __eq__(self, other: object) -> bool: return isinstance(other, type(self)) def __gt__(self, other: object) -> bool: return True def __ge__(self, other: object) -> bool: return True def __neg__(self: object) -> NegativeInfinityType: return NegativeInfinity Infinity = InfinityType() class NegativeInfinityType: def __repr__(self) -> str: return '-Infinity' def __hash__(self) -> int: return hash(repr(self)) def __lt__(self, other: object) -> bool: return True def __le__(self, other: object) -> bool: return True def __eq__(self, other: object) -> bool: return isinstance(other, type(self)) def __gt__(self, other: object) -> bool: return False def __ge__(self, other: object) -> bool: return False def __neg__(self: object) -> InfinityType: return Infinity NegativeInfinity = NegativeInfinityType() LocalType = tuple[Union[int, str], ...] 
CmpPrePostDevType = Union[InfinityType, NegativeInfinityType, tuple[str, int]] CmpLocalType = Union[NegativeInfinityType, tuple[Union[tuple[int, str], tuple[NegativeInfinityType, Union[int, str]]], ...]] CmpKey = tuple[int, tuple[int, ...], CmpPrePostDevType, CmpPrePostDevType, CmpPrePostDevType, CmpLocalType] VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool] class _Version(NamedTuple): epoch: int release: tuple[int, ...] dev: tuple[str, int] | None pre: tuple[str, int] | None post: tuple[str, int] | None local: LocalType | None def parse(version: str) -> Version: return Version(version) class InvalidVersion(ValueError): class _BaseVersion: _key: tuple[Any, ...] def __hash__(self) -> int: return hash(self._key) def __lt__(self, other: _BaseVersion) -> bool: if not isinstance(other, _BaseVersion): return NotImplemented return self._key < other._key def __le__(self, other: _BaseVersion) -> bool: if not isinstance(other, _BaseVersion): return NotImplemented return self._key <= other._key def __eq__(self, other: object) -> bool: if not isinstance(other, _BaseVersion): return NotImplemented return self._key == other._key def __ge__(self, other: _BaseVersion) -> bool: if not isinstance(other, _BaseVersion): return NotImplemented return self._key >= other._key def __gt__(self, other: _BaseVersion) -> bool: if not isinstance(other, _BaseVersion): return NotImplemented return self._key > other._key def __ne__(self, other: object) -> bool: if not isinstance(other, _BaseVersion): return NotImplemented return self._key != other._key
_VERSION_PATTERN = '\n    v?\n    (?:\n        (?:(?P<epoch>[0-9]+)!)?                           # epoch\n        (?P<release>[0-9]+(?:\\.[0-9]+)*)                  # release segment\n        (?P<pre>                                          # pre-release\n            [-_\\.]?\n            (?P<pre_l>alpha|a|beta|b|preview|pre|c|rc)\n            [-_\\.]?\n            (?P<pre_n>[0-9]+)?\n        )?\n        (?P<post>                                         # post release\n            (?:-(?P<post_n1>[0-9]+))\n            |\n            (?:\n                [-_\\.]?\n                (?P<post_l>post|rev|r)\n                [-_\\.]?\n                (?P<post_n2>[0-9]+)?\n            )\n        )?\n        (?P<dev>                                          # dev release\n            [-_\\.]?\n            (?P<dev_l>dev)\n            [-_\\.]?\n            (?P<dev_n>[0-9]+)?\n        )?\n    )\n    (?:\\+(?P<local>[a-z0-9]+(?:[-_\\.][a-z0-9]+)*))?       # local version\n'
VERSION_PATTERN = _VERSION_PATTERN
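
# Illustrative sketch, not part of the pandas source: exercising the named groups of the
# verbose pattern above on an arbitrary example string. `_pattern_demo` is a hypothetical
# name used only for this example.
_pattern_demo = re.compile('^\\s*' + VERSION_PATTERN + '\\s*$', re.VERBOSE | re.IGNORECASE).search('1!2.3.4rc1.post5.dev6+ubuntu-1')
# _pattern_demo.group('epoch') -> '1', .group('release') -> '2.3.4'
# _pattern_demo.group('pre_l') -> 'rc', .group('pre_n') -> '1'
# _pattern_demo.group('post_l') -> 'post', .group('post_n2') -> '5'
# _pattern_demo.group('dev_n') -> '6', .group('local') -> 'ubuntu-1'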

class Version(_BaseVersion):
    _regex = re.compile('^\\s*' + VERSION_PATTERN + '\\s*$', re.VERBOSE | re.IGNORECASE)
    _key: CmpKey

    def __init__(self, version: str) -> None:
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion(f"Invalid version: '{version}'")
        self._version = _Version(epoch=int(match.group('epoch')) if match.group('epoch') else 0, release=tuple((int(i) for i in match.group('release').split('.'))), pre=_parse_letter_version(match.group('pre_l'), match.group('pre_n')), post=_parse_letter_version(match.group('post_l'), match.group('post_n1') or match.group('post_n2')), dev=_parse_letter_version(match.group('dev_l'), match.group('dev_n')), local=_parse_local_version(match.group('local')))
        self._key = _cmpkey(self._version.epoch, self._version.release, self._version.pre, self._version.post, self._version.dev, self._version.local)

    def __repr__(self) -> str:
        return f""

    def __str__(self) -> str:
        parts = []
        if self.epoch != 0:
            parts.append(f'{self.epoch}!')
        parts.append('.'.join((str(x) for x in self.release)))
        if self.pre is not None:
            parts.append(''.join((str(x) for x in self.pre)))
        if self.post is not None:
            parts.append(f'.post{self.post}')
        if self.dev is not None:
            parts.append(f'.dev{self.dev}')
        if self.local is not None:
            parts.append(f'+{self.local}')
        return ''.join(parts)

    @property
    def epoch(self) -> int:
        return self._version.epoch

    @property
    def release(self) -> tuple[int, ...]:
        return self._version.release

    @property
    def pre(self) -> tuple[str, int] | None:
        return self._version.pre

    @property
    def post(self) -> int | None:
        return self._version.post[1] if self._version.post else None

    @property
    def dev(self) -> int | None:
        return self._version.dev[1] if self._version.dev else None

    @property
    def local(self) -> str | None:
        if self._version.local:
            return '.'.join((str(x) for x in self._version.local))
        else:
            return None

    @property
    def public(self) -> str:
        return str(self).split('+', 1)[0]

    @property
    def base_version(self) -> str:
        parts = []
        if self.epoch != 0:
            parts.append(f'{self.epoch}!')
        parts.append('.'.join((str(x) for x in self.release)))
        return ''.join(parts)

    @property
    def is_prerelease(self) -> bool:
        return self.dev is not None or self.pre is not None

    @property
    def is_postrelease(self) -> bool:
        return self.post is not None

    @property
    def is_devrelease(self) -> bool:
        return self.dev is not None

    @property
    def major(self) -> int:
        return self.release[0] if len(self.release) >= 1 else 0

    @property
    def minor(self) -> int:
        return self.release[1] if len(self.release) >= 2 else 0

    @property
    def micro(self) -> int:
        return self.release[2] if len(self.release) >= 3 else 0
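
# Illustrative usage sketch, not part of the pandas source: PEP 440 ordering and the
# convenience properties exposed by Version (the version strings are arbitrary examples).
assert Version('1.0.0.dev1') < Version('1.0.0rc1') < Version('1.0.0') < Version('1.0.0.post1')
assert Version('2.1.0rc0').is_prerelease and not Version('2.1.0').is_prerelease
assert Version('2.1.3').major == 2 and Version('2.1.3').minor == 1 and Version('2.1.3').micro == 3
assert Version('1.0+local.7').public == '1.0' and Version('1.0+local.7').local == 'local.7'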

def _parse_letter_version(letter: str | None, number: str | bytes | SupportsInt | None) -> tuple[str, int] | None:
    if letter:
        if number is None:
            number = 0
        letter = letter.lower()
        if letter == 'alpha':
            letter = 'a'
        elif letter == 'beta':
            letter = 'b'
        elif letter in ['c', 'pre', 'preview']:
            letter = 'rc'
        elif letter in ['rev', 'r']:
            letter = 'post'
        return (letter, int(number))
    if not letter and number:
        letter = 'post'
        return (letter, int(number))
    return None
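
# Illustrative sketch, not part of the pandas source: _parse_letter_version normalises
# spelled-out pre/post tags and handles the implicit post-release form (e.g. "1.0-3").
assert _parse_letter_version('alpha', '1') == ('a', 1)
assert _parse_letter_version('preview', None) == ('rc', 0)
assert _parse_letter_version(None, '3') == ('post', 3)
assert _parse_letter_version(None, None) is None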
_local_version_separators = re.compile('[\\._-]')

def _parse_local_version(local: str | None) -> LocalType | None:
    if local is not None:
        return tuple((part.lower() if not part.isdigit() else int(part) for part in _local_version_separators.split(local)))
    return None
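
# Illustrative sketch, not part of the pandas source: local version segments are split on
# '.', '_' or '-', with purely numeric parts coerced to int so they compare numerically.
assert _parse_local_version('ubuntu-1') == ('ubuntu', 1)
assert _parse_local_version('ABC.10_2') == ('abc', 10, 2)
assert _parse_local_version(None) is None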

def _cmpkey(epoch: int, release: tuple[int, ...], pre: tuple[str, int] | None, post: tuple[str, int] | None, dev: tuple[str, int] | None, local: LocalType | None) -> CmpKey:
    _release = tuple(reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release)))))
    if pre is None and post is None and (dev is not None):
        _pre: CmpPrePostDevType = NegativeInfinity
    elif pre is None:
        _pre = Infinity
    else:
        _pre = pre
    if post is None:
        _post: CmpPrePostDevType = NegativeInfinity
    else:
        _post = post
    if dev is None:
        _dev: CmpPrePostDevType = Infinity
    else:
        _dev = dev
    if local is None:
        _local: CmpLocalType = NegativeInfinity
    else:
        _local = tuple(((i, '') if isinstance(i, int) else (NegativeInfinity, i) for i in local))
    return (epoch, _release, _pre, _post, _dev, _local)
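
# Illustrative sketch, not part of the pandas source: the comparison key built by _cmpkey
# strips trailing zeros from the release (so '1.0' compares equal to '1.0.0') and uses the
# Infinity / NegativeInfinity sentinels so dev releases sort before pre and final releases.
assert _cmpkey(0, (1, 0, 0), None, None, None, None)[1] == (1,)
assert Version('1.0') == Version('1.0.0')
assert Version('1.0.dev0') < Version('1.0a1') < Version('1.0') < Version('1.0+anything')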