
Commit b5a5268

STY: De-privatize imported names (#36235)
1 parent b1d3897 commit b5a5268

18 files changed, +88 -77 lines changed

pandas/_libs/interval.pyx

Lines changed: 2 additions & 2 deletions

@@ -46,7 +46,7 @@ from pandas._libs.tslibs.util cimport (
     is_timedelta64_object,
 )
 
-_VALID_CLOSED = frozenset(['left', 'right', 'both', 'neither'])
+VALID_CLOSED = frozenset(['left', 'right', 'both', 'neither'])
 
 
 cdef class IntervalMixin:
@@ -318,7 +318,7 @@ cdef class Interval(IntervalMixin):
         self._validate_endpoint(left)
         self._validate_endpoint(right)
 
-        if closed not in _VALID_CLOSED:
+        if closed not in VALID_CLOSED:
             raise ValueError(f"invalid option for 'closed': {closed}")
         if not left <= right:
             raise ValueError("left side of interval must be <= right side")

pandas/core/arrays/_arrow_utils.py

Lines changed: 2 additions & 2 deletions

@@ -4,7 +4,7 @@
 import numpy as np
 import pyarrow
 
-from pandas.core.arrays.interval import _VALID_CLOSED
+from pandas.core.arrays.interval import VALID_CLOSED
 
 _pyarrow_version_ge_015 = LooseVersion(pyarrow.__version__) >= LooseVersion("0.15")
 
@@ -83,7 +83,7 @@ class ArrowIntervalType(pyarrow.ExtensionType):
     def __init__(self, subtype, closed):
         # attributes need to be set first before calling
         # super init (as that calls serialize)
-        assert closed in _VALID_CLOSED
+        assert closed in VALID_CLOSED
         self._closed = closed
         if not isinstance(subtype, pyarrow.DataType):
             subtype = pyarrow.type_for_alias(str(subtype))
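
Not part of the diff: a short sketch of constructing ArrowIntervalType directly, which exercises the renamed assertion above. It assumes pyarrow >= 0.15 is installed and a pandas build from this commit; the subtype and closed attributes are taken from this class.

import pyarrow

from pandas.core.arrays._arrow_utils import ArrowIntervalType

# closed must be one of VALID_CLOSED, otherwise the assert in __init__ fires.
typ = ArrowIntervalType(pyarrow.int64(), "both")
print(typ.subtype, typ.closed)  # int64 both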

pandas/core/arrays/interval.py

Lines changed: 8 additions & 4 deletions

@@ -5,7 +5,12 @@
 
 from pandas._config import get_option
 
-from pandas._libs.interval import Interval, IntervalMixin, intervals_to_interval_bounds
+from pandas._libs.interval import (
+    VALID_CLOSED,
+    Interval,
+    IntervalMixin,
+    intervals_to_interval_bounds,
+)
 from pandas.compat.numpy import function as nv
 from pandas.util._decorators import Appender
 
@@ -42,7 +47,6 @@
 from pandas.core.indexers import check_array_indexer
 from pandas.core.indexes.base import ensure_index
 
-_VALID_CLOSED = {"left", "right", "both", "neither"}
 _interval_shared_docs = {}
 
 _shared_docs_kwargs = dict(
@@ -475,7 +479,7 @@ def _validate(self):
         * left and right have the same missing values
         * left is always below right
         """
-        if self.closed not in _VALID_CLOSED:
+        if self.closed not in VALID_CLOSED:
             msg = f"invalid option for 'closed': {self.closed}"
             raise ValueError(msg)
         if len(self.left) != len(self.right):
@@ -1012,7 +1016,7 @@ def closed(self):
         )
     )
     def set_closed(self, closed):
-        if closed not in _VALID_CLOSED:
+        if closed not in VALID_CLOSED:
             msg = f"invalid option for 'closed': {closed}"
             raise ValueError(msg)
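Not part of the diff: a small public-API sketch showing the validation that now reads VALID_CLOSED; set_closed rejects anything outside {'left', 'right', 'both', 'neither'}.

import pandas as pd

arr = pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3], closed="right")
print(arr.set_closed("both"))  # same intervals, re-labelled as closed on both sides

try:
    arr.set_closed("invalid")
except ValueError as exc:
    print(exc)  # invalid option for 'closed': invalid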

pandas/core/arrays/sparse/__init__.py

Lines changed: 1 addition & 1 deletion

@@ -5,6 +5,6 @@
     BlockIndex,
     IntIndex,
     SparseArray,
-    _make_index,
+    make_sparse_index,
 )
 from pandas.core.arrays.sparse.dtype import SparseDtype

pandas/core/arrays/sparse/array.py

Lines changed: 2 additions & 2 deletions

@@ -1556,15 +1556,15 @@ def make_sparse(arr: np.ndarray, kind="block", fill_value=None, dtype=None, copy
     else:
         indices = mask.nonzero()[0].astype(np.int32)
 
-    index = _make_index(length, indices, kind)
+    index = make_sparse_index(length, indices, kind)
     sparsified_values = arr[mask]
     if dtype is not None:
         sparsified_values = astype_nansafe(sparsified_values, dtype=dtype)
     # TODO: copy
     return sparsified_values, index, fill_value
 
 
-def _make_index(length, indices, kind):
+def make_sparse_index(length, indices, kind):
 
     if kind == "block" or isinstance(kind, BlockIndex):
         locs, lens = splib.get_blocks(indices)
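
Not part of the diff: an illustrative call to the renamed helper. make_sparse_index is internal API, so the import path below is only assumed to hold at this commit; the "block" and "integer" kinds mirror SparseArray's kind argument.

import numpy as np

from pandas.core.arrays.sparse import BlockIndex, IntIndex, make_sparse_index

indices = np.array([0, 1, 5], dtype=np.int32)  # positions of the non-fill values
assert isinstance(make_sparse_index(10, indices, kind="block"), BlockIndex)
assert isinstance(make_sparse_index(10, indices, kind="integer"), IntIndex)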

pandas/core/computation/engines.py

Lines changed: 1 addition & 1 deletion

@@ -130,7 +130,7 @@ def _evaluate(self) -> None:
         pass
 
 
-_engines: Dict[str, Type[AbstractEngine]] = {
+ENGINES: Dict[str, Type[AbstractEngine]] = {
     "numexpr": NumExprEngine,
     "python": PythonEngine,
 }
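
Not part of the diff: a minimal sketch of the dictionary dispatch made public by the rename, assuming a pandas checkout at this commit (the module itself remains internal).

from pandas.core.computation.engines import ENGINES, PythonEngine

assert ENGINES["python"] is PythonEngine  # engine names map to engine classes
print(sorted(ENGINES))  # ['numexpr', 'python']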

pandas/core/computation/eval.py

Lines changed: 7 additions & 7 deletions

@@ -9,8 +9,8 @@
 from pandas._libs.lib import no_default
 from pandas.util._validators import validate_bool_kwarg
 
-from pandas.core.computation.engines import _engines
-from pandas.core.computation.expr import Expr, _parsers
+from pandas.core.computation.engines import ENGINES
+from pandas.core.computation.expr import PARSERS, Expr
 from pandas.core.computation.parsing import tokenize_string
 from pandas.core.computation.scope import ensure_scope
 
@@ -43,8 +43,8 @@ def _check_engine(engine: Optional[str]) -> str:
     if engine is None:
         engine = "numexpr" if NUMEXPR_INSTALLED else "python"
 
-    if engine not in _engines:
-        valid_engines = list(_engines.keys())
+    if engine not in ENGINES:
+        valid_engines = list(ENGINES.keys())
         raise KeyError(
             f"Invalid engine '{engine}' passed, valid engines are {valid_engines}"
         )
@@ -75,9 +75,9 @@ def _check_parser(parser: str):
     KeyError
       * If an invalid parser is passed
     """
-    if parser not in _parsers:
+    if parser not in PARSERS:
         raise KeyError(
-            f"Invalid parser '{parser}' passed, valid parsers are {_parsers.keys()}"
+            f"Invalid parser '{parser}' passed, valid parsers are {PARSERS.keys()}"
        )
 
 
@@ -341,7 +341,7 @@ def eval(
     parsed_expr = Expr(expr, engine=engine, parser=parser, env=env)
 
     # construct the engine and evaluate the parsed expression
-    eng = _engines[engine]
+    eng = ENGINES[engine]
     eng_inst = eng(parsed_expr)
     ret = eng_inst.evaluate()
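Not part of the diff: pd.eval is the public entry point that keys into ENGINES and PARSERS. The invalid-engine branch below exercises the KeyError raised by _check_engine; the exact wording comes from the f-string in this file.

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
print(pd.eval("df.a + df.b", engine="python", parser="pandas"))

try:
    pd.eval("df.a + df.b", engine="not-an-engine")
except KeyError as exc:
    print(exc)  # Invalid engine 'not-an-engine' passed, valid engines are [...]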

pandas/core/computation/expr.py

Lines changed: 2 additions & 2 deletions

@@ -782,7 +782,7 @@ def __init__(
         self.env = env or Scope(level=level + 1)
         self.engine = engine
         self.parser = parser
-        self._visitor = _parsers[parser](self.env, self.engine, self.parser)
+        self._visitor = PARSERS[parser](self.env, self.engine, self.parser)
         self.terms = self.parse()
 
     @property
@@ -814,4 +814,4 @@ def names(self):
         return frozenset(term.name for term in com.flatten(self.terms))
 
 
-_parsers = {"python": PythonExprVisitor, "pandas": PandasExprVisitor}
+PARSERS = {"python": PythonExprVisitor, "pandas": PandasExprVisitor}

pandas/core/config_init.py

Lines changed: 2 additions & 2 deletions

@@ -314,9 +314,9 @@ def use_numba_cb(key):
 
 
 def table_schema_cb(key):
-    from pandas.io.formats.printing import _enable_data_resource_formatter
+    from pandas.io.formats.printing import enable_data_resource_formatter
 
-    _enable_data_resource_formatter(cf.get_option(key))
+    enable_data_resource_formatter(cf.get_option(key))
 
 
 def is_terminal() -> bool:
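Not part of the diff: table_schema_cb runs whenever the display.html.table_schema option changes, so toggling the public option is the supported way to reach the renamed formatter hook.

import pandas as pd

pd.set_option("display.html.table_schema", True)   # triggers table_schema_cb
print(pd.get_option("display.html.table_schema"))  # True
pd.set_option("display.html.table_schema", False)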

pandas/core/groupby/generic.py

Lines changed: 5 additions & 5 deletions

@@ -70,9 +70,9 @@
     GroupBy,
     _agg_template,
     _apply_docs,
-    _group_selection_context,
     _transform_template,
     get_groupby,
+    group_selection_context,
 )
 from pandas.core.groupby.numba_ import generate_numba_func, split_for_numba
 from pandas.core.indexes.api import Index, MultiIndex, all_indexes_same
@@ -230,7 +230,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)
             raise NotImplementedError(
                 "Numba engine can only be used with a single function."
             )
-        with _group_selection_context(self):
+        with group_selection_context(self):
             data = self._selected_obj
             result, index = self._aggregate_with_numba(
                 data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
@@ -685,7 +685,7 @@ def value_counts(
         self, normalize=False, sort=True, ascending=False, bins=None, dropna=True
     ):
 
-        from pandas.core.reshape.merge import _get_join_indexers
+        from pandas.core.reshape.merge import get_join_indexers
         from pandas.core.reshape.tile import cut
 
         if bins is not None and not np.iterable(bins):
@@ -787,7 +787,7 @@ def value_counts(
 
         right = [diff.cumsum() - 1, codes[-1]]
 
-        _, idx = _get_join_indexers(left, right, sort=False, how="left")
+        _, idx = get_join_indexers(left, right, sort=False, how="left")
         out = np.where(idx != -1, out[idx], 0)
 
         if sort:
@@ -942,7 +942,7 @@ def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs)
             raise NotImplementedError(
                 "Numba engine can only be used with a single function."
             )
-        with _group_selection_context(self):
+        with group_selection_context(self):
             data = self._selected_obj
             result, index = self._aggregate_with_numba(
                 data, func, *args, engine_kwargs=engine_kwargs, **kwargs
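Not part of the diff: a small public-API sketch of SeriesGroupBy.value_counts with explicit bins, which is the code path that now calls get_join_indexers. The data below is arbitrary example input.

import pandas as pd

s = pd.Series([1, 2, 2, 3, 5, 6], index=["a", "a", "a", "b", "b", "b"])
print(s.groupby(level=0).value_counts(bins=[0, 3, 6]))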
