4 changes: 2 additions & 2 deletions pandas/_libs/hashtable_class_helper.pxi.in
@@ -1070,7 +1070,7 @@ cdef class StringHashTable(HashTable):
val = values[i]

if isinstance(val, str):
-# GH#31499 if we have a np.str_ PyUnicode_AsUTF8 won't recognize
+# GH#31499 if we have an np.str_ PyUnicode_AsUTF8 won't recognize
# it as a str, even though isinstance does.
v = PyUnicode_AsUTF8(<str>val)
else:
@@ -1108,7 +1108,7 @@ cdef class StringHashTable(HashTable):
val = values[i]

if isinstance(val, str):
-# GH#31499 if we have a np.str_ PyUnicode_AsUTF8 won't recognize
+# GH#31499 if we have an np.str_ PyUnicode_AsUTF8 won't recognize
# it as a str, even though isinstance does.
v = PyUnicode_AsUTF8(<str>val)
else:
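The comment above turns on a subtlety worth seeing in plain Python: np.str_ subclasses the builtin str, so the isinstance check passes even though the exact type differs. A minimal sketch, not part of the diff:

    import numpy as np

    s = np.str_("hello")
    isinstance(s, str)  # True: np.str_ subclasses the builtin str
    type(s) is str      # False: it is still a NumPy scalar type, which is
                        # why the C-level fast path needs the explicit <str> cast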
2 changes: 1 addition & 1 deletion pandas/_libs/index.pyx
@@ -58,7 +58,7 @@ cdef bint is_definitely_invalid_key(object val):

cdef ndarray _get_bool_indexer(ndarray values, object val, ndarray mask = None):
"""
-Return a ndarray[bool] of locations where val matches self.values.
+Return an ndarray[bool] of locations where val matches self.values.
If val is not NA, this is equivalent to `self.values == val`
"""
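A sketch of what the docstring describes, in plain NumPy terms: equality handles ordinary values, but an NA lookup needs isna, since NaN != NaN:

    import numpy as np
    import pandas as pd

    values = np.array([1.0, np.nan, 3.0])
    values == 3.0    # array([False, False,  True]): the `self.values == val` path
    pd.isna(values)  # array([False,  True, False]): needed when val is NA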
2 changes: 1 addition & 1 deletion pandas/_libs/tslib.pyx
@@ -120,7 +120,7 @@ def format_array_from_datetime(
NPY_DATETIMEUNIT reso=NPY_FR_ns,
) -> np.ndarray:
"""
-return a np object array of the string formatted values
+return an np object array of the string formatted values

Parameters
----------
2 changes: 1 addition & 1 deletion pandas/_libs/tslibs/conversion.pyx
@@ -276,7 +276,7 @@ cdef (int64_t, int) precision_from_unit(

cdef int64_t get_datetime64_nanos(object val, NPY_DATETIMEUNIT reso) except? -1:
"""
-Extract the value and unit from a np.datetime64 object, then convert the
+Extract the value and unit from an np.datetime64 object, then convert the
value to nanoseconds if necessary.
"""
cdef:
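A rough illustration of the conversion get_datetime64_nanos performs, using public NumPy calls rather than the C API:

    import numpy as np

    val = np.datetime64("2021-01-01T00:00:01", "s")  # second resolution
    val.astype("datetime64[ns]").view("i8")          # 1609459201000000000 nanoseconds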
4 changes: 2 additions & 2 deletions pandas/_libs/tslibs/nattype.pyx
@@ -1885,7 +1885,7 @@ cdef bint checknull_with_nat(object val):

cdef bint is_dt64nat(object val):
"""
-Is this a np.datetime64 object np.datetime64("NaT").
+Is this an np.datetime64 object np.datetime64("NaT").
"""
if cnp.is_datetime64_object(val):
return cnp.get_datetime64_value(val) == NPY_NAT
@@ -1894,7 +1894,7 @@ cdef bint is_dt64nat(object val):

cdef bint is_td64nat(object val):
"""
-Is this a np.timedelta64 object np.timedelta64("NaT").
+Is this an np.timedelta64 object np.timedelta64("NaT").
"""
if cnp.is_timedelta64_object(val):
return cnp.get_timedelta64_value(val) == NPY_NAT
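The NPY_NAT comparison above can be mimicked from Python; a sketch:

    import numpy as np

    np.isnat(np.datetime64("NaT"))   # True
    np.isnat(np.timedelta64("NaT"))  # True
    np.datetime64("NaT").view("i8")  # -9223372036854775808, i.e. NPY_NAT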
2 changes: 1 addition & 1 deletion pandas/_libs/tslibs/offsets.pyx
@@ -818,7 +818,7 @@ cdef class BaseOffset:
state["normalize"] = self.normalize

# we don't want to actually pickle the calendar object
-# as its a np.busyday; we recreate on deserialization
+# as its an np.busyday; we recreate on deserialization
state.pop("calendar", None)
if "kwds" in state:
state["kwds"].pop("calendar", None)
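The practical effect is that the offset round-trips through pickle even though its calendar object does not travel with it; a sketch, assuming CustomBusinessDay's public attributes:

    import pickle
    import pandas as pd

    offset = pd.offsets.CustomBusinessDay(weekmask="Mon Wed Fri")
    roundtrip = pickle.loads(pickle.dumps(offset))  # calendar dropped, then rebuilt
    roundtrip.weekmask                              # 'Mon Wed Fri'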
2 changes: 1 addition & 1 deletion pandas/_libs/tslibs/timedeltas.pyx
@@ -1489,7 +1489,7 @@ cdef class _Timedelta(timedelta):
"""
cdef:
str abbrev = npy_unit_to_abbrev(self._creso)
-# TODO: way to create a np.timedelta64 obj with the reso directly
+# TODO: way to create an np.timedelta64 obj with the reso directly
# instead of having to get the abbrev?
return np.timedelta64(self._value, abbrev)

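What the TODO works around, sketched with public APIs: np.timedelta64 takes the unit as a string abbreviation, which is what this method ultimately produces:

    import numpy as np
    import pandas as pd

    np.timedelta64(1_500_000, "us")               # unit passed as its abbreviation
    pd.Timedelta(1.5, unit="s").to_timedelta64()  # numpy.timedelta64(1500000000,'ns')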
2 changes: 1 addition & 1 deletion pandas/_libs/tslibs/timestamps.pyx
@@ -412,7 +412,7 @@ cdef class _Timestamp(ABCTimestamp):

@classmethod
def _from_dt64(cls, dt64: np.datetime64):
-# construct a Timestamp from a np.datetime64 object, keeping the
+# construct a Timestamp from an np.datetime64 object, keeping the
# resolution of the input.
# This is here mainly so we can incrementally implement non-nano
# (e.g. only tznaive at first)
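The resolution-preserving behavior is visible from the public constructor; a sketch assuming pandas >= 2.0, where Timestamp exposes .unit:

    import numpy as np
    import pandas as pd

    ts = pd.Timestamp(np.datetime64("2021-01-01", "s"))
    ts.unit  # 's': the second resolution of the input is kept, not upcast to 'ns'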
2 changes: 1 addition & 1 deletion pandas/_testing/__init__.py
@@ -402,7 +402,7 @@ def get_cython_table_params(ndframe, func_names_and_expected):
----------
ndframe : DataFrame or Series
func_names_and_expected : Sequence of two items
-The first item is a name of a NDFrame method ('sum', 'prod') etc.
+The first item is a name of an NDFrame method ('sum', 'prod') etc.
The second item is the expected return value.
Returns
2 changes: 1 addition & 1 deletion pandas/_testing/asserters.py
@@ -803,7 +803,7 @@ def assert_extension_array_equal(
):
return
# Avoid slow object-dtype comparisons
-# np.asarray for case where we have a np.MaskedArray
+# np.asarray for case where we have an np.MaskedArray
assert_numpy_array_equal(
np.asarray(left.asi8),
np.asarray(right.asi8),
2 changes: 1 addition & 1 deletion pandas/core/arrays/datetimelike.py
@@ -1161,7 +1161,7 @@ def _sub_datetimelike_scalar(
raise TypeError(f"cannot subtract a datelike from a {type(self).__name__}")

self = cast("DatetimeArray", self)
-# subtract a datetime from myself, yielding a ndarray[timedelta64[ns]]
+# subtract a datetime from myself, yielding an ndarray[timedelta64[ns]]

if isna(other):
# i.e. np.datetime64("NaT")
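The subtraction described in the comment, seen from the public API; a sketch:

    import pandas as pd

    dti = pd.date_range("2021-01-01", periods=3)
    dti - pd.Timestamp("2021-01-01")
    # TimedeltaIndex(['0 days', '1 days', '2 days'], ...), backed by timedelta64[ns]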
2 changes: 1 addition & 1 deletion pandas/core/arrays/interval.py
@@ -1977,7 +1977,7 @@ def repeat(
axis: AxisInt | None = None,
) -> Self:
"""
-Repeat elements of a IntervalArray.
+Repeat elements of an IntervalArray.
Returns a new IntervalArray where each element of the current IntervalArray
is repeated consecutively a given number of times.
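The repeat behavior from the docstring, for reference:

    import pandas as pd

    arr = pd.arrays.IntervalArray.from_breaks([0, 1, 2])
    arr.repeat(2)  # [(0, 1], (0, 1], (1, 2], (1, 2]]: each element repeated consecutively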
4 changes: 2 additions & 2 deletions pandas/core/dtypes/cast.py
@@ -393,7 +393,7 @@ def trans(x):

def maybe_upcast_numeric_to_64bit(arr: NumpyIndexT) -> NumpyIndexT:
"""
-If array is a int/uint/float bit size lower than 64 bit, upcast it to 64 bit.
+If array is an int/uint/float bit size lower than 64 bit, upcast it to 64 bit.
Parameters
----------
@@ -1393,7 +1393,7 @@ def construct_1d_arraylike_from_scalar(
value: Scalar, length: int, dtype: DtypeObj | None
) -> ArrayLike:
"""
-create a np.ndarray / pandas type of specified shape and dtype
+create an np.ndarray / pandas type of specified shape and dtype
filled with values
Parameters
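Roughly what these two helpers reduce to in public NumPy terms (a sketch, not the internal implementations):

    import numpy as np

    arr = np.array([1, 2, 3], dtype=np.int32)
    arr.astype(np.int64, copy=False)      # upcast sub-64-bit int/uint/float to 64-bit

    np.full(3, np.nan, dtype=np.float64)  # 1-D array of given length/dtype, filled with a scalar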
2 changes: 1 addition & 1 deletion pandas/core/dtypes/common.py
@@ -602,7 +602,7 @@ def is_categorical_dtype(arr_or_dtype) -> bool:

def is_string_or_object_np_dtype(dtype: np.dtype) -> bool:
"""
-Faster alternative to is_string_dtype, assumes we have a np.dtype object.
+Faster alternative to is_string_dtype, assumes we have an np.dtype object.
"""
return dtype == object or dtype.kind in "SU"

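Since the full body is shown above, the check is easy to exercise directly:

    import numpy as np

    def is_string_or_object_np_dtype(dtype: np.dtype) -> bool:
        return dtype == object or dtype.kind in "SU"

    is_string_or_object_np_dtype(np.dtype("U5"))     # True: 'U' is a unicode dtype
    is_string_or_object_np_dtype(np.dtype("int64"))  # False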
4 changes: 2 additions & 2 deletions pandas/core/dtypes/dtypes.py
@@ -113,7 +113,7 @@

class PandasExtensionDtype(ExtensionDtype):
"""
-A np.dtype duck-typed class, suitable for holding a custom dtype.
+An np.dtype duck-typed class, suitable for holding a custom dtype.

THIS IS NOT A REAL NUMPY DTYPE
"""
@@ -503,7 +503,7 @@ def _hash_categories(self) -> int:
# assumes if any individual category is a tuple, then all our. ATM
# I don't really want to support just some of the categories being
# tuples.
-cat_list = list(categories) # breaks if a np.array of categories
+cat_list = list(categories) # breaks if an np.array of categories
cat_array = hash_tuples(cat_list)
else:
if categories.dtype == "O" and len({type(x) for x in categories}) != 1:
2 changes: 1 addition & 1 deletion pandas/core/dtypes/missing.py
@@ -593,7 +593,7 @@ def construct_1d_array_from_inferred_fill_value(

def maybe_fill(arr: np.ndarray) -> np.ndarray:
"""
-Fill numpy.ndarray with NaN, unless we have a integer or boolean dtype.
+Fill numpy.ndarray with NaN, unless we have an integer or boolean dtype.
"""
if arr.dtype.kind not in "iub":
arr.fill(np.nan)
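The dtype-kind guard shown above, as a standalone sketch: integer, unsigned, and boolean arrays cannot hold NaN, so they are left alone:

    import numpy as np

    arr = np.empty(3, dtype=np.float64)
    if arr.dtype.kind not in "iub":  # skip int ('i'), uint ('u'), bool ('b')
        arr.fill(np.nan)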
2 changes: 1 addition & 1 deletion pandas/core/groupby/groupby.py
@@ -645,7 +645,7 @@ def _get_indices(self, names):

def get_converter(s):
# possibly convert to the actual key types
-# in the indices, could be a Timestamp or a np.datetime64
+# in the indices, could be a Timestamp or an np.datetime64
if isinstance(s, datetime.datetime):
return lambda key: Timestamp(key)
elif isinstance(s, np.datetime64):
4 changes: 2 additions & 2 deletions pandas/core/indexes/base.py
@@ -1319,7 +1319,7 @@ def _maybe_disallow_fill(self, allow_fill: bool, fill_value, indices) -> bool:

def repeat(self, repeats, axis: None = None) -> Self:
"""
-Repeat elements of a Index.
+Repeat elements of an Index.
Returns a new Index where each element of the current Index
is repeated consecutively a given number of times.
@@ -4937,7 +4937,7 @@ def _can_use_libjoin(self) -> bool:
Whether we can use the fastpaths implemented in _libs.join.
This is driven by whether (in monotonic increasing cases that are
-guaranteed not to have NAs) we can convert to a np.ndarray without
+guaranteed not to have NAs) we can convert to an np.ndarray without
making a copy. If we cannot, this negates the performance benefit
of using libjoin.
"""
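Index.repeat from the public API, for reference:

    import pandas as pd

    pd.Index(["a", "b", "c"]).repeat(2)
    # Index(['a', 'a', 'b', 'b', 'c', 'c'], dtype='object')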
4 changes: 2 additions & 2 deletions pandas/core/indexes/range.py
@@ -921,7 +921,7 @@ def _union(self, other: Index, sort: bool | None):
sort : bool or None, default None
Whether to sort (monotonically increasing) the resulting index.
``sort=None|True`` returns a ``RangeIndex`` if possible or a sorted
-``Index`` with a int64 dtype if not.
+``Index`` with an int64 dtype if not.
``sort=False`` can return a ``RangeIndex`` if self is monotonically
increasing and other is fully contained in self. Otherwise, returns
an unsorted ``Index`` with an int64 dtype.
@@ -1184,7 +1184,7 @@ def _concat(self, indexes: list[Index], name: Hashable) -> Index:
Overriding parent method for the case of all RangeIndex instances.

When all members of "indexes" are of type RangeIndex: result will be
-RangeIndex if possible, Index with a int64 dtype otherwise. E.g.:
+RangeIndex if possible, Index with an int64 dtype otherwise. E.g.:
indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Index([0,1,2,4,5], dtype='int64')
"""
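The two docstring examples can be reproduced through Index.append, which routes to _concat for all-RangeIndex inputs; a sketch:

    import pandas as pd

    pd.RangeIndex(3).append(pd.RangeIndex(3, 6))  # RangeIndex(start=0, stop=6, step=1)
    pd.RangeIndex(3).append(pd.RangeIndex(4, 6))  # Index([0, 1, 2, 4, 5], dtype='int64')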
4 changes: 2 additions & 2 deletions pandas/core/indexing.py
@@ -1480,7 +1480,7 @@ def _get_slice_axis(self, slice_obj: slice, axis: AxisInt):
def _convert_to_indexer(self, key, axis: AxisInt):
"""
Convert indexing key into something we can use to do actual fancy
-indexing on a ndarray.
+indexing on an ndarray.
Examples
ix[:5] -> slice(0, 5)
@@ -1991,7 +1991,7 @@ def _setitem_with_indexer_split_path(self, indexer, value, name: str):
lplane_indexer = length_of_indexer(pi, self.obj.index)
# lplane_indexer gives the expected length of obj[indexer[0]]

-# we need an iterable, with a ndim of at least 1
+# we need an iterable, with an ndim of at least 1
# eg. don't pass through np.array(0)
if is_list_like_indexer(value) and getattr(value, "ndim", 1) > 0:
if isinstance(value, ABCDataFrame):
4 changes: 2 additions & 2 deletions pandas/core/internals/blocks.py
@@ -1151,7 +1151,7 @@ def putmask(self, mask, new) -> list[Block]:
Parameters
----------
mask : np.ndarray[bool], SparseArray[bool], or BooleanArray
-new : a ndarray/object
+new : an ndarray/object
Returns
-------
@@ -1214,7 +1214,7 @@ def where(self, other, cond) -> list[Block]:
Parameters
----------
-other : a ndarray/object
+other : an ndarray/object
cond : np.ndarray[bool], SparseArray[bool], or BooleanArray
Returns
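Block.putmask and Block.where are internal, but the same semantics surface in Series.mask and Series.where; a sketch:

    import pandas as pd

    s = pd.Series([1, 2, 3])
    s.where(s > 1, other=0)  # [0, 2, 3]: keep where cond is True, replace elsewhere
    s.mask(s > 1, other=0)   # [1, 0, 0]: replace where cond is True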
2 changes: 1 addition & 1 deletion pandas/core/internals/construction.py
@@ -193,7 +193,7 @@ def ndarray_to_mgr(
values, index, columns, dtype: DtypeObj | None, copy: bool
) -> Manager:
# used in DataFrame.__init__
-# input must be a ndarray, list, Series, Index, ExtensionArray
+# input must be an ndarray, list, Series, Index, ExtensionArray
infer_object = not isinstance(values, (ABCSeries, Index, ExtensionArray))

if isinstance(values, ABCSeries):
4 changes: 2 additions & 2 deletions pandas/core/internals/managers.py
@@ -807,7 +807,7 @@ def reindex_indexer(
only_slice : bool, default False
Whether to take views, not copies, along columns.
use_na_proxy : bool, default False
-Whether to use a np.void ndarray for newly introduced columns.
+Whether to use an np.void ndarray for newly introduced columns.
pandas-indexer with -1's only.
"""
@@ -883,7 +883,7 @@ def _slice_take_blocks_ax0(
If True, we always return views on existing arrays, never copies.
This is used when called from ops.blockwise.operate_blockwise.
use_na_proxy : bool, default False
-Whether to use a np.void ndarray for newly introduced columns.
+Whether to use an np.void ndarray for newly introduced columns.
ref_inplace_op: bool, default False
Don't track refs if True because we operate inplace
2 changes: 1 addition & 1 deletion pandas/core/missing.py
@@ -851,7 +851,7 @@ def pad_or_backfill_inplace(
# reshape a 1 dim if needed
if values.ndim == 1:
if axis != 0: # pragma: no cover
-raise AssertionError("cannot interpolate on a ndim == 1 with axis != 0")
+raise AssertionError("cannot interpolate on an ndim == 1 with axis != 0")
values = values.reshape(tuple((1,) + values.shape))

method = clean_fill_method(method)
6 changes: 3 additions & 3 deletions pandas/core/ops/mask_ops.py
@@ -49,7 +49,7 @@ def kleene_or(
return kleene_or(right, left, right_mask, left_mask)

if not isinstance(left, np.ndarray):
-raise TypeError("Either `left` or `right` need to be a np.ndarray.")
+raise TypeError("Either `left` or `right` need to be an np.ndarray.")

raise_for_nan(right, method="or")

@@ -113,7 +113,7 @@ def kleene_xor(
return kleene_xor(right, left, right_mask, left_mask)

if not isinstance(left, np.ndarray):
-raise TypeError("Either `left` or `right` need to be a np.ndarray.")
+raise TypeError("Either `left` or `right` need to be an np.ndarray.")

raise_for_nan(right, method="xor")
if right is libmissing.NA:
@@ -163,7 +163,7 @@ def kleene_and(
return kleene_and(right, left, right_mask, left_mask)

if not isinstance(left, np.ndarray):
-raise TypeError("Either `left` or `right` need to be a np.ndarray.")
+raise TypeError("Either `left` or `right` need to be an np.ndarray.")
raise_for_nan(right, method="and")

if right is libmissing.NA:
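The Kleene logic these helpers implement is observable through the nullable boolean dtype:

    import pandas as pd

    a = pd.array([True, False, None], dtype="boolean")
    a | pd.NA  # [True, <NA>, <NA>]: True wins regardless of the missing operand
    a & pd.NA  # [<NA>, False, <NA>]: False wins regardless of the missing operand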
2 changes: 1 addition & 1 deletion pandas/core/series.py
@@ -4293,7 +4293,7 @@ def explode(self, ignore_index: bool = False) -> Series:
This routine will explode list-likes including lists, tuples, sets,
Series, and np.ndarray. The result dtype of the subset rows will
be object. Scalars will be returned unchanged, and empty list-likes will
-result in a np.nan for that row. In addition, the ordering of elements in
+result in an np.nan for that row. In addition, the ordering of elements in
the output will be non-deterministic when exploding sets.

Reference :ref:`the user guide <reshaping.explode>` for more examples.
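The empty-list-to-np.nan behavior from the docstring, for reference:

    import pandas as pd

    s = pd.Series([[1, 2], [], [3]])
    s.explode()
    # 0      1
    # 0      2
    # 1    NaN   <- empty list-like becomes np.nan
    # 2      3
    # dtype: object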
2 changes: 1 addition & 1 deletion pandas/core/util/hashing.py
@@ -270,7 +270,7 @@ def hash_array(
dtype=uint64)
"""
if not hasattr(vals, "dtype"):
-raise TypeError("must pass a ndarray-like")
+raise TypeError("must pass an ndarray-like")

if isinstance(vals, ABCExtensionArray):
return vals._hash_pandas_object(
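hash_array is public as pd.util.hash_array; the TypeError above fires for anything without a dtype attribute:

    import numpy as np
    import pandas as pd

    pd.util.hash_array(np.array([1, 2, 3]))  # array of uint64 hashes, one per element
    pd.util.hash_array([1, 2, 3])            # TypeError: plain lists have no .dtype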
2 changes: 1 addition & 1 deletion pandas/errors/__init__.py
@@ -991,7 +991,7 @@ class InvalidColumnName(Warning):

class CategoricalConversionWarning(Warning):
"""
-Warning is raised when reading a partial labeled Stata file using a iterator.
+Warning is raised when reading a partial labeled Stata file using an iterator.

This warning helps ensure data integrity and alerts users to potential issues
during the incremental reading of Stata files with labeled data, allowing for
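The iterator path in question is read_stata with a chunksize; a sketch, where the file path and the per-chunk handler are hypothetical placeholders:

    import pandas as pd

    # "labeled.dta" and process() are placeholders, not real names
    with pd.read_stata("labeled.dta", chunksize=10_000) as reader:
        for chunk in reader:  # may emit CategoricalConversionWarning
            process(chunk)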
2 changes: 1 addition & 1 deletion pandas/io/json/_json.py
@@ -1280,7 +1280,7 @@ def _try_convert_data(
@final
def _try_convert_to_date(self, data: Series) -> Series:
"""
-Try to parse a ndarray like into a date column.
+Try to parse an ndarray like into a date column.

Try to coerce object in epoch/iso formats and integer/float in epoch
formats.
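Both formats the docstring mentions go through pd.to_datetime; a sketch:

    import pandas as pd

    pd.to_datetime(pd.Series([1609459200]), unit="s")  # epoch seconds -> 2021-01-01
    pd.to_datetime(pd.Series(["2021-01-01"]))          # ISO 8601 strings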
8 changes: 4 additions & 4 deletions pandas/plotting/_matplotlib/core.py
@@ -1126,16 +1126,16 @@ def _parse_errorbars(
Error bars can be specified in several ways:
Series: the user provides a pandas.Series object of the same
length as the data
-ndarray: provides a np.ndarray of the same length as the data
+ndarray: provides an np.ndarray of the same length as the data
DataFrame/dict: error values are paired with keys matching the
key in the plotted DataFrame
str: the name of the column within the plotted DataFrame

Asymmetrical error bars are also supported, however raw error values
-must be provided in this case. For a ``N`` length :class:`Series`, a
+must be provided in this case. For an ``N`` length :class:`Series`, a
``2xN`` array should be provided indicating lower and upper (or left
-and right) errors. For a ``MxN`` :class:`DataFrame`, asymmetrical errors
-should be in a ``Mx2xN`` array.
+and right) errors. For an ``MxN`` :class:`DataFrame`, asymmetrical errors
+should be in an ``Mx2xN`` array.
"""
if err is None:
return None, data
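An asymmetric error bar in the 2xN shape the docstring requires; a sketch assuming matplotlib is installed:

    import numpy as np
    import pandas as pd

    s = pd.Series([1.0, 2.0, 3.0])
    err = np.array([[0.1, 0.2, 0.3],   # lower errors
                    [0.2, 0.3, 0.4]])  # upper errors: 2xN for an N-length Series
    s.plot(yerr=err)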
4 changes: 2 additions & 2 deletions pandas/tests/arithmetic/test_timedelta64.py
@@ -1830,7 +1830,7 @@ def test_td64_div_object_mixed_result(self, box_with_array):
expected = expected.to_numpy()
tm.assert_equal(res, expected)
if box_with_array is DataFrame:
-# We have a np.timedelta64(NaT), not pd.NaT
+# We have an np.timedelta64(NaT), not pd.NaT
assert isinstance(res.iloc[1, 0], np.timedelta64)

res = tdi // other
@@ -1841,7 +1841,7 @@ def test_td64_div_object_mixed_result(self, box_with_array):
expected = expected.to_numpy()
tm.assert_equal(res, expected)
if box_with_array is DataFrame:
-# We have a np.timedelta64(NaT), not pd.NaT
+# We have an np.timedelta64(NaT), not pd.NaT
assert isinstance(res.iloc[1, 0], np.timedelta64)

# ------------------------------------------------------------------
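The distinction the test comments rely on, for reference:

    import numpy as np
    import pandas as pd

    x = np.timedelta64("NaT")
    x is pd.NaT                    # False: a NumPy scalar, not the pandas singleton
    isinstance(x, np.timedelta64)  # True
    pd.isna(x)                     # True: both still count as missing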