The following issues were found:
pandas/tests/indexes/multi/test_conversion.py
12 issues
Line: 2
Column: 1
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
MultiIndex,
)
import pandas._testing as tm
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
MultiIndex,
)
import pandas._testing as tm
Reported by Pylint.
Line: 12
Column: 1
import pandas._testing as tm
def test_to_numpy(idx):
result = idx.to_numpy()
exp = idx.values
tm.assert_numpy_array_equal(result, exp)
Reported by Pylint.
Line: 18
Column: 1
tm.assert_numpy_array_equal(result, exp)
def test_to_frame():
tuples = [(1, "one"), (1, "two"), (2, "one"), (2, "two")]
index = MultiIndex.from_tuples(tuples)
result = index.to_frame(index=False)
expected = DataFrame(tuples)
Reported by Pylint.
Line: 91
Column: 1
tm.assert_frame_equal(result, expected)
def test_to_frame_dtype_fidelity():
# GH 22420
mi = MultiIndex.from_arrays(
[
pd.date_range("19910905", periods=6, tz="US/Eastern"),
[1, 1, 1, 2, 2, 2],
Reported by Pylint.
Line: 93
Column: 5
def test_to_frame_dtype_fidelity():
# GH 22420
mi = MultiIndex.from_arrays(
[
pd.date_range("19910905", periods=6, tz="US/Eastern"),
[1, 1, 1, 2, 2, 2],
pd.Categorical(["a", "a", "b", "b", "c", "c"], ordered=True),
["x", "x", "y", "z", "x", "y"],
Reported by Pylint.
Line: 112
Column: 5
"c": ["x", "x", "y", "z", "x", "y"],
}
)
df = mi.to_frame(index=False)
df_dtypes = df.dtypes.to_dict()
tm.assert_frame_equal(df, expected_df)
assert original_dtypes == df_dtypes
Reported by Pylint.
Line: 116
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
df_dtypes = df.dtypes.to_dict()
tm.assert_frame_equal(df, expected_df)
assert original_dtypes == df_dtypes
def test_to_frame_resulting_column_order():
# GH 22420
expected = ["z", 0, "a"]
Reported by Bandit.
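Note: every Bandit finding in this report is the same check, B101 (assert_used). Bandit flags `assert` because assertions are stripped when Python runs with the -O flag, so they cannot be relied on for runtime checks. In pytest-based test files such as this one, bare asserts are the idiomatic way to express expectations, so the usual response is to suppress B101 for test code rather than rewrite the tests. A minimal sketch, assuming a Bandit release that honours per-check `# nosec` markers (the helper name below is hypothetical, not the pandas test):

def _example_dtype_check(original_dtypes, df_dtypes):
    # Sketch only: projects typically either exclude the test tree from the
    # Bandit scan or mark intentional test assertions on the line itself.
    assert original_dtypes == df_dtypes  # nosec B101 - intentional pytest assertion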
Line: 119
Column: 1
assert original_dtypes == df_dtypes
def test_to_frame_resulting_column_order():
# GH 22420
expected = ["z", 0, "a"]
mi = MultiIndex.from_arrays(
[["a", "b", "c"], ["x", "y", "z"], ["q", "w", "e"]], names=expected
)
Reported by Pylint.
Line: 122
Column: 5
def test_to_frame_resulting_column_order():
# GH 22420
expected = ["z", 0, "a"]
mi = MultiIndex.from_arrays(
[["a", "b", "c"], ["x", "y", "z"], ["q", "w", "e"]], names=expected
)
result = mi.to_frame().columns.tolist()
assert result == expected
Reported by Pylint.
pandas/core/internals/ops.py
12 issues
Line: 33
Column: 17
left_ea = blk_vals.ndim == 1
rblks = right._slice_take_blocks_ax0(locs.indexer, only_slice=True)
# Assertions are disabled for performance, but should hold:
# if left_ea:
# assert len(locs) == 1, locs
# assert len(rblks) == 1, rblks
Reported by Pylint.
Line: 60
Column: 15
res_values = array_op(lvals, rvals)
if left_ea and not right_ea and hasattr(res_values, "reshape"):
res_values = res_values.reshape(1, -1)
nbs = rblk._split_op_result(res_values)
# Assertions are disabled for performance, but should hold:
# if right_ea or left_ea:
# assert len(nbs) == 1
# else:
Reported by Pylint.
Line: 107
Column: 3
# Require that the indexing into lvals be slice-like
assert rblk.mgr_locs.is_slice_like, rblk.mgr_locs
# TODO(EA2D): with 2D EAs only this first clause would be needed
if not (left_ea or right_ea):
# error: Invalid index type "Tuple[Any, slice]" for "Union[ndarray,
# ExtensionArray]"; expected type "Union[int, slice, ndarray]"
lvals = lvals[rblk.mgr_locs.indexer, :] # type: ignore[index]
assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape)
Reported by Pylint.
Line: 1
Column: 1
from __future__ import annotations
from collections import namedtuple
from typing import (
TYPE_CHECKING,
Iterator,
)
from pandas._typing import ArrayLike
Reported by Pylint.
Line: 49
Column: 1
yield info
def operate_blockwise(
left: BlockManager, right: BlockManager, array_op
) -> BlockManager:
# At this point we have already checked the parent DataFrames for
# assert rframe._indexed_same(lframe)
Reported by Pylint.
Line: 87
Column: 9
"""
Reset mgr_locs to correspond to our original DataFrame.
"""
for nb in nbs:
nblocs = locs[nb.mgr_locs.indexer]
nb.mgr_locs = nblocs
# Assertions are disabled for performance, but should hold:
# assert len(nblocs) == nb.shape[0], (len(nblocs), nb.shape)
# assert all(x in locs.as_array for x in nb.mgr_locs.as_array)
Reported by Pylint.
Line: 105
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
rvals = rblk.values
# Require that the indexing into lvals be slice-like
assert rblk.mgr_locs.is_slice_like, rblk.mgr_locs
# TODO(EA2D): with 2D EAs only this first clause would be needed
if not (left_ea or right_ea):
# error: Invalid index type "Tuple[Any, slice]" for "Union[ndarray,
# ExtensionArray]"; expected type "Union[int, slice, ndarray]"
Reported by Bandit.
Line: 112
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
# error: Invalid index type "Tuple[Any, slice]" for "Union[ndarray,
# ExtensionArray]"; expected type "Union[int, slice, ndarray]"
lvals = lvals[rblk.mgr_locs.indexer, :] # type: ignore[index]
assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape)
elif left_ea and right_ea:
assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape)
elif right_ea:
# lvals are 2D, rvals are 1D
Reported by Bandit.
Line: 114
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
lvals = lvals[rblk.mgr_locs.indexer, :] # type: ignore[index]
assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape)
elif left_ea and right_ea:
assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape)
elif right_ea:
# lvals are 2D, rvals are 1D
# error: Invalid index type "Tuple[Any, slice]" for "Union[ndarray,
# ExtensionArray]"; expected type "Union[int, slice, ndarray]"
Reported by Bandit.
Line: 121
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
# error: Invalid index type "Tuple[Any, slice]" for "Union[ndarray,
# ExtensionArray]"; expected type "Union[int, slice, ndarray]"
lvals = lvals[rblk.mgr_locs.indexer, :] # type: ignore[index]
assert lvals.shape[0] == 1, lvals.shape
# error: Invalid index type "Tuple[int, slice]" for "Union[Any,
# ExtensionArray]"; expected type "Union[int, slice, ndarray]"
lvals = lvals[0, :] # type: ignore[index]
else:
# lvals are 1D, rvals are 2D
Reported by Bandit.
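Note: in library code such as pandas/core/internals/ops.py the trade-off is different. The quoted comments already note that some assertions are "disabled for performance", and any remaining `assert` disappears under python -O. Where a check must hold in production, one option is an explicit exception; a minimal sketch based on the shape assert quoted above (a hypothetical helper, not the pandas implementation):

import numpy as np

def _check_same_shape(lvals: np.ndarray, rvals: np.ndarray) -> None:
    # Sketch only: an explicit raise survives `python -O`, unlike `assert`.
    if lvals.shape != rvals.shape:
        raise ValueError(f"operand shapes do not match: {lvals.shape} vs {rvals.shape}")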
pandas/tests/dtypes/cast/test_construct_from_scalar.py
12 issues
Line: 2
Column: 1
import numpy as np
import pytest
from pandas.core.dtypes.cast import construct_1d_arraylike_from_scalar
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas import (
Categorical,
Timedelta,
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import pytest
from pandas.core.dtypes.cast import construct_1d_arraylike_from_scalar
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas import (
Categorical,
Timedelta,
Reported by Pylint.
Line: 15
Column: 1
import pandas._testing as tm
def test_cast_1d_array_like_from_scalar_categorical():
# see gh-19565
#
# Categorical result from scalar did not maintain
# categories and ordering of the passed dtype.
cats = ["a", "b", "c"]
Reported by Pylint.
Line: 28
Column: 1
tm.assert_categorical_equal(result, expected)
def test_cast_1d_array_like_from_timestamp():
# check we dont lose nanoseconds
ts = Timestamp.now() + Timedelta(1)
res = construct_1d_arraylike_from_scalar(ts, 2, np.dtype("M8[ns]"))
assert res[0] == ts
Reported by Pylint.
Line: 30
Column: 5
def test_cast_1d_array_like_from_timestamp():
# check we dont lose nanoseconds
ts = Timestamp.now() + Timedelta(1)
res = construct_1d_arraylike_from_scalar(ts, 2, np.dtype("M8[ns]"))
assert res[0] == ts
def test_cast_1d_array_like_from_timedelta():
Reported by Pylint.
Line: 32
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
# check we dont lose nanoseconds
ts = Timestamp.now() + Timedelta(1)
res = construct_1d_arraylike_from_scalar(ts, 2, np.dtype("M8[ns]"))
assert res[0] == ts
def test_cast_1d_array_like_from_timedelta():
# check we dont lose nanoseconds
td = Timedelta(1)
Reported by Bandit.
Line: 35
Column: 1
assert res[0] == ts
def test_cast_1d_array_like_from_timedelta():
# check we dont lose nanoseconds
td = Timedelta(1)
res = construct_1d_arraylike_from_scalar(td, 2, np.dtype("m8[ns]"))
assert res[0] == td
Reported by Pylint.
Line: 37
Column: 5
def test_cast_1d_array_like_from_timedelta():
# check we dont lose nanoseconds
td = Timedelta(1)
res = construct_1d_arraylike_from_scalar(td, 2, np.dtype("m8[ns]"))
assert res[0] == td
def test_cast_1d_array_like_mismatched_datetimelike():
Reported by Pylint.
Line: 39
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
# check we dont lose nanoseconds
td = Timedelta(1)
res = construct_1d_arraylike_from_scalar(td, 2, np.dtype("m8[ns]"))
assert res[0] == td
def test_cast_1d_array_like_mismatched_datetimelike():
td = np.timedelta64("NaT", "ns")
dt = np.datetime64("NaT", "ns")
Reported by Bandit.
Line: 42
Column: 1
assert res[0] == td
def test_cast_1d_array_like_mismatched_datetimelike():
td = np.timedelta64("NaT", "ns")
dt = np.datetime64("NaT", "ns")
with pytest.raises(TypeError, match="Cannot cast"):
construct_1d_arraylike_from_scalar(td, 2, dt.dtype)
Reported by Pylint.
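Note: for context on what these flagged tests exercise, construct_1d_arraylike_from_scalar builds a length-n array filled with a single scalar, keeping nanosecond precision for datetime-like values. A minimal usage sketch mirroring the quoted tests (this is internal pandas API; the import path and behaviour are taken from the snippets above):

import numpy as np
from pandas import Timedelta, Timestamp
from pandas.core.dtypes.cast import construct_1d_arraylike_from_scalar

ts = Timestamp.now() + Timedelta(1)  # a timestamp carrying nanosecond precision
res = construct_1d_arraylike_from_scalar(ts, 2, np.dtype("M8[ns]"))
# res is a length-2 datetime64[ns] array; per the quoted test, res[0] == ts.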
pandas/tests/frame/common.py
12 issues
Line: 1
Column: 1
from __future__ import annotations
from pandas import (
DataFrame,
concat,
)
def _check_mixed_float(df, dtype=None):
Reported by Pylint.
Line: 9
Column: 1
)
def _check_mixed_float(df, dtype=None):
# float16 are most likely to be upcasted to float32
dtypes = {"A": "float32", "B": "float32", "C": "float16", "D": "float64"}
if isinstance(dtype, str):
dtypes = {k: dtype for k, v in dtypes.items()}
elif isinstance(dtype, dict):
Reported by Pylint.
Line: 17
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
elif isinstance(dtype, dict):
dtypes.update(dtype)
if dtypes.get("A"):
assert df.dtypes["A"] == dtypes["A"]
if dtypes.get("B"):
assert df.dtypes["B"] == dtypes["B"]
if dtypes.get("C"):
assert df.dtypes["C"] == dtypes["C"]
if dtypes.get("D"):
Reported by Bandit.
Line: 19
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
if dtypes.get("A"):
assert df.dtypes["A"] == dtypes["A"]
if dtypes.get("B"):
assert df.dtypes["B"] == dtypes["B"]
if dtypes.get("C"):
assert df.dtypes["C"] == dtypes["C"]
if dtypes.get("D"):
assert df.dtypes["D"] == dtypes["D"]
Reported by Bandit.
Line: 21
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
if dtypes.get("B"):
assert df.dtypes["B"] == dtypes["B"]
if dtypes.get("C"):
assert df.dtypes["C"] == dtypes["C"]
if dtypes.get("D"):
assert df.dtypes["D"] == dtypes["D"]
def _check_mixed_int(df, dtype=None):
Reported by Bandit.
Line: 23
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
if dtypes.get("C"):
assert df.dtypes["C"] == dtypes["C"]
if dtypes.get("D"):
assert df.dtypes["D"] == dtypes["D"]
def _check_mixed_int(df, dtype=None):
dtypes = {"A": "int32", "B": "uint64", "C": "uint8", "D": "int64"}
if isinstance(dtype, str):
Reported by Bandit.
Line: 26
Column: 1
assert df.dtypes["D"] == dtypes["D"]
def _check_mixed_int(df, dtype=None):
dtypes = {"A": "int32", "B": "uint64", "C": "uint8", "D": "int64"}
if isinstance(dtype, str):
dtypes = {k: dtype for k, v in dtypes.items()}
elif isinstance(dtype, dict):
dtypes.update(dtype)
Reported by Pylint.
Line: 33
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
elif isinstance(dtype, dict):
dtypes.update(dtype)
if dtypes.get("A"):
assert df.dtypes["A"] == dtypes["A"]
if dtypes.get("B"):
assert df.dtypes["B"] == dtypes["B"]
if dtypes.get("C"):
assert df.dtypes["C"] == dtypes["C"]
if dtypes.get("D"):
Reported by Bandit.
Line: 35
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
if dtypes.get("A"):
assert df.dtypes["A"] == dtypes["A"]
if dtypes.get("B"):
assert df.dtypes["B"] == dtypes["B"]
if dtypes.get("C"):
assert df.dtypes["C"] == dtypes["C"]
if dtypes.get("D"):
assert df.dtypes["D"] == dtypes["D"]
Reported by Bandit.
Line: 37
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
if dtypes.get("B"):
assert df.dtypes["B"] == dtypes["B"]
if dtypes.get("C"):
assert df.dtypes["C"] == dtypes["C"]
if dtypes.get("D"):
assert df.dtypes["D"] == dtypes["D"]
def zip_frames(frames: list[DataFrame], axis: int = 1) -> DataFrame:
Reported by Bandit.
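Note: the Bandit hits in _check_mixed_float and _check_mixed_int all come from the same repeated per-column assert pattern. Since these helpers back pytest tests, the asserts themselves are idiomatic; the repetition is the part worth tightening. A possible rewrite (a sketch under that assumption, not the pandas code):

def _check_mixed_float(df, dtype=None):
    # Same behaviour as the quoted helper, with the four per-column
    # checks collapsed into one loop.
    dtypes = {"A": "float32", "B": "float32", "C": "float16", "D": "float64"}
    if isinstance(dtype, str):
        dtypes = {k: dtype for k in dtypes}
    elif isinstance(dtype, dict):
        dtypes.update(dtype)
    for col, expected in dtypes.items():
        if expected:
            assert df.dtypes[col] == expected, (col, df.dtypes[col], expected)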
pandas/core/array_algos/masked_reductions.py
12 issues
Line: 10
Column: 1
import numpy as np
from pandas._libs import missing as libmissing
from pandas.core.nanops import check_below_min_count
def _sumprod(
Reported by Pylint.
Line: 51
Column: 1
return func(values, where=~mask)
def sum(
values: np.ndarray, mask: np.ndarray, *, skipna: bool = True, min_count: int = 0
):
return _sumprod(
np.sum, values=values, mask=mask, skipna=skipna, min_count=min_count
)
Reported by Pylint.
Line: 99
Column: 1
return libmissing.NA
def min(values: np.ndarray, mask: np.ndarray, *, skipna: bool = True):
return _minmax(np.min, values=values, mask=mask, skipna=skipna)
def max(values: np.ndarray, mask: np.ndarray, *, skipna: bool = True):
return _minmax(np.max, values=values, mask=mask, skipna=skipna)
Reported by Pylint.
Line: 103
Column: 1
return _minmax(np.min, values=values, mask=mask, skipna=skipna)
def max(values: np.ndarray, mask: np.ndarray, *, skipna: bool = True):
return _minmax(np.max, values=values, mask=mask, skipna=skipna)
def mean(values: np.ndarray, mask: np.ndarray, skipna: bool = True):
if not values.size or mask.all():
Reported by Pylint.
Line: 41
Column: 9
``min_count`` non-NA values are present the result will be NA.
"""
if not skipna:
if mask.any() or check_below_min_count(values.shape, None, min_count):
return libmissing.NA
else:
return func(values)
else:
if check_below_min_count(values.shape, mask, min_count):
Reported by Pylint.
Line: 51
Column: 1
return func(values, where=~mask)
def sum(
values: np.ndarray, mask: np.ndarray, *, skipna: bool = True, min_count: int = 0
):
return _sumprod(
np.sum, values=values, mask=mask, skipna=skipna, min_count=min_count
)
Reported by Pylint.
Line: 59
Column: 1
)
def prod(
values: np.ndarray, mask: np.ndarray, *, skipna: bool = True, min_count: int = 0
):
return _sumprod(
np.prod, values=values, mask=mask, skipna=skipna, min_count=min_count
)
Reported by Pylint.
Line: 85
Column: 9
Whether to skip NA.
"""
if not skipna:
if mask.any() or not values.size:
# min/max with empty array raise in numpy, pandas returns NA
return libmissing.NA
else:
return func(values)
else:
Reported by Pylint.
Line: 92
Column: 9
return func(values)
else:
subset = values[~mask]
if subset.size:
return func(subset)
else:
# min/max with empty array raise in numpy, pandas returns NA
return libmissing.NA
Reported by Pylint.
Line: 99
Column: 1
return libmissing.NA
def min(values: np.ndarray, mask: np.ndarray, *, skipna: bool = True):
return _minmax(np.min, values=values, mask=mask, skipna=skipna)
def max(values: np.ndarray, mask: np.ndarray, *, skipna: bool = True):
return _minmax(np.max, values=values, mask=mask, skipna=skipna)
Reported by Pylint.
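Note: the findings in masked_reductions.py are style-level, but the quoted snippets do show the contract of these helpers: each takes a values array plus a boolean mask marking the missing positions, skips masked slots when skipna=True, and returns NA as soon as any slot is masked when skipna=False (or when fewer than min_count valid values remain). A small behaviour sketch using the internal signatures quoted above (internal API, so the module path is an assumption taken from the report's file header):

import numpy as np
from pandas.core.array_algos import masked_reductions

values = np.array([1.0, 2.0, 4.0])
mask = np.array([False, False, True])  # the last element is "missing"

masked_reductions.sum(values, mask)                # 3.0: masked slot is skipped
masked_reductions.sum(values, mask, skipna=False)  # NA: any masked slot poisons the result
masked_reductions.sum(values, mask, min_count=5)   # NA: fewer than min_count valid values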
pandas/tests/arrays/floating/test_to_numpy.py
12 issues
Line: 2
Column: 1
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import FloatingArray
@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
Reported by Pylint.
Line: 113
Column: 31
@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
def test_to_numpy_string(box, dtype):
con = pd.Series if box else pd.array
arr = con([0.0, 1.0, None], dtype="Float64")
result = arr.to_numpy(dtype="str")
expected = np.array([0.0, 1.0, pd.NA], dtype="<U32")
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import FloatingArray
@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
Reported by Pylint.
Line: 10
Column: 1
@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
def test_to_numpy(box):
con = pd.Series if box else pd.array
# default (with or without missing values) -> object dtype
arr = con([0.1, 0.2, 0.3], dtype="Float64")
result = arr.to_numpy()
Reported by Pylint.
Line: 26
Column: 1
@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
def test_to_numpy_float(box):
con = pd.Series if box else pd.array
# no missing values -> can convert to float, otherwise raises
arr = con([0.1, 0.2, 0.3], dtype="Float64")
result = arr.to_numpy(dtype="float64")
Reported by Pylint.
Line: 46
Column: 1
@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
def test_to_numpy_int(box):
con = pd.Series if box else pd.array
# no missing values -> can convert to int, otherwise raises
arr = con([1.0, 2.0, 3.0], dtype="Float64")
result = arr.to_numpy(dtype="int64")
Reported by Pylint.
Line: 67
Column: 1
@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
def test_to_numpy_na_value(box):
con = pd.Series if box else pd.array
arr = con([0.0, 1.0, None], dtype="Float64")
result = arr.to_numpy(dtype=object, na_value=None)
expected = np.array([0.0, 1.0, None], dtype="object")
Reported by Pylint.
Line: 84
Column: 1
tm.assert_numpy_array_equal(result, expected)
def test_to_numpy_na_value_with_nan():
# array with both NaN and NA -> only fill NA with `na_value`
arr = FloatingArray(np.array([0.0, np.nan, 0.0]), np.array([False, False, True]))
result = arr.to_numpy(dtype="float64", na_value=-1)
expected = np.array([0.0, np.nan, -1.0], dtype="float64")
tm.assert_numpy_array_equal(result, expected)
Reported by Pylint.
Line: 94
Column: 1
@pytest.mark.parametrize("dtype", ["float64", "float32", "int32", "int64", "bool"])
@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
def test_to_numpy_dtype(box, dtype):
con = pd.Series if box else pd.array
arr = con([0.0, 1.0], dtype="Float64")
result = arr.to_numpy(dtype=dtype)
expected = np.array([0, 1], dtype=dtype)
Reported by Pylint.
Line: 105
Column: 1
@pytest.mark.parametrize("dtype", ["float64", "float32", "int32", "int64", "bool"])
@pytest.mark.parametrize("box", [True, False], ids=["series", "array"])
def test_to_numpy_na_raises(box, dtype):
con = pd.Series if box else pd.array
arr = con([0.0, 1.0, None], dtype="Float64")
with pytest.raises(ValueError, match=dtype):
arr.to_numpy(dtype=dtype)
Reported by Pylint.
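Note: the behaviours these tests pin down are also visible through the public nullable-float API: to_numpy() defaults to an object array that keeps pd.NA, converting to a plain float/int dtype raises when missing values are present, and na_value chooses the fill value. A short illustration of what the quoted tests exercise:

import numpy as np
import pandas as pd

arr = pd.array([0.1, 0.2, None], dtype="Float64")

arr.to_numpy()                                  # object array: [0.1, 0.2, <NA>]
arr.to_numpy(dtype="float64", na_value=np.nan)  # float64 array with NaN in the NA slot
# arr.to_numpy(dtype="float64") without na_value raises ValueError because of the NA.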
pandas/tests/groupby/test_index_as_string.py
12 issues
Line: 2
Column: 1
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.fixture(params=[["inner"], ["inner", "outer"]])
def frame(request):
Reported by Pylint.
Line: 49
Column: 40
(["inner", "B"], [pd.Grouper(level="inner"), "B"]), # Index and column
],
)
def test_grouper_index_level_as_string(frame, key_strs, groupers):
result = frame.groupby(key_strs).mean()
expected = frame.groupby(groupers).mean()
tm.assert_frame_equal(result, expected)
Reported by Pylint.
Line: 70
Column: 47
["B", "outer", "inner"],
],
)
def test_grouper_index_level_as_string_series(series, levels):
# Compute expected result
if isinstance(levels, list):
groupers = [pd.Grouper(level=lv) for lv in levels]
else:
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.fixture(params=[["inner"], ["inner", "outer"]])
def frame(request):
Reported by Pylint.
Line: 9
Column: 1
@pytest.fixture(params=[["inner"], ["inner", "outer"]])
def frame(request):
levels = request.param
df = pd.DataFrame(
{
"outer": ["a", "a", "a", "b", "b", "b"],
"inner": [1, 2, 3, 1, 2, 3],
Reported by Pylint.
Line: 11
Column: 5
@pytest.fixture(params=[["inner"], ["inner", "outer"]])
def frame(request):
levels = request.param
df = pd.DataFrame(
{
"outer": ["a", "a", "a", "b", "b", "b"],
"inner": [1, 2, 3, 1, 2, 3],
"A": np.arange(6),
"B": ["one", "one", "two", "two", "one", "one"],
Reported by Pylint.
Line: 20
Column: 9
}
)
if levels:
df = df.set_index(levels)
return df
@pytest.fixture()
Reported by Pylint.
Line: 26
Column: 1
@pytest.fixture()
def series():
df = pd.DataFrame(
{
"outer": ["a", "a", "a", "b", "b", "b"],
"inner": [1, 2, 3, 1, 2, 3],
"A": np.arange(6),
Reported by Pylint.
Line: 27
Column: 5
@pytest.fixture()
def series():
df = pd.DataFrame(
{
"outer": ["a", "a", "a", "b", "b", "b"],
"inner": [1, 2, 3, 1, 2, 3],
"A": np.arange(6),
"B": ["one", "one", "two", "two", "one", "one"],
Reported by Pylint.
Line: 35
Column: 5
"B": ["one", "one", "two", "two", "one", "one"],
}
)
s = df.set_index(["outer", "inner", "B"])["A"]
return s
@pytest.mark.parametrize(
Reported by Pylint.
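Note: these tests check that an index level name passed as a plain string groups the same way as an explicit pd.Grouper(level=...). A minimal illustration of the equivalence the tests assert:

import pandas as pd

df = pd.DataFrame(
    {"outer": ["a", "a", "b", "b"], "inner": [1, 2, 1, 2], "A": [1.0, 2.0, 3.0, 4.0]}
).set_index(["outer", "inner"])

by_name = df.groupby("inner").mean()
by_grouper = df.groupby(pd.Grouper(level="inner")).mean()
# by_name and by_grouper are identical frames.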
asv_bench/benchmarks/pandas_vb_common.py
12 issues
Line: 6
Column: 1
import numpy as np
import pandas as pd
# Compatibility import for lib
for imp in ["pandas._libs.lib", "pandas.lib"]:
try:
lib = import_module(imp)
Reported by Pylint.
Line: 18
Column: 5
# Compatibility import for the testing module
try:
import pandas._testing as tm
except ImportError:
import pandas.util.testing as tm # noqa
numeric_dtypes = [
Reported by Pylint.
Line: 20
Column: 5
try:
import pandas._testing as tm
except ImportError:
import pandas.util.testing as tm # noqa
numeric_dtypes = [
np.int64,
np.int32,
Reported by Pylint.
Line: 56
Column: 1
extension_dtypes = []
def setup(*args, **kwargs):
# This function just needs to be imported into each benchmark file to
# set up the random seed before each function.
# https://asv.readthedocs.io/en/latest/writing_benchmarks.html
np.random.seed(1234)
Reported by Pylint.
Line: 56
Column: 1
extension_dtypes = []
def setup(*args, **kwargs):
# This function just needs to be imported into each benchmark file to
# set up the random seed before each function.
# https://asv.readthedocs.io/en/latest/writing_benchmarks.html
np.random.seed(1234)
Reported by Pylint.
Line: 79
Column: 1
# causes an exception to be raised
pass
def teardown(self, *args, **kwargs):
self.remove(self.fname)
Reported by Pylint.
Line: 79
Column: 1
# causes an exception to be raised
pass
def teardown(self, *args, **kwargs):
self.remove(self.fname)
Reported by Pylint.
Line: 1
Column: 1
from importlib import import_module
import os
import numpy as np
import pandas as pd
# Compatibility import for lib
for imp in ["pandas._libs.lib", "pandas.lib"]:
Reported by Pylint.
Line: 56
Column: 1
extension_dtypes = []
def setup(*args, **kwargs):
# This function just needs to be imported into each benchmark file to
# set up the random seed before each function.
# https://asv.readthedocs.io/en/latest/writing_benchmarks.html
np.random.seed(1234)
Reported by Pylint.
Line: 70
Column: 5
fname = None
def remove(self, f):
"""Remove created files"""
try:
os.remove(f) # noqa: PDF008
except OSError:
# On Windows, attempting to remove a file that is in use
Reported by Pylint.
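Note: several of the Pylint hits in this benchmark helper come from its deliberate compatibility-import pattern, which lets the benchmarks run against both current and legacy pandas layouts. Roughly, the idiom looks like the sketch below (a sketch of the pattern only, not the exact file contents; the fallback module name comes from the snippet above):

from importlib import import_module

lib = None
for name in ["pandas._libs.lib", "pandas.lib"]:
    try:
        # Try the current module location first, then fall back to the legacy one.
        lib = import_module(name)
        break
    except ImportError:
        pass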
pandas/core/roperator.py
12 issues
Line: 8
Column: 1
import operator
def radd(left, right):
return right + left
def rsub(left, right):
return right - left
Reported by Pylint.
Line: 12
Column: 1
return right + left
def rsub(left, right):
return right - left
def rmul(left, right):
return right * left
Reported by Pylint.
Line: 16
Column: 1
return right - left
def rmul(left, right):
return right * left
def rdiv(left, right):
return right / left
Reported by Pylint.
Line: 20
Column: 1
return right * left
def rdiv(left, right):
return right / left
def rtruediv(left, right):
return right / left
Reported by Pylint.
Line: 24
Column: 1
return right / left
def rtruediv(left, right):
return right / left
def rfloordiv(left, right):
return right // left
Reported by Pylint.
Line: 28
Column: 1
return right / left
def rfloordiv(left, right):
return right // left
def rmod(left, right):
# check if right is a string as % is the string
Reported by Pylint.
Line: 32
Column: 1
return right // left
def rmod(left, right):
# check if right is a string as % is the string
# formatting operation; this is a TypeError
# otherwise perform the op
if isinstance(right, str):
typ = type(left).__name__
Reported by Pylint.
Line: 43
Column: 1
return right % left
def rdivmod(left, right):
return divmod(right, left)
def rpow(left, right):
return right ** left
Reported by Pylint.
Line: 47
Column: 1
return divmod(right, left)
def rpow(left, right):
return right ** left
def rand_(left, right):
return operator.and_(right, left)
Reported by Pylint.
Line: 51
Column: 1
return right ** left
def rand_(left, right):
return operator.and_(right, left)
def ror_(left, right):
return operator.or_(right, left)
Reported by Pylint.
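Note: the functions flagged in roperator.py are the reflected ("r") variants of the arithmetic operators: each applies the operation with the operands swapped, which pandas uses to back the reflected dunder methods (__radd__ and friends). A tiny illustration based on the definitions quoted above:

from pandas.core.roperator import radd, rsub

radd(2, 10)  # 12, evaluates 10 + 2
rsub(2, 10)  # 8,  evaluates 10 - 2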
pandas/core/tools/numeric.py
12 issues
Line: 5
Column: 1
import numpy as np
from pandas._libs import lib
from pandas.core.dtypes.cast import maybe_downcast_numeric
from pandas.core.dtypes.common import (
ensure_object,
is_datetime_or_timedelta_dtype,
Reported by Pylint.
Line: 223
Column: 14
# GH33013: for IntegerArray & FloatingArray need to reconstruct masked array
if mask is not None:
data = np.zeros(mask.shape, dtype=values.dtype)
data[~mask] = values
from pandas.core.arrays import (
FloatingArray,
IntegerArray,
)
Reported by Pylint.
Line: 171
Column: 16
# save mask to reconstruct the full array after casting
mask: np.ndarray | None = None
if isinstance(values, NumericArray):
mask = values._mask
values = values._data[~mask]
values_dtype = getattr(values, "dtype", None)
if is_numeric_dtype(values_dtype):
pass
Reported by Pylint.
Line: 172
Column: 18
mask: np.ndarray | None = None
if isinstance(values, NumericArray):
mask = values._mask
values = values._data[~mask]
values_dtype = getattr(values, "dtype", None)
if is_numeric_dtype(values_dtype):
pass
elif is_datetime_or_timedelta_dtype(values_dtype):
Reported by Pylint.
Line: 234
Column: 16
values = klass(data, mask.copy())
if is_series:
return arg._constructor(values, index=arg.index, name=arg.name)
elif is_index:
# because we want to coerce to numeric if possible,
# do not use _shallow_copy
return pd.Index(values, name=arg.name)
elif is_scalars:
Reported by Pylint.
Line: 1
Column: 1
from __future__ import annotations
import numpy as np
from pandas._libs import lib
from pandas.core.dtypes.cast import maybe_downcast_numeric
from pandas.core.dtypes.common import (
ensure_object,
Reported by Pylint.
Line: 27
Column: 1
from pandas.core.arrays.numeric import NumericArray
def to_numeric(arg, errors="raise", downcast=None):
"""
Convert argument to a numeric type.
The default return dtype is `float64` or `int64`
depending on the data supplied. Use the `downcast` parameter
Reported by Pylint.
Line: 27
Column: 1
from pandas.core.arrays.numeric import NumericArray
def to_numeric(arg, errors="raise", downcast=None):
"""
Convert argument to a numeric type.
The default return dtype is `float64` or `int64`
depending on the data supplied. Use the `downcast` parameter
Reported by Pylint.
Line: 27
Column: 1
from pandas.core.arrays.numeric import NumericArray
def to_numeric(arg, errors="raise", downcast=None):
"""
Convert argument to a numeric type.
The default return dtype is `float64` or `int64`
depending on the data supplied. Use the `downcast` parameter
Reported by Pylint.
Line: 197
Column: 42
if downcast in ("integer", "signed"):
typecodes = np.typecodes["Integer"]
elif downcast == "unsigned" and (not len(values) or np.min(values) >= 0):
typecodes = np.typecodes["UnsignedInteger"]
elif downcast == "float":
typecodes = np.typecodes["Float"]
# pandas support goes only to np.float32,
Reported by Pylint.
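Note: the snippets above come from pd.to_numeric, whose docstring is partially quoted: it coerces its argument to a numeric dtype (float64 or int64 by default), optionally downcasts to a smaller dtype, and can coerce unparseable values to NaN. A short usage illustration of the public API:

import pandas as pd

s = pd.Series(["1", "2", "3"])
pd.to_numeric(s)                       # int64
pd.to_numeric(s, downcast="unsigned")  # uint8
pd.to_numeric(pd.Series(["1", "oops"]), errors="coerce")  # [1.0, NaN]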