The following issues were found:
pandas/core/arrays/base.py
37 issues
Line: 25
Column: 1
import numpy as np
from pandas._libs import lib
from pandas._typing import (
ArrayLike,
Dtype,
FillnaOptions,
PositionalIndexer,
Reported by Pylint.
Line: 77
Column: 5
if TYPE_CHECKING:
    class ExtensionArraySupportsAnyAll("ExtensionArray"):
        def any(self, *, skipna: bool = True) -> bool:
            pass

        def all(self, *, skipna: bool = True) -> bool:
            pass
Reported by Pylint.
Line: 544
Column: 3
    else:
        return self.copy()
# FIXME: Really hard-code here?
if isinstance(dtype, StringDtype):
    # allow conversion to StringArrays
    return dtype.construct_array_type()._from_sequence(self, copy=False)
return np.array(self, dtype=dtype, copy=copy)
Reported by Pylint.
Line: 547
Column: 20
    # FIXME: Really hard-code here?
    if isinstance(dtype, StringDtype):
        # allow conversion to StringArrays
        return dtype.construct_array_type()._from_sequence(self, copy=False)
    return np.array(self, dtype=dtype, copy=copy)

def isna(self) -> np.ndarray | ExtensionArraySupportsAnyAll:
    """
Reported by Pylint.
Line: 590
Column: 5
    # Note: this is used in `ExtensionArray.argsort`.
    return np.array(self)

def argsort(
    self,
    ascending: bool = True,
    kind: str = "quicksort",
    na_position: str = "last",
    *args,
Reported by Pylint.
Line: 1210
Column: 1
# Reshaping
# ------------------------------------------------------------------------

def transpose(self, *axes: int) -> ExtensionArray:
    """
    Return a transposed view on this array.
    Because ExtensionArrays are always 1D, this is a no-op. It is included
    for compatibility with np.ndarray.
Reported by Pylint.
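The docstring excerpt above notes that transpose is a no-op because ExtensionArrays are always one-dimensional. A quick illustration of that documented behaviour (not taken from the report; the nullable Int64 array is just a convenient stand-in for any ExtensionArray):

import pandas as pd

arr = pd.array([1, 2, 3], dtype="Int64")    # a 1D ExtensionArray
assert list(arr.transpose()) == list(arr)   # transposing a 1D array gives back the same values
assert list(arr.T) == list(arr)             # the T property simply calls transpose()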
Line: 1223
Column: 21
def T(self) -> ExtensionArray:
    return self.transpose()

def ravel(self, order: Literal["C", "F", "A", "K"] | None = "C") -> ExtensionArray:
    """
    Return a flattened view on this array.
    Parameters
    ----------
Reported by Pylint.
Line: 1271
Column: 16
# of objects
@cache_readonly
def _can_hold_na(self) -> bool:
    return self.dtype._can_hold_na

def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
    """
    Return a scalar result of performing the reduction operation.
Reported by Pylint.
Line: 1
Column: 1
"""
An interface for extending pandas with custom arrays.
.. warning::

   This is an experimental API and subject to breaking changes
   without warning.
"""
from __future__ import annotations
Reported by Pylint.
Line: 77
Column: 5
if TYPE_CHECKING:
    class ExtensionArraySupportsAnyAll("ExtensionArray"):
        def any(self, *, skipna: bool = True) -> bool:
            pass

        def all(self, *, skipna: bool = True) -> bool:
            pass
Reported by Pylint.
pandas/tests/indexes/datetimelike.py
37 issues
Line: 4
Column: 1
""" generic datetimelike tests """
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.tests.indexes.common import Base
Reported by Pylint.
Line: 56
Column: 18
idx = simple_index
idx_view = idx.view("i8")
result = self._index_cls(idx)
tm.assert_index_equal(result, idx)
idx_view = idx.view(self._index_cls)
result = self._index_cls(idx)
tm.assert_index_equal(result, idx_view)
Reported by Pylint.
Line: 59
Column: 29
    result = self._index_cls(idx)
    tm.assert_index_equal(result, idx)
    idx_view = idx.view(self._index_cls)
    result = self._index_cls(idx)
    tm.assert_index_equal(result, idx_view)

def test_map_callable(self, simple_index):
    index = simple_index
Reported by Pylint.
Line: 60
Column: 18
    tm.assert_index_equal(result, idx)
    idx_view = idx.view(self._index_cls)
    result = self._index_cls(idx)
    tm.assert_index_equal(result, idx_view)

def test_map_callable(self, simple_index):
    index = simple_index
    expected = index + index.freq
Reported by Pylint.
Line: 11
Column: 1
from pandas.tests.indexes.common import Base


class DatetimeLike(Base):
    def test_argsort_matches_array(self, simple_index):
        idx = simple_index
        idx = idx.insert(1, pd.NaT)
        result = idx.argsort()
Reported by Pylint.
Line: 11
Column: 1
from pandas.tests.indexes.common import Base


class DatetimeLike(Base):
    def test_argsort_matches_array(self, simple_index):
        idx = simple_index
        idx = idx.insert(1, pd.NaT)
        result = idx.argsort()
Reported by Pylint.
Line: 17
Column: 20
    idx = idx.insert(1, pd.NaT)
    result = idx.argsort()
    expected = idx._data.argsort()
    tm.assert_numpy_array_equal(result, expected)

def test_can_hold_identifiers(self, simple_index):
    idx = simple_index
    key = idx[0]
Reported by Pylint.
Line: 23
Column: 16
def test_can_hold_identifiers(self, simple_index):
    idx = simple_index
    key = idx[0]
    assert idx._can_hold_identifiers_and_holds_name(key) is False

def test_shift_identity(self, simple_index):
    idx = simple_index
    tm.assert_index_equal(idx, idx.shift(0))
Reported by Pylint.
Line: 87
Column: 24
# don't compare the freqs
if isinstance(expected, (pd.DatetimeIndex, pd.TimedeltaIndex)):
    expected = expected._with_freq(None)
result = index.map(mapper(expected, index))
tm.assert_index_equal(result, expected)
expected = pd.Index([pd.NaT] + index[1:].tolist())
Reported by Pylint.
Line: 11
Column: 1
from pandas.tests.indexes.common import Base


class DatetimeLike(Base):
    def test_argsort_matches_array(self, simple_index):
        idx = simple_index
        idx = idx.insert(1, pd.NaT)
        result = idx.argsort()
Reported by Pylint.
pandas/tests/arrays/masked/test_arithmetic.py
37 issues
Line: 6
Column: 1
from typing import Any
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import ExtensionArray
Reported by Pylint.
Line: 28
Column: 16
    return request.param


def check_skip(data, op_name):
    if isinstance(data.dtype, pd.BooleanDtype) and "sub" in op_name:
        pytest.skip("subtract not implemented for boolean")


# Test equivalence of scalars, numpy arrays with array ops
Reported by Pylint.
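The `data` parameter of `check_skip` shares its name with the module-level `data` fixture, so Pylint's redefined-outer-name (W0621) is a plausible trigger here; the rule ID itself is not shown in this report. A minimal sketch of the usual pytest-style remedy, assuming that is the message:

import pandas as pd
import pytest

# Sketch only: a targeted disable keeps the fixture-style parameter name while silencing W0621.
def check_skip(data, op_name):  # pylint: disable=redefined-outer-name
    if isinstance(data.dtype, pd.BooleanDtype) and "sub" in op_name:
        pytest.skip("subtract not implemented for boolean")

Many projects instead disable redefined-outer-name for test directories in the Pylint configuration, since fixture parameters shadow fixture functions by design.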
Line: 37
Column: 40
# -----------------------------------------------------------------------------


def test_array_scalar_like_equivalence(data, all_arithmetic_operators):
    data, scalar = data
    op = tm.get_op_from_name(all_arithmetic_operators)
    check_skip(data, all_arithmetic_operators)
    scalar_array = pd.array([scalar] * len(data), dtype=data.dtype)
Reported by Pylint.
Line: 44
Column: 3
scalar_array = pd.array([scalar] * len(data), dtype=data.dtype)
# TODO also add len-1 array (np.array([scalar], dtype=data.dtype.numpy_dtype))
for scalar in [scalar, data.dtype.type(scalar)]:
    result = op(data, scalar)
    expected = op(data, scalar_array)
    tm.assert_extension_array_equal(result, expected)
Reported by Pylint.
Line: 51
Column: 19
    tm.assert_extension_array_equal(result, expected)


def test_array_NA(data, all_arithmetic_operators):
    if "truediv" in all_arithmetic_operators:
        pytest.skip("division with pd.NA raises")
    data, _ = data
    op = tm.get_op_from_name(all_arithmetic_operators)
    check_skip(data, all_arithmetic_operators)
Reported by Pylint.
Line: 66
Column: 34
    tm.assert_extension_array_equal(result, expected)


def test_numpy_array_equivalence(data, all_arithmetic_operators):
    data, scalar = data
    op = tm.get_op_from_name(all_arithmetic_operators)
    check_skip(data, all_arithmetic_operators)
    numpy_array = np.array([scalar] * len(data), dtype=data.dtype.numpy_dtype)
Reported by Pylint.
Line: 79
Column: 3
    if isinstance(expected, ExtensionArray):
        tm.assert_extension_array_equal(result, expected)
    else:
        # TODO div still gives float ndarray -> remove this once we have Float EA
        tm.assert_numpy_array_equal(result, expected)


# Test equivalence with Series and DataFrame ops
# -----------------------------------------------------------------------------
Reported by Pylint.
Line: 87
Column: 16
# -----------------------------------------------------------------------------


def test_frame(data, all_arithmetic_operators):
    data, scalar = data
    op = tm.get_op_from_name(all_arithmetic_operators)
    check_skip(data, all_arithmetic_operators)
    # DataFrame with scalar
Reported by Pylint.
Line: 100
Column: 17
    tm.assert_frame_equal(result, expected)


def test_series(data, all_arithmetic_operators):
    data, scalar = data
    op = tm.get_op_from_name(all_arithmetic_operators)
    check_skip(data, all_arithmetic_operators)
    s = pd.Series(data)
Reported by Pylint.
Line: 135
Column: 31
# -----------------------------------------------------------------------------


def test_error_invalid_object(data, all_arithmetic_operators):
    data, _ = data
    op = all_arithmetic_operators
    opa = getattr(data, op)
Reported by Pylint.
pandas/tests/util/test_assert_frame_equal.py
37 issues
Line: 1
Column: 1
import pytest
import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
@pytest.fixture(params=[True, False])
def by_blocks_fixture(request):
Reported by Pylint.
Line: 71
Column: 5
    The arguments passed to `tm.assert_frame_equal`.
    """
    _assert_not_frame_equal(a, b, **kwargs)
    _assert_not_frame_equal(b, a, **kwargs)


@pytest.mark.parametrize("check_like", [True, False])
def test_frame_equal_row_order_mismatch(check_like, obj_fixture):
    df1 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["a", "b", "c"])
Reported by Pylint.
Line: 75
Column: 53
@pytest.mark.parametrize("check_like", [True, False])
def test_frame_equal_row_order_mismatch(check_like, obj_fixture):
    df1 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["a", "b", "c"])
    df2 = DataFrame({"A": [3, 2, 1], "B": [6, 5, 4]}, index=["c", "b", "a"])
    if not check_like:  # Do not ignore row-column orderings.
        msg = f"{obj_fixture}.index are different"
Reported by Pylint.
Line: 94
Column: 47
        (DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}), DataFrame({"A": [1, 2, 3]})),
    ],
)
def test_frame_equal_shape_mismatch(df1, df2, obj_fixture):
    msg = f"{obj_fixture} are different"
    with pytest.raises(AssertionError, match=msg):
        tm.assert_frame_equal(df1, df2, obj=obj_fixture)
Reported by Pylint.
Line: 149
Column: 49
@pytest.mark.parametrize("check_like", [True, False])
def test_frame_equal_index_mismatch(check_like, obj_fixture):
    msg = f"""{obj_fixture}\\.index are different
{obj_fixture}\\.index values are different \\(33\\.33333 %\\)
\\[left\\]: Index\\(\\['a', 'b', 'c'\\], dtype='object'\\)
\\[right\\]: Index\\(\\['a', 'b', 'd'\\], dtype='object'\\)"""
Reported by Pylint.
Line: 164
Column: 51
@pytest.mark.parametrize("check_like", [True, False])
def test_frame_equal_columns_mismatch(check_like, obj_fixture):
    msg = f"""{obj_fixture}\\.columns are different
{obj_fixture}\\.columns values are different \\(50\\.0 %\\)
\\[left\\]: Index\\(\\['A', 'B'\\], dtype='object'\\)
\\[right\\]: Index\\(\\['A', 'b'\\], dtype='object'\\)"""
Reported by Pylint.
Line: 178
Column: 56
    tm.assert_frame_equal(df1, df2, check_like=check_like, obj=obj_fixture)


def test_frame_equal_block_mismatch(by_blocks_fixture, obj_fixture):
    obj = obj_fixture
    msg = f"""{obj}\\.iloc\\[:, 1\\] \\(column name="B"\\) are different
{obj}\\.iloc\\[:, 1\\] \\(column name="B"\\) values are different \\(33\\.33333 %\\)
\\[index\\]: \\[0, 1, 2\\]
Reported by Pylint.
Line: 178
Column: 37
    tm.assert_frame_equal(df1, df2, check_like=check_like, obj=obj_fixture)


def test_frame_equal_block_mismatch(by_blocks_fixture, obj_fixture):
    obj = obj_fixture
    msg = f"""{obj}\\.iloc\\[:, 1\\] \\(column name="B"\\) are different
{obj}\\.iloc\\[:, 1\\] \\(column name="B"\\) values are different \\(33\\.33333 %\\)
\\[index\\]: \\[0, 1, 2\\]
Reported by Pylint.
Line: 219
Column: 64
        ),
    ],
)
def test_frame_equal_unicode(df1, df2, msg, by_blocks_fixture, obj_fixture):
    # see gh-20503
    #
    # Test ensures that `tm.assert_frame_equals` raises the right exception
    # when comparing DataFrames containing differing unicode objects.
    msg = msg.format(obj=obj_fixture)
Reported by Pylint.
Line: 219
Column: 45
        ),
    ],
)
def test_frame_equal_unicode(df1, df2, msg, by_blocks_fixture, obj_fixture):
    # see gh-20503
    #
    # Test ensures that `tm.assert_frame_equals` raises the right exception
    # when comparing DataFrames containing differing unicode objects.
    msg = msg.format(obj=obj_fixture)
Reported by Pylint.
pandas/tseries/holiday.py
37 issues
Line: 9
Column: 1
)
import warnings
from dateutil.relativedelta import ( # noqa
FR,
MO,
SA,
SU,
TH,
Reported by Pylint.
Line: 9
Column: 1
)
import warnings
from dateutil.relativedelta import ( # noqa
FR,
MO,
SA,
SU,
TH,
Reported by Pylint.
Line: 9
Column: 1
)
import warnings
from dateutil.relativedelta import ( # noqa
FR,
MO,
SA,
SU,
TH,
Reported by Pylint.
Line: 9
Column: 1
)
import warnings
from dateutil.relativedelta import ( # noqa
FR,
MO,
SA,
SU,
TH,
Reported by Pylint.
Line: 9
Column: 1
)
import warnings
from dateutil.relativedelta import ( # noqa
FR,
MO,
SA,
SU,
TH,
Reported by Pylint.
Line: 240
Column: 9
    if self.observance is not None:
        info += f"observance={self.observance}"
    repr = f"Holiday: {self.name} ({info})"
    return repr

def dates(self, start_date, end_date, return_name=False):
    """
    Calculate holidays observed between start date and end date
Reported by Pylint.
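The local variable `repr` shadows the built-in of the same name, so redefined-builtin (W0622) is the likely message at this location; the rule ID is not shown above. A minimal sketch of one way to avoid the shadowing, assuming that is the issue:

    # Sketch only: rename the local so the repr() built-in is not shadowed.
    if self.observance is not None:
        info += f"observance={self.observance}"
    holiday_repr = f"Holiday: {self.name} ({info})"
    return holiday_repr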
Line: 337
Column: 30
Dates with rules applied
"""
if self.observance is not None:
    return dates.map(lambda d: self.observance(d))
if self.offset is not None:
    if not isinstance(self.offset, list):
        offsets = [self.offset]
    else:
Reported by Pylint.
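The lambda only forwards its argument to `self.observance`, so unnecessary-lambda (W0108) is a plausible trigger; again, the rule ID is not shown. A minimal sketch of the direct form, assuming that is the message:

# Sketch only: pass the callable itself instead of wrapping it in a lambda.
if self.observance is not None:
    return dates.map(self.observance)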
Line: 1
Column: 1
from __future__ import annotations
from datetime import (
datetime,
timedelta,
)
import warnings
from dateutil.relativedelta import ( # noqa
Reported by Pylint.
Line: 37
Column: 1
)


def next_monday(dt: datetime) -> datetime:
    """
    If holiday falls on Saturday, use following Monday instead;
    if holiday falls on Sunday, use Monday instead
    """
    if dt.weekday() == 5:
Reported by Pylint.
Line: 42
Column: 5
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday, use Monday instead
"""
if dt.weekday() == 5:
    return dt + timedelta(2)
elif dt.weekday() == 6:
    return dt + timedelta(1)
return dt
Reported by Pylint.
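Because the first two branches both return, Pylint's no-else-return family of messages (R1705, "unnecessary elif after return") commonly fires on code shaped like this; whether that is the exact rule here is not visible in the report. A self-contained sketch of the flattened form, assuming it is:

from datetime import datetime, timedelta

def next_monday(dt: datetime) -> datetime:
    # Sketch only: once a branch returns, the elif can become a plain if.
    if dt.weekday() == 5:
        return dt + timedelta(2)
    if dt.weekday() == 6:
        return dt + timedelta(1)
    return dt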
pandas/tests/arrays/categorical/test_missing.py
37 issues
Line: 4
Column: 1
import collections
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Reported by Pylint.
Line: 44
Column: 37
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8))
c[1] = np.nan
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0], dtype=np.int8))
# Adding nan to categories should make assigned nan point to the
Reported by Pylint.
Line: 47
Column: 37
tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8))
c[1] = np.nan
tm.assert_index_equal(c.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0], dtype=np.int8))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
tm.assert_index_equal(c.categories, Index(["a", "b"]))
Reported by Pylint.
Line: 53
Column: 37
    # category!
    c = Categorical(["a", "b", np.nan, "a"])
    tm.assert_index_equal(c.categories, Index(["a", "b"]))
    tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8))

def test_set_dtype_nans(self):
    c = Categorical(["a", "b", np.nan])
    result = c._set_dtype(CategoricalDtype(["a", "c"]))
    tm.assert_numpy_array_equal(result.codes, np.array([0, -1, -1], dtype="int8"))
Reported by Pylint.
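Each of the excerpts above points at `c._codes`, so protected-access (W0212) is the most likely rule, although the report does not say so explicitly. Where the private attribute is not essential to the assertion, the public `Categorical.codes` property exposes the same values; a minimal sketch, assuming that rule:

# Sketch only: Categorical.codes is the public, read-only view of the codes.
tm.assert_numpy_array_equal(c.codes, np.array([0, 1, -1, 0], dtype=np.int8))

Tests that intentionally exercise internals usually keep the private access and suppress the warning instead.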
Line: 57
Column: 18
def test_set_dtype_nans(self):
    c = Categorical(["a", "b", np.nan])
    result = c._set_dtype(CategoricalDtype(["a", "c"]))
    tm.assert_numpy_array_equal(result.codes, np.array([0, -1, -1], dtype="int8"))

def test_set_item_nan(self):
    cat = Categorical([1, 2, 3])
    cat[1] = np.nan
Reported by Pylint.
Line: 1
Column: 1
import collections
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Reported by Pylint.
Line: 19
Column: 1
import pandas._testing as tm


class TestCategoricalMissing:
    def test_isna(self):
        exp = np.array([False, False, True])
        cat = Categorical(["a", "b", np.nan])
        res = cat.isna()
Reported by Pylint.
Line: 20
Column: 5
class TestCategoricalMissing:
    def test_isna(self):
        exp = np.array([False, False, True])
        cat = Categorical(["a", "b", np.nan])
        res = cat.isna()
        tm.assert_numpy_array_equal(res, exp)
Reported by Pylint.
Line: 20
Column: 5
class TestCategoricalMissing:
    def test_isna(self):
        exp = np.array([False, False, True])
        cat = Categorical(["a", "b", np.nan])
        res = cat.isna()
        tm.assert_numpy_array_equal(res, exp)
Reported by Pylint.
Line: 27
Column: 5
    tm.assert_numpy_array_equal(res, exp)

def test_na_flags_int_categories(self):
    # #1457
    categories = list(range(10))
    labels = np.random.randint(0, 10, 20)
    labels[::5] = -1
Reported by Pylint.
pandas/tests/groupby/test_quantile.py
37 issues
Line: 2
Column: 1
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
)
import pandas._testing as tm
Reported by Pylint.
Line: 80
Column: 9
def test_quantile_array2():
    # https://github.com/pandas-dev/pandas/pull/28085#issuecomment-524066959
    df = DataFrame(
        np.random.RandomState(0).randint(0, 5, size=(10, 3)), columns=list("ABC")
    )
    result = df.groupby("A").quantile([0.3, 0.7])
    expected = DataFrame(
        {
            "B": [0.9, 2.1, 2.2, 3.4, 1.6, 2.4, 2.3, 2.7, 0.0, 0.0],
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
)
import pandas._testing as tm
Reported by Pylint.
Line: 34
Column: 1
        ),
        # All NA
        ([np.nan] * 5, [np.nan] * 5),
    ],
)
@pytest.mark.parametrize("q", [0, 0.25, 0.5, 0.75, 1])
def test_quantile(interpolation, a_vals, b_vals, q):
    if interpolation == "nearest" and q == 0.5 and b_vals == [4, 3, 2, 1]:
        pytest.skip(
Reported by Pylint.
Line: 34
Column: 1
        ),
        # All NA
        ([np.nan] * 5, [np.nan] * 5),
    ],
)
@pytest.mark.parametrize("q", [0, 0.25, 0.5, 0.75, 1])
def test_quantile(interpolation, a_vals, b_vals, q):
    if interpolation == "nearest" and q == 0.5 and b_vals == [4, 3, 2, 1]:
        pytest.skip(
Reported by Pylint.
Line: 46
Column: 5
a_expected = pd.Series(a_vals).quantile(q, interpolation=interpolation)
b_expected = pd.Series(b_vals).quantile(q, interpolation=interpolation)
df = DataFrame(
    {"key": ["a"] * len(a_vals) + ["b"] * len(b_vals), "val": a_vals + b_vals}
)
expected = DataFrame(
    [a_expected, b_expected], columns=["val"], index=Index(["a", "b"], name="key")
Reported by Pylint.
Line: 58
Column: 1
    tm.assert_frame_equal(result, expected)


def test_quantile_array():
    # https://github.com/pandas-dev/pandas/issues/27526
    df = DataFrame({"A": [0, 1, 2, 3, 4]})
    result = df.groupby([0, 0, 1, 1, 1]).quantile([0.25])
    index = pd.MultiIndex.from_product([[0, 1], [0.25]])
Reported by Pylint.
Line: 60
Column: 5
def test_quantile_array():
    # https://github.com/pandas-dev/pandas/issues/27526
    df = DataFrame({"A": [0, 1, 2, 3, 4]})
    result = df.groupby([0, 0, 1, 1, 1]).quantile([0.25])
    index = pd.MultiIndex.from_product([[0, 1], [0.25]])
    expected = DataFrame({"A": [0.25, 2.50]}, index=index)
    tm.assert_frame_equal(result, expected)
Reported by Pylint.
Line: 67
Column: 5
expected = DataFrame({"A": [0.25, 2.50]}, index=index)
tm.assert_frame_equal(result, expected)
df = DataFrame({"A": [0, 1, 2, 3], "B": [4, 5, 6, 7]})
index = pd.MultiIndex.from_product([[0, 1], [0.25, 0.75]])
result = df.groupby([0, 0, 1, 1]).quantile([0.25, 0.75])
expected = DataFrame(
    {"A": [0.25, 0.75, 2.25, 2.75], "B": [4.25, 4.75, 6.25, 6.75]}, index=index
Reported by Pylint.
Line: 77
Column: 1
    tm.assert_frame_equal(result, expected)


def test_quantile_array2():
    # https://github.com/pandas-dev/pandas/pull/28085#issuecomment-524066959
    df = DataFrame(
        np.random.RandomState(0).randint(0, 5, size=(10, 3)), columns=list("ABC")
    )
    result = df.groupby("A").quantile([0.3, 0.7])
Reported by Pylint.
pandas/tests/groupby/test_groupby_dropna.py
37 issues
Line: 2
Column: 1
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize(
"dropna, tuples, outputs",
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize(
"dropna, tuples, outputs",
Reported by Pylint.
Line: 23
Column: 1
"c": [13.0, 12.3, 123.23],
"d": [13.0, 233.0, 123.0],
"e": [13.0, 12.0, 1.0],
},
),
],
)
def test_groupby_dropna_multi_index_dataframe_nan_in_one_group(
dropna, tuples, outputs, nulls_fixture
Reported by Pylint.
Line: 37
Column: 5
    ["B", "A", 123.23, 123, 1],
    ["A", "B", 1, 1, 1.0],
]
df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])
grouped = df.groupby(["a", "b"], dropna=dropna).sum()
mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))
# Since right now, by default MI will drop NA from levels when we create MI
Reported by Pylint.
Line: 40
Column: 5
df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])
grouped = df.groupby(["a", "b"], dropna=dropna).sum()
mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))
# Since right now, by default MI will drop NA from levels when we create MI
# via `from_*`, so we need to add NA for level manually afterwards.
if not dropna:
    mi = mi.set_levels(["A", "B", np.nan], level="b")
Reported by Pylint.
Line: 45
Column: 9
# Since right now, by default MI will drop NA from levels when we create MI
# via `from_*`, so we need to add NA for level manually afterwards.
if not dropna:
    mi = mi.set_levels(["A", "B", np.nan], level="b")
expected = pd.DataFrame(outputs, index=mi)
tm.assert_frame_equal(grouped, expected)
Reported by Pylint.
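The comment in this excerpt explains why the test calls set_levels: MultiIndex constructors drop NA from the levels and encode missing entries with a -1 code. A small illustration of that behaviour (not part of the report):

import numpy as np
import pandas as pd

mi = pd.MultiIndex.from_tuples([("A", "B"), ("A", np.nan)], names=["a", "b"])
print(list(mi.levels[1]))  # ['B'] -- NaN is not kept as a level
print(list(mi.codes[1]))   # [0, -1] -- the missing entry is encoded as -1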
Line: 66
Column: 1
"c": [12.0, 13.3, 123.23, 1.0],
"d": [12.0, 234.0, 123.0, 1.0],
"e": [12.0, 13.0, 1.0, 1.0],
},
),
],
)
def test_groupby_dropna_multi_index_dataframe_nan_in_two_groups(
dropna, tuples, outputs, nulls_fixture, nulls_fixture2
Reported by Pylint.
Line: 81
Column: 5
    [nulls_fixture2, "B", 1, 1, 1.0],
    ["A", nulls_fixture2, 1, 1, 1.0],
]
df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])
grouped = df.groupby(["a", "b"], dropna=dropna).sum()
mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))
# Since right now, by default MI will drop NA from levels when we create MI
Reported by Pylint.
Line: 84
Column: 5
df = pd.DataFrame(df_list, columns=["a", "b", "c", "d", "e"])
grouped = df.groupby(["a", "b"], dropna=dropna).sum()
mi = pd.MultiIndex.from_tuples(tuples, names=list("ab"))
# Since right now, by default MI will drop NA from levels when we create MI
# via `from_*`, so we need to add NA for level manually afterwards.
if not dropna:
    mi = mi.set_levels([["A", "B", np.nan], ["A", "B", np.nan]])
Reported by Pylint.
Line: 89
Column: 9
# Since right now, by default MI will drop NA from levels when we create MI
# via `from_*`, so we need to add NA for level manually afterwards.
if not dropna:
    mi = mi.set_levels([["A", "B", np.nan], ["A", "B", np.nan]])
expected = pd.DataFrame(outputs, index=mi)
tm.assert_frame_equal(grouped, expected)
Reported by Pylint.
pandas/tests/frame/methods/test_round.py
37 issues
Line: 2
Column: 1
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Series,
date_range,
)
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Series,
date_range,
)
Reported by Pylint.
Line: 13
Column: 1
import pandas._testing as tm


class TestDataFrameRound:
    def test_round(self):
        # GH#2665
        # Test that rounding an empty DataFrame does nothing
        df = DataFrame()
Reported by Pylint.
Line: 14
Column: 5
class TestDataFrameRound:
    def test_round(self):
        # GH#2665
        # Test that rounding an empty DataFrame does nothing
        df = DataFrame()
        tm.assert_frame_equal(df, df.round())
Reported by Pylint.
Line: 14
Column: 5
class TestDataFrameRound:
    def test_round(self):
        # GH#2665
        # Test that rounding an empty DataFrame does nothing
        df = DataFrame()
        tm.assert_frame_equal(df, df.round())
Reported by Pylint.
Line: 14
Column: 5
class TestDataFrameRound:
    def test_round(self):
        # GH#2665
        # Test that rounding an empty DataFrame does nothing
        df = DataFrame()
        tm.assert_frame_equal(df, df.round())
Reported by Pylint.
Line: 14
Column: 5
class TestDataFrameRound:
    def test_round(self):
        # GH#2665
        # Test that rounding an empty DataFrame does nothing
        df = DataFrame()
        tm.assert_frame_equal(df, df.round())
Reported by Pylint.
Line: 18
Column: 9
# GH#2665
# Test that rounding an empty DataFrame does nothing
df = DataFrame()
tm.assert_frame_equal(df, df.round())
# Here's the test frame we'll be working with
df = DataFrame({"col1": [1.123, 2.123, 3.123], "col2": [1.234, 2.234, 3.234]})
Reported by Pylint.
Line: 22
Column: 9
tm.assert_frame_equal(df, df.round())
# Here's the test frame we'll be working with
df = DataFrame({"col1": [1.123, 2.123, 3.123], "col2": [1.234, 2.234, 3.234]})
# Default round to integer (i.e. decimals=0)
expected_rounded = DataFrame({"col1": [1.0, 2.0, 3.0], "col2": [1.0, 2.0, 3.0]})
tm.assert_frame_equal(df.round(), expected_rounded)
Reported by Pylint.
Line: 74
Column: 9
with pytest.raises(TypeError, match=msg):
    df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError, match=msg):
    df.round(non_int_round_Series)
# List input
non_int_round_dict = {"col1": 1, "col2": [1, 2]}
Reported by Pylint.
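The mixed-case local `non_int_round_Series` does not match Pylint's default snake_case pattern for variable names, so invalid-name (C0103) is a plausible trigger; the exact rule is not shown. A minimal sketch of the rename, assuming that is the message:

# Sketch only: snake_case keeps the naming checker quiet.
non_int_round_series = Series(non_int_round_dict)
with pytest.raises(TypeError, match=msg):
    df.round(non_int_round_series)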
pandas/tests/io/parser/test_na_values.py
37 issues
Line: 8
Column: 1
from io import StringIO
import numpy as np
import pytest
from pandas._libs.parsers import STR_NA_VALUES
from pandas import (
DataFrame,
Reported by Pylint.
Line: 10
Column: 1
import numpy as np
import pytest
from pandas._libs.parsers import STR_NA_VALUES
from pandas import (
DataFrame,
Index,
MultiIndex,
Reported by Pylint.
Line: 10
Column: 1
import numpy as np
import pytest
from pandas._libs.parsers import STR_NA_VALUES
from pandas import (
DataFrame,
Index,
MultiIndex,
Reported by Pylint.
Line: 20
Column: 1
import pandas._testing as tm


def test_string_nas(all_parsers):
    parser = all_parsers
    data = """A,B,C
a,b,c
d,,f
,g,h
Reported by Pylint.
Line: 35
Column: 1
    tm.assert_frame_equal(result, expected)


def test_detect_string_na(all_parsers):
    parser = all_parsers
    data = """A,B
foo,bar
NA,baz
NaN,nan
Reported by Pylint.
Line: 68
Column: 1
-999,1.2
2,-999
3,4.5
""",
"""A,B
-999,1.200
2,-999.000
3,4.500
""",
Reported by Pylint.
Line: 86
Column: 1
    tm.assert_frame_equal(result, expected)


def test_default_na_values(all_parsers):
    _NA_VALUES = {
        "-1.#IND",
        "1.#QNAN",
        "1.#IND",
        "-1.#QNAN",
Reported by Pylint.
Line: 87
Column: 5
def test_default_na_values(all_parsers):
    _NA_VALUES = {
        "-1.#IND",
        "1.#QNAN",
        "1.#IND",
        "-1.#QNAN",
        "#N/A",
Reported by Pylint.
Line: 107
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
"#N/A N/A",
"",
}
assert _NA_VALUES == STR_NA_VALUES
parser = all_parsers
nv = len(_NA_VALUES)
def f(i, v):
Reported by Bandit.
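Bandit's B101 (linked above) flags any bare assert because asserts are stripped when Python runs with optimization. In pytest code the assert is intentional, so the usual responses are excluding test paths from the Bandit run or marking the line, rather than rewriting it; a sketch of both options, assuming the project wants to keep B101 enabled elsewhere:

# Option 1: per-line suppression (Bandit honours nosec comments).
assert _NA_VALUES == STR_NA_VALUES  # nosec B101

# Option 2: an explicit raise keeps the check meaningful even under python -O.
if _NA_VALUES != STR_NA_VALUES:
    raise AssertionError("default NA values drifted from STR_NA_VALUES")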
Line: 110
Column: 5
assert _NA_VALUES == STR_NA_VALUES
parser = all_parsers
nv = len(_NA_VALUES)

def f(i, v):
    if i == 0:
        buf = ""
    elif i > 0:
Reported by Pylint.