The following issues were found:
pandas/compat/numpy/function.py
13 issues
Line: 24
Column: 1
from numpy import ndarray
from pandas._libs.lib import (
is_bool,
is_integer,
)
from pandas.errors import UnsupportedFunctionCall
from pandas.util._validators import (
Reported by Pylint.
Line: 24
Column: 1
from numpy import ndarray
from pandas._libs.lib import (
is_bool,
is_integer,
)
from pandas.errors import UnsupportedFunctionCall
from pandas.util._validators import (
Reported by Pylint.
Line: 24
Column: 1
from numpy import ndarray
from pandas._libs.lib import (
is_bool,
is_integer,
)
from pandas.errors import UnsupportedFunctionCall
from pandas.util._validators import (
Reported by Pylint.
Line: 28
Column: 1
is_bool,
is_integer,
)
from pandas.errors import UnsupportedFunctionCall
from pandas.util._validators import (
validate_args,
validate_args_and_kwargs,
validate_kwargs,
)
Reported by Pylint.
Line: 29
Column: 1
is_integer,
)
from pandas.errors import UnsupportedFunctionCall
from pandas.util._validators import (
validate_args,
validate_args_and_kwargs,
validate_kwargs,
)
Reported by Pylint.
Line: 36
Column: 1
)
class CompatValidator:
def __init__(
self,
defaults,
fname=None,
method: str | None = None,
Reported by Pylint.
Line: 36
Column: 1
)
class CompatValidator:
def __init__(
self,
defaults,
fname=None,
method: str | None = None,
Reported by Pylint.
Line: 49
Column: 5
self.defaults = defaults
self.max_fname_arg_count = max_fname_arg_count
def __call__(
self,
args,
kwargs,
fname=None,
max_fname_arg_count=None,
Reported by Pylint.
Line: 87
Column: 1
)
def process_skipna(skipna, args):
if isinstance(skipna, ndarray) or skipna is None:
args = (skipna,) + args
skipna = True
return skipna, args
Reported by Pylint.
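For context, a hedged illustration of the process_skipna helper quoted above, based only on the lines shown (the import path assumes the module is importable as written):
from pandas.compat.numpy.function import process_skipna
# When numpy fills the positional `skipna` slot with an array or None,
# the value is pushed back onto `args` and skipna falls back to True.
skipna, args = process_skipna(None, (1, 2))
# skipna -> True, args -> (None, 1, 2)
skipna, args = process_skipna(False, (1, 2))
# unchanged: skipna -> False, args -> (1, 2)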
Line: 311
Column: 1
)
def validate_window_func(name, args, kwargs) -> None:
numpy_args = ("axis", "dtype", "out")
msg = (
f"numpy operations are not valid with window objects. "
f"Use .{name}() directly instead "
)
Reported by Pylint.
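A hedged sketch of what validate_window_func enforces; the raise itself sits below the quoted lines, so this assumes the usual pattern of rejecting any of the listed numpy keywords:
from pandas.errors import UnsupportedFunctionCall
from pandas.compat.numpy.function import validate_window_func
try:
    # numpy-compat keywords such as dtype are not accepted by window aggregations
    validate_window_func("sum", (), {"dtype": "float64"})
except UnsupportedFunctionCall as err:
    print(err)  # "numpy operations are not valid with window objects. Use .sum() directly instead"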
asv_bench/benchmarks/io/parsers.py
13 issues
Line: 19
Column: 9
param_names = ["value"]
def setup(self, value):
self.objects = [value] * 1000000
def time_check_datetimes(self, value):
for obj in self.objects:
_does_string_look_like_datetime(obj)
Reported by Pylint.
Line: 21
Column: 36
def setup(self, value):
self.objects = [value] * 1000000
def time_check_datetimes(self, value):
for obj in self.objects:
_does_string_look_like_datetime(obj)
class ConcatDateCols:
Reported by Pylint.
Line: 34
Column: 13
def setup(self, value, dim):
count_elem = 10000
if dim == 1:
self.object = (np.array([value] * count_elem),)
if dim == 2:
self.object = (
np.array([value] * count_elem),
np.array([value] * count_elem),
)
Reported by Pylint.
Line: 36
Column: 13
if dim == 1:
self.object = (np.array([value] * count_elem),)
if dim == 2:
self.object = (
np.array([value] * count_elem),
np.array([value] * count_elem),
)
def time_check_concat(self, value, dim):
Reported by Pylint.
Line: 41
Column: 40
np.array([value] * count_elem),
)
def time_check_concat(self, value, dim):
concat_date_cols(self.object)
Reported by Pylint.
Line: 41
Column: 33
np.array([value] * count_elem),
)
def time_check_concat(self, value, dim):
concat_date_cols(self.object)
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
try:
from pandas._libs.tslibs.parsing import (
_does_string_look_like_datetime,
concat_date_cols,
)
except ImportError:
# Avoid whole benchmark suite import failure on asv (currently 0.4)
Reported by Pylint.
Line: 13
Column: 1
pass
class DoesStringLookLikeDatetime:
params = (["2Q2005", "0.0", "10000"],)
param_names = ["value"]
def setup(self, value):
Reported by Pylint.
Line: 18
Column: 5
params = (["2Q2005", "0.0", "10000"],)
param_names = ["value"]
def setup(self, value):
self.objects = [value] * 1000000
def time_check_datetimes(self, value):
for obj in self.objects:
_does_string_look_like_datetime(obj)
Reported by Pylint.
Line: 21
Column: 5
def setup(self, value):
self.objects = [value] * 1000000
def time_check_datetimes(self, value):
for obj in self.objects:
_does_string_look_like_datetime(obj)
class ConcatDateCols:
Reported by Pylint.
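The flagged classes follow asv's benchmark conventions: params and param_names feed setup and every time_* method. A minimal hedged sketch of that shape (class and values are illustrative, not part of the benchmarks above):
import numpy as np
class ExampleBenchmark:
    params = ([10, 100],)        # one list of values per parameter
    param_names = ["size"]       # names shown in asv output
    def setup(self, size):
        # setup runs before each timed call with the same parameter values
        self.data = np.arange(size)
    def time_sum(self, size):
        # asv times methods prefixed with time_
        self.data.sum()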
pandas/tests/groupby/test_apply_mutate.py
13 issues
Line: 1
Column: 1
import numpy as np
import pandas as pd
import pandas._testing as tm
def test_mutate_groups():
# GH3380
Reported by Pylint.
Line: 7
Column: 1
import pandas._testing as tm
def test_mutate_groups():
# GH3380
df = pd.DataFrame(
{
Reported by Pylint.
Line: 11
Column: 5
# GH3380
df = pd.DataFrame(
{
"cat1": ["a"] * 8 + ["b"] * 6,
"cat2": ["c"] * 2
+ ["d"] * 2
+ ["e"] * 2
Reported by Pylint.
Line: 26
Column: 5
}
)
def f_copy(x):
x = x.copy()
x["rank"] = x.val.rank(method="min")
return x.groupby("cat2")["rank"].min()
def f_no_copy(x):
Reported by Pylint.
Line: 31
Column: 5
x["rank"] = x.val.rank(method="min")
return x.groupby("cat2")["rank"].min()
def f_no_copy(x):
x["rank"] = x.val.rank(method="min")
return x.groupby("cat2")["rank"].min()
grpby_copy = df.groupby("cat1").apply(f_copy)
grpby_no_copy = df.groupby("cat1").apply(f_no_copy)
Reported by Pylint.
Line: 40
Column: 1
tm.assert_series_equal(grpby_copy, grpby_no_copy)
def test_no_mutate_but_looks_like():
# GH 8467
# first show's mutation indicator
# second does not, but should yield the same results
df = pd.DataFrame({"key": [1, 1, 1, 2, 2, 2, 3, 3, 3], "value": range(9)})
Reported by Pylint.
Line: 45
Column: 5
# GH 8467
# first show's mutation indicator
# second does not, but should yield the same results
df = pd.DataFrame({"key": [1, 1, 1, 2, 2, 2, 3, 3, 3], "value": range(9)})
result1 = df.groupby("key", group_keys=True).apply(lambda x: x[:].key)
result2 = df.groupby("key", group_keys=True).apply(lambda x: x.key)
tm.assert_series_equal(result1, result2)
Reported by Pylint.
Line: 52
Column: 1
tm.assert_series_equal(result1, result2)
def test_apply_function_with_indexing():
# GH: 33058
df = pd.DataFrame(
{"col1": ["A", "A", "A", "B", "B", "B"], "col2": [1, 2, 3, 4, 5, 6]}
)
Reported by Pylint.
Line: 54
Column: 5
def test_apply_function_with_indexing():
# GH: 33058
df = pd.DataFrame(
{"col1": ["A", "A", "A", "B", "B", "B"], "col2": [1, 2, 3, 4, 5, 6]}
)
def fn(x):
x.col2[x.index[-1]] = 0
Reported by Pylint.
Line: 58
Column: 5
{"col1": ["A", "A", "A", "B", "B", "B"], "col2": [1, 2, 3, 4, 5, 6]}
)
def fn(x):
x.col2[x.index[-1]] = 0
return x.col2
result = df.groupby(["col1"], as_index=False).apply(fn)
expected = pd.Series(
Reported by Pylint.
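For reference, a hedged sketch of the groupby().apply pattern these tests exercise; the toy frame and function are illustrative, not the tests' data:
import pandas as pd
df = pd.DataFrame({"cat1": ["a", "a", "b"], "val": [1, 2, 3]})
def ranked_min(x):
    x = x.copy()                              # work on a copy rather than mutating the group
    x["rank"] = x["val"].rank(method="min")
    return x["rank"].min()
df.groupby("cat1").apply(ranked_min)          # one value per group: a -> 1.0, b -> 1.0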
pandas/tests/frame/methods/test_combine.py
13 issues
Line: 2
Column: 1
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
class TestCombine:
@pytest.mark.parametrize(
Reported by Pylint.
Line: 24
Column: 22
other = df.copy()
df.iloc[1, 0] = None
def combiner(a, b):
return b
result = df.combine(other, combiner)
tm.assert_frame_equal(result, other)
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
class TestCombine:
@pytest.mark.parametrize(
Reported by Pylint.
Line: 8
Column: 1
import pandas._testing as tm
class TestCombine:
@pytest.mark.parametrize(
"data",
[
pd.date_range("2000", periods=4),
pd.date_range("2000", periods=4, tz="US/Central"),
Reported by Pylint.
Line: 16
Column: 5
pd.date_range("2000", periods=4, tz="US/Central"),
pd.period_range("2000", periods=4),
pd.timedelta_range(0, periods=4),
],
)
def test_combine_datetlike_udf(self, data):
# GH#23079
df = pd.DataFrame({"A": data})
other = df.copy()
Reported by Pylint.
Line: 16
Column: 5
pd.date_range("2000", periods=4, tz="US/Central"),
pd.period_range("2000", periods=4),
pd.timedelta_range(0, periods=4),
],
)
def test_combine_datetlike_udf(self, data):
# GH#23079
df = pd.DataFrame({"A": data})
other = df.copy()
Reported by Pylint.
Line: 20
Column: 9
)
def test_combine_datetlike_udf(self, data):
# GH#23079
df = pd.DataFrame({"A": data})
other = df.copy()
df.iloc[1, 0] = None
def combiner(a, b):
return b
Reported by Pylint.
Line: 24
Column: 9
other = df.copy()
df.iloc[1, 0] = None
def combiner(a, b):
return b
result = df.combine(other, combiner)
tm.assert_frame_equal(result, other)
Reported by Pylint.
Line: 24
Column: 9
other = df.copy()
df.iloc[1, 0] = None
def combiner(a, b):
return b
result = df.combine(other, combiner)
tm.assert_frame_equal(result, other)
Reported by Pylint.
Line: 30
Column: 5
result = df.combine(other, combiner)
tm.assert_frame_equal(result, other)
def test_combine_generic(self, float_frame):
df1 = float_frame
df2 = float_frame.loc[float_frame.index[:-5], ["A", "B", "C"]]
combined = df1.combine(df2, np.add)
combined2 = df2.combine(df1, np.add)
Reported by Pylint.
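DataFrame.combine, exercised above, merges two frames column by column through the passed function. A small hedged example of those semantics (data is illustrative):
import pandas as pd
df1 = pd.DataFrame({"A": [0, 4], "B": [2, 2]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
# The combiner receives one column (Series) from each frame at a time.
df1.combine(df2, lambda s1, s2: s1.where(s1 > s2, s2))   # element-wise maximum
#    A  B
# 0  1  3
# 1  4  3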
pandas/tests/groupby/conftest.py
13 issues
Line: 2
Column: 1
import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
)
import pandas._testing as tm
from pandas.core.groupby.base import (
Reported by Pylint.
Line: 53
Column: 13
@pytest.fixture
def tsframe(tsd):
return DataFrame(tsd)
@pytest.fixture
def df_mixed_floats():
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import pytest
from pandas import (
DataFrame,
MultiIndex,
)
import pandas._testing as tm
from pandas.core.groupby.base import (
Reported by Pylint.
Line: 16
Column: 1
@pytest.fixture(params=[True, False])
def as_index(request):
return request.param
@pytest.fixture
def mframe():
Reported by Pylint.
Line: 21
Column: 1
@pytest.fixture
def mframe():
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
Reported by Pylint.
Line: 31
Column: 1
@pytest.fixture
def df():
return DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "three", "two", "two", "one", "three"],
"C": np.random.randn(8),
Reported by Pylint.
Line: 31
Column: 1
@pytest.fixture
def df():
return DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
"B": ["one", "one", "two", "three", "two", "two", "one", "three"],
"C": np.random.randn(8),
Reported by Pylint.
Line: 43
Column: 1
@pytest.fixture
def ts():
return tm.makeTimeSeries()
@pytest.fixture
def tsd():
Reported by Pylint.
Line: 43
Column: 1
@pytest.fixture
def ts():
return tm.makeTimeSeries()
@pytest.fixture
def tsd():
Reported by Pylint.
Line: 48
Column: 1
@pytest.fixture
def tsd():
return tm.getTimeSeriesData()
@pytest.fixture
def tsframe(tsd):
Reported by Pylint.
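The conftest above defines shared fixtures; a hedged sketch of how a test consumes a parametrized fixture such as as_index (the test name is illustrative):
import pytest
@pytest.fixture(params=[True, False])
def as_index(request):
    return request.param
def test_consumes_fixture(as_index):
    # pytest runs this test once per fixture parameter value
    assert as_index in (True, False)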
pandas/io/excel/_xlsxwriter.py
13 issues
Line: 5
Column: 1
from typing import Any
import pandas._libs.json as json
from pandas._typing import StorageOptions
from pandas.io.excel._base import ExcelWriter
from pandas.io.excel._util import (
combine_kwargs,
Reported by Pylint.
Line: 5
Column: 1
from typing import Any
import pandas._libs.json as json
from pandas._typing import StorageOptions
from pandas.io.excel._base import ExcelWriter
from pandas.io.excel._util import (
combine_kwargs,
Reported by Pylint.
Line: 184
Column: 9
**kwargs,
):
# Use the xlsxwriter module as the Excel writer.
from xlsxwriter import Workbook
engine_kwargs = combine_kwargs(engine_kwargs, kwargs)
if mode == "a":
raise ValueError("Append mode is not supported with xlsxwriter!")
Reported by Pylint.
Line: 125
Column: 3
props[dst] = v
if isinstance(props.get("pattern"), str):
# TODO: support other fill patterns
props["pattern"] = 0 if props["pattern"] == "none" else 1
for k in ["border", "top", "right", "bottom", "left"]:
if isinstance(props.get(k), str):
try:
Reported by Pylint.
Line: 1
Column: 1
from __future__ import annotations
from typing import Any
import pandas._libs.json as json
from pandas._typing import StorageOptions
from pandas.io.excel._base import ExcelWriter
from pandas.io.excel._util import (
Reported by Pylint.
Line: 15
Column: 1
)
class _XlsxStyler:
# Map from openpyxl-oriented styles to flatter xlsxwriter representation
# Ordering necessary for both determinism and because some are keyed by
# prefixes of others.
STYLE_MAPPING: dict[str, list[tuple[tuple[str, ...], str]]] = {
"font": [
Reported by Pylint.
Line: 87
Column: 5
}
@classmethod
def convert(cls, style_dict, num_format_str=None):
"""
converts a style_dict to an xlsxwriter format dict
Parameters
----------
Reported by Pylint.
Line: 115
Column: 17
# dst is a flat key
if dst in props:
continue
v = style_group
for k in src:
try:
v = v[k]
except (KeyError, TypeError):
break
Reported by Pylint.
Line: 118
Column: 25
v = style_group
for k in src:
try:
v = v[k]
except (KeyError, TypeError):
break
else:
props[dst] = v
Reported by Pylint.
Line: 167
Column: 1
return props
class XlsxWriter(ExcelWriter):
engine = "xlsxwriter"
supported_extensions = (".xlsx",)
def __init__(
self,
Reported by Pylint.
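The snippet above shows the xlsxwriter engine rejecting append mode. A hedged usage sketch, assuming the optional xlsxwriter package is installed (file name is illustrative):
import pandas as pd
# Writing works as usual when the engine is selected explicitly.
with pd.ExcelWriter("report.xlsx", engine="xlsxwriter") as writer:
    pd.DataFrame({"a": [1, 2]}).to_excel(writer, sheet_name="Sheet1")
# Per the check quoted above, append mode raises:
# pd.ExcelWriter("report.xlsx", engine="xlsxwriter", mode="a")  -> ValueError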
pandas/tests/arrays/period/test_astype.py
13 issues
Line: 2
Column: 1
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import period_array
Reported by Pylint.
Line: 41
Column: 27
# Add the `.base`, since we now use `.asi8` which returns a view.
# We could maybe override it in PeriodArray to return ._data directly.
assert result.base is arr._data
with tm.assert_produces_warning(FutureWarning):
# astype(int..) deprecated
result = arr.astype(np.int64, copy=True)
assert result is not arr._data
Reported by Pylint.
Line: 46
Column: 26
with tm.assert_produces_warning(FutureWarning):
# astype(int..) deprecated
result = arr.astype(np.int64, copy=True)
assert result is not arr._data
tm.assert_numpy_array_equal(result, arr._data.view("i8"))
def test_astype_categorical():
arr = period_array(["2000", "2001", "2001", None], freq="D")
Reported by Pylint.
Line: 47
Column: 41
# astype(int..) deprecated
result = arr.astype(np.int64, copy=True)
assert result is not arr._data
tm.assert_numpy_array_equal(result, arr._data.view("i8"))
def test_astype_categorical():
arr = period_array(["2000", "2001", "2001", None], freq="D")
result = arr.astype("category")
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import PeriodDtype
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import period_array
Reported by Pylint.
Line: 12
Column: 1
@pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"])
def test_astype(dtype):
# We choose to ignore the sign and size of integers for
# Period/Datetime/Timedelta astype
arr = period_array(["2000", "2001", None], freq="D")
with tm.assert_produces_warning(FutureWarning):
# astype(int..) deprecated
Reported by Pylint.
Line: 29
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
# astype(int..) deprecated
expected = arr.astype(expected_dtype)
assert result.dtype == expected_dtype
tm.assert_numpy_array_equal(result, expected)
def test_astype_copies():
arr = period_array(["2000", "2001", None], freq="D")
Reported by Bandit.
Line: 33
Column: 1
tm.assert_numpy_array_equal(result, expected)
def test_astype_copies():
arr = period_array(["2000", "2001", None], freq="D")
with tm.assert_produces_warning(FutureWarning):
# astype(int..) deprecated
result = arr.astype(np.int64, copy=False)
Reported by Pylint.
Line: 41
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
# Add the `.base`, since we now use `.asi8` which returns a view.
# We could maybe override it in PeriodArray to return ._data directly.
assert result.base is arr._data
with tm.assert_produces_warning(FutureWarning):
# astype(int..) deprecated
result = arr.astype(np.int64, copy=True)
assert result is not arr._data
Reported by Bandit.
Line: 46
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
with tm.assert_produces_warning(FutureWarning):
# astype(int..) deprecated
result = arr.astype(np.int64, copy=True)
assert result is not arr._data
tm.assert_numpy_array_equal(result, arr._data.view("i8"))
def test_astype_categorical():
arr = period_array(["2000", "2001", "2001", None], freq="D")
Reported by Bandit.
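The Bandit findings above are B101 (assert_used), which flags bare assert statements; in pytest-based test files asserts are intentional. A hedged sketch of an inline suppression, if the project decides to silence individual hits rather than excluding test paths in its Bandit configuration:
# Bandit honors "# nosec" markers on a flagged line (whether to suppress is a project decision):
assert result.dtype == expected_dtype  # nosec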
pandas/tests/arrays/period/test_reductions.py
13 issues
Line: 1
Column: 1
import pytest
import pandas as pd
from pandas.core.arrays import period_array
class TestReductions:
def test_min_max(self):
arr = period_array(
Reported by Pylint.
Line: 1
Column: 1
import pytest
import pandas as pd
from pandas.core.arrays import period_array
class TestReductions:
def test_min_max(self):
arr = period_array(
Reported by Pylint.
Line: 7
Column: 1
from pandas.core.arrays import period_array
class TestReductions:
def test_min_max(self):
arr = period_array(
[
"2000-01-03",
"2000-01-03",
Reported by Pylint.
Line: 8
Column: 5
class TestReductions:
def test_min_max(self):
arr = period_array(
[
"2000-01-03",
"2000-01-03",
"NaT",
Reported by Pylint.
Line: 8
Column: 5
class TestReductions:
def test_min_max(self):
arr = period_array(
[
"2000-01-03",
"2000-01-03",
"NaT",
Reported by Pylint.
Line: 23
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
result = arr.min()
expected = pd.Period("2000-01-02", freq="D")
assert result == expected
result = arr.max()
expected = pd.Period("2000-01-05", freq="D")
assert result == expected
Reported by Bandit.
Line: 27
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
result = arr.max()
expected = pd.Period("2000-01-05", freq="D")
assert result == expected
result = arr.min(skipna=False)
assert result is pd.NaT
result = arr.max(skipna=False)
Reported by Bandit.
Line: 30
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
assert result == expected
result = arr.min(skipna=False)
assert result is pd.NaT
result = arr.max(skipna=False)
assert result is pd.NaT
@pytest.mark.parametrize("skipna", [True, False])
Reported by Bandit.
Line: 33
Suggestion: https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
assert result is pd.NaT
result = arr.max(skipna=False)
assert result is pd.NaT
@pytest.mark.parametrize("skipna", [True, False])
def test_min_max_empty(self, skipna):
arr = period_array([], freq="D")
result = arr.min(skipna=skipna)
Reported by Bandit.
Line: 36
Column: 5
assert result is pd.NaT
@pytest.mark.parametrize("skipna", [True, False])
def test_min_max_empty(self, skipna):
arr = period_array([], freq="D")
result = arr.min(skipna=skipna)
assert result is pd.NaT
result = arr.max(skipna=skipna)
Reported by Pylint.
pandas/core/strings/base.py
13 issues
Line: 1
Column: 1
from __future__ import annotations
import abc
from collections.abc import Callable # noqa: PDF001
import re
import numpy as np
from pandas._typing import Scalar
Reported by Pylint.
Line: 12
Column: 1
from pandas._typing import Scalar
class BaseStringArrayMethods(abc.ABC):
"""
Base class for extension arrays implementing string methods.
This is where our ExtensionArrays can override the implementation of
Series.str.<method>. We don't expect this to work with
Reported by Pylint.
Line: 29
Column: 9
"""
def _str_getitem(self, key):
if isinstance(key, slice):
return self._str_slice(start=key.start, stop=key.stop, step=key.step)
else:
return self._str_get(key)
@abc.abstractmethod
Reported by Pylint.
Line: 43
Column: 5
pass
@abc.abstractmethod
def _str_contains(self, pat, case=True, flags=0, na=None, regex=True):
pass
@abc.abstractmethod
def _str_startswith(self, pat, na=None):
pass
Reported by Pylint.
Line: 43
Column: 5
pass
@abc.abstractmethod
def _str_contains(self, pat, case=True, flags=0, na=None, regex=True):
pass
@abc.abstractmethod
def _str_startswith(self, pat, na=None):
pass
Reported by Pylint.
Line: 47
Column: 5
pass
@abc.abstractmethod
def _str_startswith(self, pat, na=None):
pass
@abc.abstractmethod
def _str_endswith(self, pat, na=None):
pass
Reported by Pylint.
Line: 51
Column: 5
pass
@abc.abstractmethod
def _str_endswith(self, pat, na=None):
pass
@abc.abstractmethod
def _str_replace(
self,
Reported by Pylint.
Line: 55
Column: 5
pass
@abc.abstractmethod
def _str_replace(
self,
pat: str | re.Pattern,
repl: str | Callable,
n: int = -1,
case: bool = True,
Reported by Pylint.
Line: 55
Column: 5
pass
@abc.abstractmethod
def _str_replace(
self,
pat: str | re.Pattern,
repl: str | Callable,
n: int = -1,
case: bool = True,
Reported by Pylint.
Line: 71
Column: 5
pass
@abc.abstractmethod
def _str_match(
self, pat: str, case: bool = True, flags: int = 0, na: Scalar = np.nan
):
pass
@abc.abstractmethod
Reported by Pylint.
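The _str_getitem shown above dispatches slice keys to _str_slice and scalar keys to _str_get, which is what backs indexing on the .str accessor. A small hedged example of the user-facing behavior:
import pandas as pd
s = pd.Series(["pandas", "python"])
s.str[0:3]   # slice key  -> _str_slice -> "pan", "pyt"
s.str[0]     # scalar key -> _str_get   -> "p", "p"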
pandas/tests/frame/methods/test_compare.py
13 issues
Line: 2
Column: 1
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize("align_axis", [0, 1, "index", "columns"])
def test_compare_axis(align_axis):
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize("align_axis", [0, 1, "index", "columns"])
def test_compare_axis(align_axis):
Reported by Pylint.
Line: 9
Column: 1
@pytest.mark.parametrize("align_axis", [0, 1, "index", "columns"])
def test_compare_axis(align_axis):
# GH#30429
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
columns=["col1", "col2", "col3"],
)
Reported by Pylint.
Line: 11
Column: 5
@pytest.mark.parametrize("align_axis", [0, 1, "index", "columns"])
def test_compare_axis(align_axis):
# GH#30429
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
columns=["col1", "col2", "col3"],
)
df2 = df.copy()
df2.loc[0, "col1"] = "c"
Reported by Pylint.
Line: 46
Column: 1
(True, False),
(False, True),
(True, True),
# False, False case is already covered in test_compare_axis
],
)
def test_compare_various_formats(keep_shape, keep_equal):
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
Reported by Pylint.
Line: 50
Column: 5
],
)
def test_compare_various_formats(keep_shape, keep_equal):
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
columns=["col1", "col2", "col3"],
)
df2 = df.copy()
df2.loc[0, "col1"] = "c"
Reported by Pylint.
Line: 94
Column: 1
tm.assert_frame_equal(result, expected)
def test_compare_with_equal_nulls():
# We want to make sure two NaNs are considered the same
# and dropped where applicable
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
columns=["col1", "col2", "col3"],
Reported by Pylint.
Line: 97
Column: 5
def test_compare_with_equal_nulls():
# We want to make sure two NaNs are considered the same
# and dropped where applicable
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
columns=["col1", "col2", "col3"],
)
df2 = df.copy()
df2.loc[0, "col1"] = "c"
Reported by Pylint.
Line: 111
Column: 1
tm.assert_frame_equal(result, expected)
def test_compare_with_non_equal_nulls():
# We want to make sure the relevant NaNs do not get dropped
# even if the entire row or column are NaNs
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
columns=["col1", "col2", "col3"],
Reported by Pylint.
Line: 114
Column: 5
def test_compare_with_non_equal_nulls():
# We want to make sure the relevant NaNs do not get dropped
# even if the entire row or column are NaNs
df = pd.DataFrame(
{"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]},
columns=["col1", "col2", "col3"],
)
df2 = df.copy()
df2.loc[0, "col1"] = "c"
Reported by Pylint.
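For context, a hedged sketch of the DataFrame.compare API these tests cover; the frame mirrors the data in the snippets above:
import numpy as np
import pandas as pd
df = pd.DataFrame(
    {"col1": ["a", "b", "c"], "col2": [1.0, 2.0, np.nan], "col3": [1.0, 2.0, 3.0]}
)
df2 = df.copy()
df2.loc[0, "col1"] = "c"
df.compare(df2)                 # only differing cells, labelled self/other
df.compare(df2, align_axis=0)   # stack self/other along the index instead of the columns
df.compare(df2, keep_shape=True, keep_equal=True)  # keep all rows/columns and equal values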