The following issues were found:
pandas/tests/indexes/interval/test_interval_tree.py
47 issues
Line: 4
Column: 1
from itertools import permutations
import numpy as np
import pytest
from pandas._libs.interval import IntervalTree
from pandas.compat import IS64
import pandas._testing as tm
Reported by Pylint.
Line: 6
Column: 1
import numpy as np
import pytest
from pandas._libs.interval import IntervalTree
from pandas.compat import IS64
import pandas._testing as tm
Reported by Pylint.
Line: 43
Column: 19
np.array([0, 1, 2, 3, 4, np.nan], dtype="float64"),
]
)
def tree(request, leaf_size):
left = request.param
return IntervalTree(left, left + 2, leaf_size=leaf_size)
class TestIntervalTree:
Reported by Pylint.
Line: 49
Column: 32
class TestIntervalTree:
def test_get_indexer(self, tree):
result = tree.get_indexer(np.array([1.0, 5.5, 6.5]))
expected = np.array([0, 4, -1], dtype="intp")
tm.assert_numpy_array_equal(result, expected)
with pytest.raises(
Reported by Pylint.
Line: 63
Column: 41
"dtype, target_value, target_dtype",
[("int64", 2 ** 63 + 1, "uint64"), ("uint64", -1, "int64")],
)
def test_get_indexer_overflow(self, dtype, target_value, target_dtype):
left, right = np.array([0, 1], dtype=dtype), np.array([1, 2], dtype=dtype)
tree = IntervalTree(left, right)
result = tree.get_indexer(np.array([target_value], dtype=target_dtype))
expected = np.array([-1], dtype="intp")
Reported by Pylint.
Line: 65
Column: 9
)
def test_get_indexer_overflow(self, dtype, target_value, target_dtype):
left, right = np.array([0, 1], dtype=dtype), np.array([1, 2], dtype=dtype)
tree = IntervalTree(left, right)
result = tree.get_indexer(np.array([target_value], dtype=target_dtype))
expected = np.array([-1], dtype="intp")
tm.assert_numpy_array_equal(result, expected)
Reported by Pylint.
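For context on the two overflow cases above: the parametrization pairs an int64 tree with a uint64 target (and vice versa) precisely because each target value cannot be represented in the other dtype, so get_indexer has to report "no match" (-1). A minimal illustration, independent of the test file:

import numpy as np

# 2**63 + 1 is one past the int64 maximum, so it only fits in uint64;
# likewise -1 is below the uint64 minimum, so it only fits in int64.
print(np.iinfo("int64").max)   # 9223372036854775807 == 2**63 - 1
print(np.iinfo("uint64").min)  # 0
print(np.uint64(2**63 + 1))    # 9223372036854775809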
Line: 71
Column: 43
expected = np.array([-1], dtype="intp")
tm.assert_numpy_array_equal(result, expected)
def test_get_indexer_non_unique(self, tree):
indexer, missing = tree.get_indexer_non_unique(np.array([1.0, 2.0, 6.5]))
result = indexer[:1]
expected = np.array([0], dtype="intp")
tm.assert_numpy_array_equal(result, expected)
Reported by Pylint.
Line: 94
Column: 52
"dtype, target_value, target_dtype",
[("int64", 2 ** 63 + 1, "uint64"), ("uint64", -1, "int64")],
)
def test_get_indexer_non_unique_overflow(self, dtype, target_value, target_dtype):
left, right = np.array([0, 2], dtype=dtype), np.array([1, 3], dtype=dtype)
tree = IntervalTree(left, right)
target = np.array([target_value], dtype=target_dtype)
result_indexer, result_missing = tree.get_indexer_non_unique(target)
Reported by Pylint.
Line: 96
Column: 9
)
def test_get_indexer_non_unique_overflow(self, dtype, target_value, target_dtype):
left, right = np.array([0, 2], dtype=dtype), np.array([1, 3], dtype=dtype)
tree = IntervalTree(left, right)
target = np.array([target_value], dtype=target_dtype)
result_indexer, result_missing = tree.get_indexer_non_unique(target)
expected_indexer = np.array([-1], dtype="intp")
tm.assert_numpy_array_equal(result_indexer, expected_indexer)
Reported by Pylint.
pandas/tests/reshape/concat/test_index.py
47 issues
Line: 2
Column: 1
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Reported by Pylint.
Line: 15
Column: 1
import pandas._testing as tm
class TestIndexConcat:
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
Reported by Pylint.
Line: 16
Column: 5
class TestIndexConcat:
def test_concat_ignore_index(self, sort):
frame1 = DataFrame(
{"test1": ["a", "b", "c"], "test2": [1, 2, 3], "test3": [4.5, 3.2, 1.2]}
)
frame2 = DataFrame({"test3": [5.2, 2.2, 4.3]})
frame1.index = Index(["x", "y", "z"])
Reported by Pylint.
Line: 24
Column: 9
frame1.index = Index(["x", "y", "z"])
frame2.index = Index(["x", "y", "q"])
v1 = concat([frame1, frame2], axis=1, ignore_index=True, sort=sort)
nan = np.nan
expected = DataFrame(
[
[nan, nan, nan, 4.3],
Reported by Pylint.
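The expected frame above contains NaN because concat along axis=1 aligns the two frames on their differing row labels before ignore_index renumbers the columns. A small sketch of that behaviour, separate from the test file (frame names and values are made up):

import pandas as pd

# axis=1 concatenation aligns on the index (outer join by default), so rows
# present in only one frame are filled with NaN; ignore_index=True then
# renumbers the resulting columns 0..n-1.
a = pd.DataFrame({"x": [1, 2]}, index=["r1", "r2"])
b = pd.DataFrame({"y": [3, 4]}, index=["r2", "r3"])
print(pd.concat([a, b], axis=1, ignore_index=True))
#       0    1
# r1  1.0  NaN
# r2  2.0  3.0
# r3  NaN  4.0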
Line: 51
Column: 5
("idx1", "idx1", "idx2", None),
("idx1", "idx2", "idx3", None),
(None, None, None, None),
],
)
def test_concat_same_index_names(self, name_in1, name_in2, name_in3, name_out):
# GH13475
indices = [
Index(["a", "b", "c"], name=name_in1),
Reported by Pylint.
Line: 77
Column: 5
tm.assert_frame_equal(result, expected)
def test_concat_rename_index(self):
a = DataFrame(
np.random.rand(3, 3),
columns=list("ABC"),
index=Index(list("abc"), name="index_a"),
)
Reported by Pylint.
pandas/tests/frame/methods/test_join.py
47 issues
Line: 4
Column: 1
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
Reported by Pylint.
Line: 79
Column: 21
),
],
)
def test_join(left, right, how, sort, expected):
result = left.join(right, how=how, sort=sort)
tm.assert_frame_equal(result, expected)
Reported by Pylint.
Line: 79
Column: 15
),
],
)
def test_join(left, right, how, sort, expected):
result = left.join(right, how=how, sort=sort)
tm.assert_frame_equal(result, expected)
Reported by Pylint.
Line: 149
Column: 3
s = df.pop(float_frame.columns[-1])
joined = df.join(s)
# TODO should this check_names ?
tm.assert_frame_equal(joined, float_frame, check_names=False)
s.name = None
with pytest.raises(ValueError, match="must have a name"):
df.join(s)
Reported by Pylint.
Line: 172
Column: 28
tm.assert_frame_equal(joined, expected.loc[:, joined.columns])
def test_join_period_index(frame_with_period_index):
other = frame_with_period_index.rename(columns=lambda key: f"{key}{key}")
joined_values = np.concatenate([frame_with_period_index.values] * 2, axis=1)
joined_cols = frame_with_period_index.columns.append(other.columns)
Reported by Pylint.
Line: 244
Column: 3
assert not np.isnan(joined.values).all()
# TODO what should join do with names ?
tm.assert_frame_equal(joined, expected, check_names=False)
def test_join_segfault(self):
# GH#1532
df1 = DataFrame({"a": [1, 1], "b": [1, 2], "x": [1, 2]})
Reported by Pylint.
Line: 1
Column: 1
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
Reported by Pylint.
Line: 18
Column: 1
@pytest.fixture
def frame_with_period_index():
return DataFrame(
data=np.arange(20).reshape(4, 5),
columns=list("abcde"),
index=period_range(start="2000", freq="A", periods=4),
)
Reported by Pylint.
Line: 27
Column: 1
@pytest.fixture
def left():
return DataFrame({"a": [20, 10, 0]}, index=[2, 1, 0])
@pytest.fixture
def right():
Reported by Pylint.
Line: 32
Column: 1
@pytest.fixture
def right():
return DataFrame({"b": [300, 100, 200]}, index=[3, 1, 2])
@pytest.mark.parametrize(
"how, sort, expected",
Reported by Pylint.
pandas/io/html.py
47 issues
Line: 543
Column: 9
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
from bs4 import SoupStrainer
self._strainer = SoupStrainer("table")
def _parse_tables(self, doc, match, attrs):
element_name = self._strainer.name
Reported by Pylint.
Line: 599
Column: 9
return raw_text
def _build_doc(self):
from bs4 import BeautifulSoup
bdoc = self._setup_build_doc()
if isinstance(bdoc, bytes) and self.encoding is not None:
udoc = bdoc.decode(self.encoding)
from_encoding = None
Reported by Pylint.
Line: 715
Column: 9
--------
pandas.io.html._HtmlFrameParser._build_doc
"""
from lxml.etree import XMLSyntaxError
from lxml.html import (
HTMLParser,
fromstring,
parse,
)
Reported by Pylint.
Line: 716
Column: 9
pandas.io.html._HtmlFrameParser._build_doc
"""
from lxml.etree import XMLSyntaxError
from lxml.html import (
HTMLParser,
fromstring,
parse,
)
Reported by Pylint.
Line: 50
Column: 5
# import things we need
# but make this done on a first use basis
global _IMPORTS
if _IMPORTS:
return
global _HAS_BS4, _HAS_LXML, _HAS_HTML5LIB
bs4 = import_optional_dependency("bs4", errors="ignore")
Reported by Pylint.
Line: 54
Column: 5
if _IMPORTS:
return
global _HAS_BS4, _HAS_LXML, _HAS_HTML5LIB
bs4 = import_optional_dependency("bs4", errors="ignore")
_HAS_BS4 = bs4 is not None
lxml = import_optional_dependency("lxml.etree", errors="ignore")
_HAS_LXML = lxml is not None
Reported by Pylint.
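The two snippets above come from the module's lazy-import helper, which defers the optional bs4/lxml/html5lib imports until read_html is actually used. A rough sketch of that import-on-first-use pattern; the helper name and flags below are illustrative, not the pandas implementation:

_IMPORTS = False
_HAS_BS4 = False

def _ensure_optional_imports():
    # Populate the module-level flags once, on first use, so that merely
    # importing the module stays cheap and bs4 remains an optional dependency.
    global _IMPORTS, _HAS_BS4
    if _IMPORTS:
        return
    try:
        import bs4  # noqa: F401  (optional dependency)
        _HAS_BS4 = True
    except ImportError:
        _HAS_BS4 = False
    _IMPORTS = True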
Line: 577
Column: 5
def _equals_tag(self, obj, tag):
return obj.name == tag
def _parse_td(self, row):
return row.find_all(("td", "th"), recursive=False)
def _parse_thead_tr(self, table):
return table.select("thead tr")
Reported by Pylint.
Line: 657
Column: 5
:class:`_HtmlFrameParser`.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _text_getter(self, obj):
return obj.text_content()
Reported by Pylint.
Line: 663
Column: 5
def _text_getter(self, obj):
return obj.text_content()
def _parse_td(self, row):
# Look for direct children only: the "row" element here may be a
# <thead> or <tfoot> (see _parse_thead_tr).
return row.xpath("./td|./th")
def _parse_tables(self, doc, match, kwargs):
Reported by Pylint.
Line: 668
Column: 5
# <thead> or <tfoot> (see _parse_thead_tr).
return row.xpath("./td|./th")
def _parse_tables(self, doc, match, kwargs):
pattern = match.pattern
# 1. check all descendants for the given pattern and only search tables
# 2. go up the tree until we find a table
xpath_expr = f"//table//*[re:test(text(), {repr(pattern)})]/ancestor::table"
Reported by Pylint.
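The xpath_expr in the last snippet relies on lxml's support for the EXSLT regular-expression extension; re:test() only works when the "re" namespace is passed to xpath(). A standalone sketch of the same technique (the sample HTML and variable names are made up):

from lxml import html

doc = html.fromstring("<html><body><table><tr><td>GDP data</td></tr></table></body></html>")
# 1. match any descendant of a table whose text matches the pattern,
# 2. then walk back up to the enclosing <table>.
expr = "//table//*[re:test(text(), 'GDP')]/ancestor::table"
tables = doc.xpath(expr, namespaces={"re": "http://exslt.org/regular-expressions"})
print(len(tables))  # 1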
pandas/tests/series/test_missing.py
46 issues
Line: 76
Column: 3
td1[2] = td[2]
assert not isna(td1[2])
# FIXME: don't leave commented-out
# boolean setting
# this doesn't work, not sure numpy even supports it
# result = td[(td>np.timedelta64(timedelta(days=3))) &
# td<np.timedelta64(timedelta(days=7)))] = np.nan
# assert isna(result).sum() == 7
Reported by Pylint.
Line: 93
Column: 20
def test_valid(self, datetime_series):
ts = datetime_series.copy()
ts.index = ts.index._with_freq(None)
ts[::2] = np.NaN
result = ts.dropna()
assert len(result) == ts.count()
tm.assert_series_equal(result, ts[1::2])
Reported by Pylint.
Line: 106
Column: 25
# GH#19700
idx = Index([0, 1])
assert idx.hasnans is False
assert "hasnans" in idx._cache
ser = idx.to_series()
assert ser.hasnans is False
assert not hasattr(ser, "_cache")
ser.iloc[-1] = np.nan
assert ser.hasnans is True
Reported by Pylint.
Line: 1
Column: 1
from datetime import timedelta
import numpy as np
from pandas._libs import iNaT
import pandas as pd
from pandas import (
Categorical,
Reported by Pylint.
Line: 18
Column: 1
import pandas._testing as tm
class TestSeriesMissingData:
def test_categorical_nan_handling(self):
# NaNs are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
Reported by Pylint.
Line: 19
Column: 5
class TestSeriesMissingData:
def test_categorical_nan_handling(self):
# NaNs are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(
Reported by Pylint.
Line: 22
Column: 9
def test_categorical_nan_handling(self):
# NaNs are represented as -1 in labels
s = Series(Categorical(["a", "b", np.nan, "a"]))
tm.assert_index_equal(s.cat.categories, Index(["a", "b"]))
tm.assert_numpy_array_equal(
s.values.codes, np.array([0, 1, -1, 0], dtype=np.int8)
)
Reported by Pylint.
Line: 28
Column: 5
s.values.codes, np.array([0, 1, -1, 0], dtype=np.int8)
)
def test_isna_for_inf(self):
s = Series(["a", np.inf, np.nan, pd.NA, 1.0])
with pd.option_context("mode.use_inf_as_na", True):
r = s.isna()
dr = s.dropna()
e = Series([False, True, True, True, False])
Reported by Pylint.
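For readers unfamiliar with the option exercised in test_isna_for_inf: under "mode.use_inf_as_na", isna() and dropna() treat infinities like missing values. A minimal illustration, not part of the test file:

import numpy as np
import pandas as pd

s = pd.Series([1.0, np.inf, np.nan])
print(s.isna().tolist())              # [False, False, True]
with pd.option_context("mode.use_inf_as_na", True):
    print(s.isna().tolist())          # [False, True, True]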
pandas/tests/dtypes/cast/test_promote.py
46 issues
Line: 9
Column: 1
from decimal import Decimal
import numpy as np
import pytest
from pandas._libs.tslibs import NaT
from pandas.core.dtypes.cast import maybe_promote
from pandas.core.dtypes.common import (
Reported by Pylint.
Line: 311
Column: 38
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_bool_with_any(any_numpy_dtype_reduced):
dtype = np.dtype(bool)
fill_dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
Reported by Pylint.
Line: 325
Column: 38
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_any_with_bool(any_numpy_dtype_reduced):
dtype = np.dtype(any_numpy_dtype_reduced)
fill_value = True
# filling anything but bool with bool casts to object
expected_dtype = np.dtype(object) if dtype != bool else dtype
Reported by Pylint.
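The comment in this test states the promotion rule being checked: filling any non-bool dtype with a boolean fill value promotes to object. maybe_promote is an internal pandas API, so the sketch below is only illustrative and its exact behaviour may differ across versions:

import numpy as np
from pandas.core.dtypes.cast import maybe_promote

dtype, fill_value = maybe_promote(np.dtype("int64"), True)
print(dtype)   # object: per the rule above, a bool fill promotes non-bool dtypes
dtype, fill_value = maybe_promote(np.dtype("bool"), True)
print(dtype)   # bool: no promotion needed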
Line: 337
Column: 52
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_bytes_with_any(bytes_dtype, any_numpy_dtype_reduced):
dtype = np.dtype(bytes_dtype)
fill_dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
Reported by Pylint.
Line: 351
Column: 39
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_any_with_bytes(any_numpy_dtype_reduced, bytes_dtype):
dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype
fill_value = b"abc"
Reported by Pylint.
Line: 351
Column: 64
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_any_with_bytes(any_numpy_dtype_reduced, bytes_dtype):
dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype
fill_value = b"abc"
Reported by Pylint.
Line: 365
Column: 62
_check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
def test_maybe_promote_datetime64_with_any(datetime64_dtype, any_numpy_dtype_reduced):
dtype = np.dtype(datetime64_dtype)
fill_dtype = np.dtype(any_numpy_dtype_reduced)
# create array of given dtype; casts "1" to correct dtype
fill_value = np.array([1], dtype=fill_dtype)[0]
Reported by Pylint.
Line: 395
Column: 5
ids=["pd.Timestamp", "np.datetime64", "datetime.datetime", "datetime.date"],
)
def test_maybe_promote_any_with_datetime64(
any_numpy_dtype_reduced, datetime64_dtype, fill_value
):
dtype = np.dtype(any_numpy_dtype_reduced)
# filling datetime with anything but datetime casts to object
if is_datetime64_dtype(dtype):
Reported by Pylint.
Line: 395
Column: 30
ids=["pd.Timestamp", "np.datetime64", "datetime.datetime", "datetime.date"],
)
def test_maybe_promote_any_with_datetime64(
any_numpy_dtype_reduced, datetime64_dtype, fill_value
):
dtype = np.dtype(any_numpy_dtype_reduced)
# filling datetime with anything but datetime casts to object
if is_datetime64_dtype(dtype):
Reported by Pylint.
Line: 430
Column: 5
ids=["pd.Timestamp", "np.datetime64", "datetime.datetime", "datetime.date"],
)
def test_maybe_promote_any_numpy_dtype_with_datetimetz(
any_numpy_dtype_reduced, tz_aware_fixture, fill_value
):
dtype = np.dtype(any_numpy_dtype_reduced)
fill_dtype = DatetimeTZDtype(tz=tz_aware_fixture)
fill_value = pd.Series([fill_value], dtype=fill_dtype)[0]
Reported by Pylint.
pandas/tests/indexing/interval/test_interval_new.py
46 issues
Line: 4
Column: 1
import re
import numpy as np
import pytest
from pandas import (
Interval,
IntervalIndex,
Series,
Reported by Pylint.
Line: 37
Column: 13
# missing or not exact
with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='left')")):
indexer_sl(ser)[Interval(3, 5, closed="left")]
with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='right')")):
indexer_sl(ser)[Interval(3, 5)]
with pytest.raises(
Reported by Pylint.
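The "# missing or not exact" comment captures the rule these assertions check: indexing with an Interval key succeeds only on an exact match (same endpoints and closed side), while an overlapping-but-different interval raises KeyError. A small sketch, independent of the fixtures used here:

import pandas as pd

ser = pd.Series(range(5), index=pd.IntervalIndex.from_breaks(range(6)))
print(ser.loc[pd.Interval(0, 1)])   # 0: exact match with (0, 1]
try:
    ser.loc[pd.Interval(0, 2)]      # overlaps (0, 1] and (1, 2] but matches neither
except KeyError as err:
    print("KeyError:", err)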
Line: 40
Column: 13
indexer_sl(ser)[Interval(3, 5, closed="left")]
with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='right')")):
indexer_sl(ser)[Interval(3, 5)]
with pytest.raises(
KeyError, match=re.escape("Interval(-2, 0, closed='right')")
):
indexer_sl(ser)[Interval(-2, 0)]
Reported by Pylint.
Line: 45
Column: 13
with pytest.raises(
KeyError, match=re.escape("Interval(-2, 0, closed='right')")
):
indexer_sl(ser)[Interval(-2, 0)]
with pytest.raises(KeyError, match=re.escape("Interval(5, 6, closed='right')")):
indexer_sl(ser)[Interval(5, 6)]
def test_loc_with_scalar(self, series_with_interval_index, indexer_sl):
Reported by Pylint.
Line: 48
Column: 13
indexer_sl(ser)[Interval(-2, 0)]
with pytest.raises(KeyError, match=re.escape("Interval(5, 6, closed='right')")):
indexer_sl(ser)[Interval(5, 6)]
def test_loc_with_scalar(self, series_with_interval_index, indexer_sl):
# loc with single label / list of labels:
# - Intervals: only exact matches
Reported by Pylint.
Line: 96
Column: 13
msg = "Interval objects are not currently supported"
with pytest.raises(NotImplementedError, match=msg):
indexer_sl(ser)[Interval(3, 6) :]
with pytest.raises(NotImplementedError, match=msg):
indexer_sl(ser)[Interval(3, 4, closed="left") :]
def test_slice_step_ne1(self, series_with_interval_index):
Reported by Pylint.
Line: 99
Column: 13
indexer_sl(ser)[Interval(3, 6) :]
with pytest.raises(NotImplementedError, match=msg):
indexer_sl(ser)[Interval(3, 4, closed="left") :]
def test_slice_step_ne1(self, series_with_interval_index):
# GH#31658 slice of scalar with step != 1
ser = series_with_interval_index.copy()
expected = ser.iloc[0:4:2]
Reported by Pylint.
Line: 119
Column: 13
msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
with pytest.raises(ValueError, match=msg):
ser[1.5:9.5:2]
def test_slice_interval_step(self, series_with_interval_index):
# GH#31658 allows for integer step!=1, not Interval step
ser = series_with_interval_index.copy()
msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
Reported by Pylint.
Line: 126
Column: 13
ser = series_with_interval_index.copy()
msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
with pytest.raises(ValueError, match=msg):
ser[0 : 4 : Interval(0, 1)]
def test_loc_with_overlap(self, indexer_sl):
idx = IntervalIndex.from_tuples([(1, 5), (3, 7)])
ser = Series(range(len(idx)), index=idx)
Reported by Pylint.
Line: 144
Column: 9
# interval
expected = 0
result = indexer_sl(ser)[Interval(1, 5)]
result == expected
expected = ser
result = indexer_sl(ser)[[Interval(1, 5), Interval(3, 7)]]
tm.assert_series_equal(expected, result)
Reported by Pylint.
pandas/tests/io/formats/style/test_non_unique.py
46 issues
Line: 3
Column: 1
from textwrap import dedent
import pytest
from pandas import (
DataFrame,
IndexSlice,
)
Reported by Pylint.
Line: 26
Column: 12
@pytest.fixture
def styler(df):
return Styler(df, uuid_len=0)
def test_format_non_unique(df):
# GH 41269
Reported by Pylint.
Line: 30
Column: 28
return Styler(df, uuid_len=0)
def test_format_non_unique(df):
# GH 41269
# test dict
html = df.style.format({"d": "{:.1f}"}).to_html()
for val in ["1.000000<", "4.000000<", "7.000000<"]:
Reported by Pylint.
Line: 49
Column: 43
@pytest.mark.parametrize("func", ["apply", "applymap"])
def test_apply_applymap_non_unique_raises(df, func):
# GH 41269
if func == "apply":
op = lambda s: ["color: red;"] * len(s)
else:
op = lambda v: "color: red;"
Reported by Pylint.
Line: 57
Column: 9
op = lambda v: "color: red;"
with pytest.raises(KeyError, match="`Styler.apply` and `.applymap` are not"):
getattr(df.style, func)(op)._compute()
def test_table_styles_dict_non_unique_index(styler):
styles = styler.set_table_styles(
{"j": [{"selector": "td", "props": "a: v;"}]}, axis=1
Reported by Pylint.
Line: 60
Column: 45
getattr(df.style, func)(op)._compute()
def test_table_styles_dict_non_unique_index(styler):
styles = styler.set_table_styles(
{"j": [{"selector": "td", "props": "a: v;"}]}, axis=1
).table_styles
assert styles == [
{"selector": "td.row1", "props": [("a", "v")]},
Reported by Pylint.
Line: 70
Column: 47
]
def test_table_styles_dict_non_unique_columns(styler):
styles = styler.set_table_styles(
{"d": [{"selector": "td", "props": "a: v;"}]}, axis=0
).table_styles
assert styles == [
{"selector": "td.col1", "props": [("a", "v")]},
Reported by Pylint.
Line: 80
Column: 37
]
def test_tooltips_non_unique_raises(styler):
# ttips has unique keys
ttips = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "d"], index=["a", "b"])
styler.set_tooltips(ttips=ttips) # OK
# ttips has non-unique columns
Reported by Pylint.
Line: 96
Column: 43
styler.set_tooltips(ttips=ttips)
def test_set_td_classes_non_unique_raises(styler):
# classes has unique keys
classes = DataFrame([["1", "2"], ["3", "4"]], columns=["c", "d"], index=["a", "b"])
styler.set_td_classes(classes=classes) # OK
# classes has non-unique columns
Reported by Pylint.
Line: 112
Column: 34
styler.set_td_classes(classes=classes)
def test_hide_columns_non_unique(styler):
ctx = styler.hide_columns(["d"])._translate(True, True)
assert ctx["head"][0][1]["display_value"] == "c"
assert ctx["head"][0][1]["is_visible"] is True
Reported by Pylint.
pandas/tests/arrays/integer/test_function.py
46 issues
Line: 2
Column: 1
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import FloatingArray
@pytest.mark.parametrize("ufunc", [np.abs, np.sign])
Reported by Pylint.
Line: 29
Column: 63
a = pd.array([1, 2, -3, np.nan])
with np.errstate(invalid="ignore"):
result = ufunc(a)
expected = FloatingArray(ufunc(a.astype(float)), mask=a._mask)
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
with np.errstate(invalid="ignore"):
result = ufunc(s)
Reported by Pylint.
Line: 123
Column: 3
# https://github.com/pandas-dev/pandas/issues/33317
s = pd.Series([], dtype="Int64")
result = s.value_counts()
# TODO: The dtype of the index seems wrong (it's int64 for non-empty)
idx = pd.Index([], dtype="object")
expected = pd.Series([], index=idx, dtype="Int64")
tm.assert_series_equal(result, expected)
Reported by Pylint.
Line: 184
Column: 31
@pytest.mark.parametrize("op", ["sum", "prod", "min", "max"])
def test_dataframe_reductions(op):
# https://github.com/pandas-dev/pandas/pull/32867
# ensure the integers are not cast to float during reductions
df = pd.DataFrame({"a": pd.array([1, 2], dtype="Int64")})
result = df.max()
assert isinstance(result["a"], np.int64)
Reported by Pylint.
Line: 192
Column: 3
assert isinstance(result["a"], np.int64)
# TODO(jreback) - these need testing / are broken
# shift
# set_index (destroys type)
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import FloatingArray
@pytest.mark.parametrize("ufunc", [np.abs, np.sign])
Reported by Pylint.
Line: 11
Column: 1
@pytest.mark.parametrize("ufunc", [np.abs, np.sign])
# np.sign emits a warning with nans, <https://github.com/numpy/numpy/issues/15127>
@pytest.mark.filterwarnings("ignore:invalid value encountered in sign")
def test_ufuncs_single_int(ufunc):
a = pd.array([1, 2, -3, np.nan])
result = ufunc(a)
expected = pd.array(ufunc(a.astype(float)), dtype="Int64")
tm.assert_extension_array_equal(result, expected)
Reported by Pylint.
Line: 13
Column: 5
# np.sign emits a warning with nans, <https://github.com/numpy/numpy/issues/15127>
@pytest.mark.filterwarnings("ignore:invalid value encountered in sign")
def test_ufuncs_single_int(ufunc):
a = pd.array([1, 2, -3, np.nan])
result = ufunc(a)
expected = pd.array(ufunc(a.astype(float)), dtype="Int64")
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
Reported by Pylint.
Line: 18
Column: 5
expected = pd.array(ufunc(a.astype(float)), dtype="Int64")
tm.assert_extension_array_equal(result, expected)
s = pd.Series(a)
result = ufunc(s)
expected = pd.Series(pd.array(ufunc(a.astype(float)), dtype="Int64"))
tm.assert_series_equal(result, expected)
Reported by Pylint.
Line: 25
Column: 1
@pytest.mark.parametrize("ufunc", [np.log, np.exp, np.sin, np.cos, np.sqrt])
def test_ufuncs_single_float(ufunc):
a = pd.array([1, 2, -3, np.nan])
with np.errstate(invalid="ignore"):
result = ufunc(a)
expected = FloatingArray(ufunc(a.astype(float)), mask=a._mask)
tm.assert_extension_array_equal(result, expected)
Reported by Pylint.
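These ufunc tests rely on the masked-array behaviour of the nullable Int64 dtype: numpy ufuncs dispatch to the extension array, the NA mask is carried through, and float-returning ufuncs produce a FloatingArray. A short illustration, assuming a pandas version that provides FloatingArray (as imported above):

import numpy as np
import pandas as pd

a = pd.array([1, 4, 9, None], dtype="Int64")
print(np.abs(a))     # IntegerArray: [1, 4, 9, <NA>], dtype Int64
print(np.sqrt(a))    # FloatingArray: [1.0, 2.0, 3.0, <NA>], dtype Float64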
pandas/tests/apply/test_invalid_arg.py
46 issues
Line: 13
Column: 1
import re
import numpy as np
import pytest
from pandas import (
Categorical,
DataFrame,
Series,
Reported by Pylint.
Line: 77
Column: 14
def test_map_datetimetz_na_action():
values = date_range("2011-01-01", "2011-01-02", freq="H").tz_localize("Asia/Tokyo")
s = Series(values, name="XX")
with pytest.raises(NotImplementedError, match=tm.EMPTY_STRING_PATTERN):
s.map(lambda x: x, na_action="ignore")
Reported by Pylint.
Line: 219
Column: 5
row["D"] = 7
return row
def transform2(row):
if notna(row["C"]) and row["C"].startswith("shin") and row["A"] == "foo":
row["D"] = 7
return row
msg = "'float' object has no attribute 'startswith'"
Reported by Pylint.
Line: 1
Column: 1
# Tests specifically aimed at detecting bad arguments.
# This file is organized by reason for exception.
# 1. always invalid argument values
# 2. missing column(s)
# 3. incompatible ops/dtype/args/kwargs
# 4. invalid result shape/type
# If your test does not fit into one of these categories, add to this list.
from itertools import chain
Reported by Pylint.
Line: 27
Column: 1
@pytest.mark.parametrize("result_type", ["foo", 1])
def test_result_type_error(result_type, int_frame_const_col):
# allowed result_type
df = int_frame_const_col
msg = (
"invalid value for result_type, must be one of "
Reported by Pylint.
Line: 29
Column: 5
@pytest.mark.parametrize("result_type", ["foo", 1])
def test_result_type_error(result_type, int_frame_const_col):
# allowed result_type
df = int_frame_const_col
msg = (
"invalid value for result_type, must be one of "
"{None, 'reduce', 'broadcast', 'expand'}"
)
Reported by Pylint.
Line: 39
Column: 1
df.apply(lambda x: [1, 2, 3], axis=1, result_type=result_type)
def test_apply_invalid_axis_value():
df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["a", "a", "c"])
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.apply(lambda x: x, 2)
Reported by Pylint.
Line: 40
Column: 5
def test_apply_invalid_axis_value():
df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=["a", "a", "c"])
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.apply(lambda x: x, 2)
Reported by Pylint.
Line: 46
Column: 1
df.apply(lambda x: x, 2)
def test_applymap_invalid_na_action(float_frame):
# GH 23803
with pytest.raises(ValueError, match="na_action must be .*Got 'abc'"):
float_frame.applymap(lambda x: len(str(x)), na_action="abc")
Reported by Pylint.