The following issues were found:
pandas/tests/test_common.py
66 issues
Line: 6
Column: 1
import string
import numpy as np
import pytest
import pandas as pd
from pandas import Series
import pandas._testing as tm
from pandas.core import ops
Reported by Pylint.
Line: 28
Column: 20
class somecall:
def __call__(self):
return x # noqa
assert getname(fn) == "fn"
assert getname(lambda_)
assert getname(part1) == "fn"
assert getname(part2) == "fn"
Reported by Pylint.
Line: 54
Column: 31
# Check with seed
state = com.random_state(5)
assert state.uniform() == npr.RandomState(5).uniform()
# Check with random state object
state2 = npr.RandomState(10)
assert com.random_state(state2).uniform() == npr.RandomState(10).uniform()
Reported by Pylint.
Line: 57
Column: 14
assert state.uniform() == npr.RandomState(5).uniform()
# Check with random state object
state2 = npr.RandomState(10)
assert com.random_state(state2).uniform() == npr.RandomState(10).uniform()
# check with no arg random state
assert com.random_state() is np.random
Reported by Pylint.
Line: 58
Column: 50
# Check with random state object
state2 = npr.RandomState(10)
assert com.random_state(state2).uniform() == npr.RandomState(10).uniform()
# check with no arg random state
assert com.random_state() is np.random
# check array-like
Reported by Pylint.
Line: 68
Column: 12
state_arr_like = npr.randint(0, 2 ** 31, size=624, dtype="uint32")
assert (
com.random_state(state_arr_like).uniform()
== npr.RandomState(state_arr_like).uniform()
)
# Check BitGenerators
# GH32503
assert (
Reported by Pylint.
Line: 75
Column: 12
# GH32503
assert (
com.random_state(npr.MT19937(3)).uniform()
== npr.RandomState(npr.MT19937(3)).uniform()
)
assert (
com.random_state(npr.PCG64(11)).uniform()
== npr.RandomState(npr.PCG64(11)).uniform()
)
Reported by Pylint.
Line: 79
Column: 12
)
assert (
com.random_state(npr.PCG64(11)).uniform()
== npr.RandomState(npr.PCG64(11)).uniform()
)
# Error for floats or strings
msg = (
"random_state must be an integer, array-like, a BitGenerator, Generator, "
Reported by Pylint.
Line: 152
Column: 64
@pytest.mark.parametrize(
"obj", [(obj,) for obj in pd.__dict__.values() if callable(obj)]
)
def test_serializable(obj):
# GH 35611
unpickled = tm.round_trip_pickle(obj)
assert type(obj) == type(unpickled)
Reported by Pylint.
Line: 106
Column: 12
],
)
def test_maybe_match_name(left, right, expected):
assert ops.common._maybe_match_name(left, right) == expected
def test_standardize_mapping():
# No uninitialized defaultdicts
msg = r"to_dict\(\) only accepts initialized defaultdicts"
Reported by Pylint.
pandas/tests/io/parser/test_c_parser_only.py
66 issues
Line: 18
Column: 1
import tarfile
import numpy as np
import pytest
from pandas.compat import IS64
from pandas.errors import ParserError
import pandas.util._test_decorators as td
Reported by Pylint.
Line: 66
Column: 20
parser.read_csv(
StringIO(data), compression="gzip", delim_whitespace=True
)
except Exception:
pass
def test_delim_whitespace_custom_terminator(c_parser_only):
# See gh-12912
Reported by Pylint.
Line: 188
Column: 49
actual_val = Decimal(text[2:])
def error(val):
return abs(Decimal(f"{val:.100}") - actual_val)
normal_errors.append(error(normal_val))
precise_errors.append(error(precise_val))
# round-trip should match float()
Reported by Pylint.
Line: 35
Column: 1
"malformed",
["1\r1\r1\r 1\r 1\r", "1\r1\r1\r 1\r 1\r11\r", "1\r1\r1\r 1\r 1\r11\r1\r"],
ids=["words pointer", "stream pointer", "lines pointer"],
)
def test_buffer_overflow(c_parser_only, malformed):
# see gh-9205: test certain malformed input files that cause
# buffer overflows in tokenizer.c
msg = "Buffer overflow caught - possible malformed input file."
parser = c_parser_only
Reported by Pylint.
Line: 46
Column: 1
parser.read_csv(StringIO(malformed))
def test_buffer_rd_bytes(c_parser_only):
# see gh-12098: src->buffer in the C parser can be freed twice leading
# to a segfault if a corrupt gzip file is read with 'read_csv', and the
# buffer is filled more than once before gzip raises an Exception.
data = (
Reported by Pylint.
Line: 66
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b110_try_except_pass.html
parser.read_csv(
StringIO(data), compression="gzip", delim_whitespace=True
)
except Exception:
pass
def test_delim_whitespace_custom_terminator(c_parser_only):
# See gh-12912
Reported by Bandit.
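The B110 finding above points at the bare try/except/pass wrapped around the read_csv call in test_buffer_rd_bytes. A minimal sketch of one way to satisfy the check, assuming the test genuinely wants to ignore any error raised while reading the corrupt gzip payload (the parser fixture and data string stand in for the test's own objects):

import contextlib
from io import StringIO

def read_corrupt_gzip(parser, data: str) -> None:
    # Same behaviour as ``try: ... except Exception: pass`` but self-documenting;
    # Bandit's B110 check only matches the literal try/except/pass pattern.
    with contextlib.suppress(Exception):
        parser.read_csv(StringIO(data), compression="gzip", delim_whitespace=True)

Catching a narrower exception than Exception would be better still when the expected failure mode is known.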
Line: 70
Column: 1
pass
def test_delim_whitespace_custom_terminator(c_parser_only):
# See gh-12912
data = "a b c~1 2 3~4 5 6~7 8 9"
parser = c_parser_only
df = parser.read_csv(StringIO(data), lineterminator="~", delim_whitespace=True)
Reported by Pylint.
Line: 75
Column: 5
data = "a b c~1 2 3~4 5 6~7 8 9"
parser = c_parser_only
df = parser.read_csv(StringIO(data), lineterminator="~", delim_whitespace=True)
expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"])
tm.assert_frame_equal(df, expected)
def test_dtype_and_names_error(c_parser_only):
Reported by Pylint.
Line: 80
Column: 1
tm.assert_frame_equal(df, expected)
def test_dtype_and_names_error(c_parser_only):
# see gh-8833: passing both dtype and names
# resulting in an error reporting issue
parser = c_parser_only
data = """
1.0 1
Reported by Pylint.
Line: 147
Column: 1
("the dtype <U8 is not supported for parsing", {"dtype": {"A": "U8"}}),
],
ids=["dt64-0", "dt64-1", "td64", "<U8"],
)
def test_unsupported_dtype(c_parser_only, match, kwargs):
parser = c_parser_only
df = DataFrame(
np.random.rand(5, 2), columns=list("AB"), index=["1A", "1B", "1C", "1D", "1E"]
)
Reported by Pylint.
pandas/tests/indexes/timedeltas/test_timedelta.py
66 issues
Line: 4
Column: 1
from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
Index,
Int64Index,
Reported by Pylint.
Line: 47
Column: 15
def test_pickle_after_set_freq(self):
tdi = timedelta_range("1 day", periods=4, freq="s")
tdi = tdi._with_freq(None)
res = tm.round_trip_pickle(tdi)
tm.assert_index_equal(res, tdi)
def test_isin(self):
Reported by Pylint.
Line: 68
Column: 30
def test_misc_coverage(self):
rng = timedelta_range("1 day", periods=5)
result = rng.groupby(rng.days)
assert isinstance(list(result.values())[0][0], Timedelta)
def test_map(self):
# test_map_dictlike generally tests
Reported by Pylint.
Line: 69
Column: 32
rng = timedelta_range("1 day", periods=5)
result = rng.groupby(rng.days)
assert isinstance(list(result.values())[0][0], Timedelta)
def test_map(self):
# test_map_dictlike generally tests
rng = timedelta_range("1 day", periods=10)
Reported by Pylint.
Line: 86
Column: 26
rng = timedelta_range("1 days", "10 days")
idx = Index(rng, dtype=object)
expected = Index(rng.to_pytimedelta(), dtype=object)
tm.assert_numpy_array_equal(idx.values, expected.values)
def test_fields(self):
rng = timedelta_range("1 days, 10:11:12.100123456", periods=2, freq="s")
Reported by Pylint.
Line: 92
Column: 31
def test_fields(self):
rng = timedelta_range("1 days, 10:11:12.100123456", periods=2, freq="s")
tm.assert_index_equal(rng.days, Index([1, 1], dtype="int64"))
tm.assert_index_equal(
rng.seconds,
Index([10 * 3600 + 11 * 60 + 12, 10 * 3600 + 11 * 60 + 13], dtype="int64"),
)
tm.assert_index_equal(
Reported by Pylint.
pandas/tests/series/test_ufunc.py
66 issues
Line: 5
Column: 1
import string
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.arrays import SparseArray
Reported by Pylint.
Line: 51
Column: 55
@pytest.mark.parametrize("ufunc", BINARY_UFUNCS)
@pytest.mark.parametrize("sparse", SPARSE, ids=SPARSE_IDS)
@pytest.mark.parametrize("flip", [True, False], ids=["flipped", "straight"])
def test_binary_ufunc_with_array(flip, sparse, ufunc, arrays_for_binary_ufunc):
# Test that ufunc(pd.Series(a), array) == pd.Series(ufunc(a, b))
a1, a2 = arrays_for_binary_ufunc
if sparse:
a1 = SparseArray(a1, dtype=pd.SparseDtype("int64", 0))
a2 = SparseArray(a2, dtype=pd.SparseDtype("int64", 0))
Reported by Pylint.
Line: 77
Column: 55
@pytest.mark.parametrize("ufunc", BINARY_UFUNCS)
@pytest.mark.parametrize("sparse", SPARSE, ids=SPARSE_IDS)
@pytest.mark.parametrize("flip", [True, False], ids=["flipped", "straight"])
def test_binary_ufunc_with_index(flip, sparse, ufunc, arrays_for_binary_ufunc):
# Test that
# * func(pd.Series(a), pd.Series(b)) == pd.Series(ufunc(a, b))
# * ufunc(Index, pd.Series) dispatches to pd.Series (returns a pd.Series)
a1, a2 = arrays_for_binary_ufunc
if sparse:
Reported by Pylint.
Line: 107
Column: 35
@pytest.mark.parametrize("shuffle", [True, False], ids=["unaligned", "aligned"])
@pytest.mark.parametrize("flip", [True, False], ids=["flipped", "straight"])
def test_binary_ufunc_with_series(
flip, shuffle, sparse, ufunc, arrays_for_binary_ufunc
):
# Test that
# * func(pd.Series(a), pd.Series(b)) == pd.Series(ufunc(a, b))
# with alignment between the indices
a1, a2 = arrays_for_binary_ufunc
Reported by Pylint.
Line: 147
Column: 51
@pytest.mark.parametrize("ufunc", BINARY_UFUNCS)
@pytest.mark.parametrize("sparse", SPARSE, ids=SPARSE_IDS)
@pytest.mark.parametrize("flip", [True, False])
def test_binary_ufunc_scalar(ufunc, sparse, flip, arrays_for_binary_ufunc):
# Test that
# * ufunc(pd.Series, scalar) == pd.Series(ufunc(array, scalar))
# * ufunc(pd.Series, scalar) == ufunc(scalar, pd.Series)
arr, _ = arrays_for_binary_ufunc
if sparse:
Reported by Pylint.
Line: 170
Column: 3
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("ufunc", [np.divmod]) # TODO: any others?
@pytest.mark.parametrize("sparse", SPARSE, ids=SPARSE_IDS)
@pytest.mark.parametrize("shuffle", SHUFFLE)
@pytest.mark.filterwarnings("ignore:divide by zero:RuntimeWarning")
def test_multiple_output_binary_ufuncs(ufunc, sparse, shuffle, arrays_for_binary_ufunc):
# Test that
Reported by Pylint.
Line: 174
Column: 64
@pytest.mark.parametrize("sparse", SPARSE, ids=SPARSE_IDS)
@pytest.mark.parametrize("shuffle", SHUFFLE)
@pytest.mark.filterwarnings("ignore:divide by zero:RuntimeWarning")
def test_multiple_output_binary_ufuncs(ufunc, sparse, shuffle, arrays_for_binary_ufunc):
# Test that
# the same conditions from binary_ufunc_scalar apply to
# ufuncs with multiple outputs.
if sparse and ufunc is np.divmod:
pytest.skip("sparse divmod not implemented.")
Reported by Pylint.
Line: 207
Column: 40
@pytest.mark.parametrize("sparse", SPARSE, ids=SPARSE_IDS)
def test_multiple_output_ufunc(sparse, arrays_for_binary_ufunc):
# Test that the same conditions from unary input apply to multi-output
# ufuncs
arr, _ = arrays_for_binary_ufunc
if sparse:
Reported by Pylint.
Line: 228
Column: 56
@pytest.mark.parametrize("sparse", SPARSE, ids=SPARSE_IDS)
@pytest.mark.parametrize("ufunc", BINARY_UFUNCS)
def test_binary_ufunc_drops_series_name(ufunc, sparse, arrays_for_binary_ufunc):
# Drop the names when they differ.
a1, a2 = arrays_for_binary_ufunc
s1 = pd.Series(a1, name="a")
s2 = pd.Series(a2, name="b")
Reported by Pylint.
Line: 228
Column: 48
@pytest.mark.parametrize("sparse", SPARSE, ids=SPARSE_IDS)
@pytest.mark.parametrize("ufunc", BINARY_UFUNCS)
def test_binary_ufunc_drops_series_name(ufunc, sparse, arrays_for_binary_ufunc):
# Drop the names when they differ.
a1, a2 = arrays_for_binary_ufunc
s1 = pd.Series(a1, name="a")
s2 = pd.Series(a2, name="b")
Reported by Pylint.
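The column numbers for this file point at fixture parameters such as sparse, ufunc and arrays_for_binary_ufunc. The report omits the message text, but the Pylint complaints most often seen on pytest-style fixtures are redefined-outer-name and unused-argument. A hedged sketch of the conventional inline suppression, using an illustrative fixture rather than the pandas one:

import numpy as np
import pytest

@pytest.fixture
def arrays_for_binary_ufunc():
    # Two integer arrays used as ufunc operands.
    return np.arange(10), np.arange(10) * 2

def test_binary_ufunc_add(arrays_for_binary_ufunc):  # pylint: disable=redefined-outer-name
    a1, a2 = arrays_for_binary_ufunc
    assert (np.add(a1, a2) == a1 + a2).all()

Project-wide, the same effect is normally achieved once in the Pylint configuration rather than per test.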
pandas/tests/io/parser/test_parse_dates.py
66 issues
Line: 13
Column: 1
from io import StringIO
from dateutil.parser import parse as du_parse
from hypothesis import (
given,
settings,
strategies as st,
)
import numpy as np
Reported by Pylint.
Line: 19
Column: 1
strategies as st,
)
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import parsing
from pandas._libs.tslibs.parsing import parse_datetime_string
from pandas.compat import (
Reported by Pylint.
Line: 20
Column: 1
)
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import parsing
from pandas._libs.tslibs.parsing import parse_datetime_string
from pandas.compat import (
is_platform_windows,
Reported by Pylint.
Line: 22
Column: 1
import pytest
import pytz
from pandas._libs.tslibs import parsing
from pandas._libs.tslibs.parsing import parse_datetime_string
from pandas.compat import (
is_platform_windows,
np_array_datetime64_compat,
)
Reported by Pylint.
Line: 23
Column: 1
import pytz
from pandas._libs.tslibs import parsing
from pandas._libs.tslibs.parsing import parse_datetime_string
from pandas.compat import (
is_platform_windows,
np_array_datetime64_compat,
)
Reported by Pylint.
Line: 851
Column: 25
df = parser.read_csv(
StringIO(data),
names=["time", "Q", "NTU"],
date_parser=lambda d: du_parse(d, **kwargs),
header=0,
index_col=0,
parse_dates=True,
na_values=["NA"],
)
Reported by Pylint.
Line: 875
Column: 29
parser.read_csv(
StringIO(data),
names=["time", "Q", "NTU"],
date_parser=lambda d: du_parse(d, **kwargs),
skiprows=[0],
index_col=0,
parse_dates=True,
na_values=["NA"],
)
Reported by Pylint.
Line: 1147
Column: 58
@pytest.mark.parametrize("parse_dates", [(1,), np.array([4, 5]), {1, 3, 3}])
def test_read_with_parse_dates_invalid_type(all_parsers, parse_dates):
parser = all_parsers
msg = (
"Only booleans, lists, and dictionaries "
"are accepted for the 'parse_dates' parameter"
)
Reported by Pylint.
Line: 1583
Column: 9
result = call(date_string, **kwargs)
except ValueError as er:
msg = str(er)
pass
return msg, result
@given(date_strategy)
@settings(deadline=None)
Reported by Pylint.
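The snippet at line 1583 ends its except ValueError block with a pass that follows msg = str(er) and therefore does nothing; the message text is omitted, but this is the pattern Pylint's unnecessary-pass check reports. A hedged sketch of the tidier form, with a hypothetical helper name and signature:

def _try_parse(call, date_string, **kwargs):
    # Run the parser and capture either the result or the error message.
    msg, result = None, None
    try:
        result = call(date_string, **kwargs)
    except ValueError as er:
        msg = str(er)  # the trailing ``pass`` added nothing here
    return msg, result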
pandas/tests/extension/test_categorical.py
65 issues
Line: 19
Column: 1
import string
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
Reported by Pylint.
Line: 90
Column: 33
class TestInterface(base.BaseInterfaceTests):
@pytest.mark.skip(reason="Memory usage doesn't match")
def test_memory_usage(self, data):
# Is this deliberate?
super().test_memory_usage(data)
def test_contains(self, data, data_missing):
# GH-37867
Reported by Pylint.
Line: 94
Column: 29
# Is this deliberate?
super().test_memory_usage(data)
def test_contains(self, data, data_missing):
# GH-37867
# na value handling in Categorical.__contains__ is deprecated.
# See base.BaseInterFaceTests.test_contains for more details.
na_value = data.dtype.na_value
Reported by Pylint.
Line: 94
Column: 35
# Is this deliberate?
super().test_memory_usage(data)
def test_contains(self, data, data_missing):
# GH-37867
# na value handling in Categorical.__contains__ is deprecated.
# See base.BaseInterFaceTests.test_contains for more details.
na_value = data.dtype.na_value
Reported by Pylint.
Line: 99
Column: 9
# na value handling in Categorical.__contains__ is deprecated.
# See base.BaseInterFaceTests.test_contains for more details.
na_value = data.dtype.na_value
# ensure data without missing values
data = data[~data.isna()]
# first elements are non-missing
assert data[0] in data
Reported by Pylint.
Line: 120
Column: 26
class TestConstructors(base.BaseConstructorsTests):
def test_empty(self, dtype):
cls = dtype.construct_array_type()
result = cls._empty((4,), dtype=dtype)
assert isinstance(result, cls)
# the dtype we passed is not initialized, so will not match the
Reported by Pylint.
Line: 122
Column: 18
class TestConstructors(base.BaseConstructorsTests):
def test_empty(self, dtype):
cls = dtype.construct_array_type()
result = cls._empty((4,), dtype=dtype)
assert isinstance(result, cls)
# the dtype we passed is not initialized, so will not match the
# dtype on our result.
assert result.dtype == CategoricalDtype([])
Reported by Pylint.
Line: 136
Column: 35
class TestGetitem(base.BaseGetitemTests):
@pytest.mark.skip(reason="Backwards compatibility")
def test_getitem_scalar(self, data):
# CategoricalDtype.type isn't "correct" since it should
# be a parent of the elements (object). But don't want
# to break things by changing.
super().test_getitem_scalar(data)
Reported by Pylint.
Line: 149
Column: 37
class TestMissing(base.BaseMissingTests):
@pytest.mark.skip(reason="Not implemented")
def test_fillna_limit_pad(self, data_missing):
super().test_fillna_limit_pad(data_missing)
@pytest.mark.skip(reason="Not implemented")
def test_fillna_limit_backfill(self, data_missing):
super().test_fillna_limit_backfill(data_missing)
Reported by Pylint.
Line: 153
Column: 42
super().test_fillna_limit_pad(data_missing)
@pytest.mark.skip(reason="Not implemented")
def test_fillna_limit_backfill(self, data_missing):
super().test_fillna_limit_backfill(data_missing)
class TestReduce(base.BaseNoReduceTests):
pass
Reported by Pylint.
pandas/tests/frame/methods/test_to_records.py
65 issues
Line: 4
Column: 1
from collections import abc
import numpy as np
import pytest
from pandas import (
CategoricalDtype,
DataFrame,
MultiIndex,
Reported by Pylint.
Line: 1
Column: 1
from collections import abc
import numpy as np
import pytest
from pandas import (
CategoricalDtype,
DataFrame,
MultiIndex,
Reported by Pylint.
Line: 17
Column: 1
import pandas._testing as tm
class TestDataFrameToRecords:
def test_to_records_timeseries(self):
index = date_range("1/1/2000", periods=10)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["a", "b", "c"])
result = df.to_records()
Reported by Pylint.
Line: 18
Column: 5
class TestDataFrameToRecords:
def test_to_records_timeseries(self):
index = date_range("1/1/2000", periods=10)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["a", "b", "c"])
result = df.to_records()
assert result["index"].dtype == "M8[ns]"
Reported by Pylint.
Line: 20
Column: 9
class TestDataFrameToRecords:
def test_to_records_timeseries(self):
index = date_range("1/1/2000", periods=10)
df = DataFrame(np.random.randn(10, 3), index=index, columns=["a", "b", "c"])
result = df.to_records()
assert result["index"].dtype == "M8[ns]"
result = df.to_records(index=False)
Reported by Pylint.
Line: 23
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
df = DataFrame(np.random.randn(10, 3), index=index, columns=["a", "b", "c"])
result = df.to_records()
assert result["index"].dtype == "M8[ns]"
result = df.to_records(index=False)
def test_to_records_dt64(self):
df = DataFrame(
Reported by Bandit.
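B101 flags the plain assert in test_to_records_timeseries. Bare asserts are the normal idiom in pytest suites (they are only stripped when Python runs with -O, which test runs do not use), so B101 is commonly skipped for test paths; outside of tests, the robust equivalent of the flagged line is an explicit raise. A minimal sketch, assuming the same DataFrame shape as the test:

import numpy as np
from pandas import DataFrame, date_range

def check_to_records_index_dtype() -> None:
    index = date_range("1/1/2000", periods=10)
    df = DataFrame(np.random.randn(10, 3), index=index, columns=["a", "b", "c"])
    result = df.to_records()
    # Explicit check that survives ``python -O``, unlike a bare assert.
    if result["index"].dtype != "M8[ns]":
        raise AssertionError(f"unexpected index dtype: {result['index'].dtype}")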
Line: 27
Column: 5
result = df.to_records(index=False)
def test_to_records_dt64(self):
df = DataFrame(
[["one", "two", "three"], ["four", "five", "six"]],
index=date_range("2012-01-01", "2012-01-02"),
)
Reported by Pylint.
Line: 28
Column: 9
result = df.to_records(index=False)
def test_to_records_dt64(self):
df = DataFrame(
[["one", "two", "three"], ["four", "five", "six"]],
index=date_range("2012-01-01", "2012-01-02"),
)
expected = df.index.values[0]
Reported by Pylint.
pandas/core/indexes/range.py
65 issues
Line: 18
Column: 1
import numpy as np
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._typing import (
Dtype,
npt,
)
Reported by Pylint.
Line: 19
Column: 1
import numpy as np
from pandas._libs import index as libindex
from pandas._libs.lib import no_default
from pandas._typing import (
Dtype,
npt,
)
from pandas.compat.numpy import function as nv
Reported by Pylint.
Line: 402
Column: 10
method: str | None = None,
limit: int | None = None,
tolerance=None,
) -> npt.NDArray[np.intp]:
if com.any_not_none(method, tolerance, limit):
return super()._get_indexer(
target, method=method, tolerance=tolerance, limit=limit
)
Reported by Pylint.
Line: 429
Column: 16
# --------------------------------------------------------------------
def repeat(self, repeats, axis=None) -> Int64Index:
return self._int64index.repeat(repeats, axis=axis)
def delete(self, loc) -> Int64Index: # type: ignore[override]
return self._int64index.delete(loc)
def take(
Reported by Pylint.
Line: 432
Column: 16
return self._int64index.repeat(repeats, axis=axis)
def delete(self, loc) -> Int64Index: # type: ignore[override]
return self._int64index.delete(loc)
def take(
self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs
) -> Int64Index:
with rewrite_exception("Int64Index", type(self).__name__):
Reported by Pylint.
Line: 438
Column: 20
self, indices, axis: int = 0, allow_fill: bool = True, fill_value=None, **kwargs
) -> Int64Index:
with rewrite_exception("Int64Index", type(self).__name__):
return self._int64index.take(
indices,
axis=axis,
allow_fill=allow_fill,
fill_value=fill_value,
**kwargs,
Reported by Pylint.
Line: 508
Column: 43
nv.validate_max(args, kwargs)
return self._minmax("max")
def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]:
"""
Returns the indices that would sort the index and its
underlying data.
Returns
Reported by Pylint.
Line: 535
Column: 16
def factorize(
self, sort: bool = False, na_sentinel: int | None = -1
) -> tuple[npt.NDArray[np.intp], RangeIndex]:
codes = np.arange(len(self), dtype=np.intp)
uniques = self
if sort and self.step < 0:
codes = codes[::-1]
uniques = uniques[::-1]
Reported by Pylint.
Line: 686
Column: 16
and (end_s - step_o <= end_o)
):
return type(self)(start_r, end_r + step_o, step_o)
return self._int64index._union(other, sort=sort)
def _difference(self, other, sort=None):
# optimized set operation if we have another RangeIndex
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
Reported by Pylint.
pandas/tests/indexes/test_index_new.py
65 issues
Line: 7
Column: 1
from decimal import Decimal
import numpy as np
import pytest
from pandas.core.dtypes.common import is_unsigned_integer_dtype
from pandas import (
NA,
Reported by Pylint.
Line: 178
Column: 16
result = Index(cat, dti.dtype)
tm.assert_index_equal(result, dti)
dti2 = dti.tz_localize("Asia/Tokyo")
cat2 = Categorical(dti2)
result = Index(cat2, dti2.dtype)
tm.assert_index_equal(result, dti2)
ii = IntervalIndex.from_breaks(range(5))
Reported by Pylint.
Line: 222
Column: 20
def test_constructor_datetime64_values_mismatched_period_dtype(self):
dti = date_range("2016-01-01", periods=3)
result = Index(dti, dtype="Period[D]")
expected = dti.to_period("D")
tm.assert_index_equal(result, expected)
class TestIndexConstructorUnwrapping:
# Test passing different arraylike values to pd.Index
Reported by Pylint.
Line: 160
Column: 16
rng = Index(range(5))
result = Index(rng, dtype=dtype)
assert result.dtype == dtype
result = Index(range(5), dtype=dtype)
assert result.dtype == dtype
@pytest.mark.parametrize("dtype", [object, "float64", "uint64", "category"])
Reported by Pylint.
Line: 163
Column: 16
assert result.dtype == dtype
result = Index(range(5), dtype=dtype)
assert result.dtype == dtype
@pytest.mark.parametrize("dtype", [object, "float64", "uint64", "category"])
def test_constructor_categorical_values_mismatched_non_ea_dtype(self, dtype):
cat = Categorical([1, 2, 3])
Reported by Pylint.
Line: 170
Column: 16
cat = Categorical([1, 2, 3])
result = Index(cat, dtype=dtype)
assert result.dtype == dtype
def test_constructor_categorical_values_mismatched_dtype(self):
dti = date_range("2016-01-01", periods=3)
cat = Categorical(dti)
result = Index(cat, dti.dtype)
Reported by Pylint.
Line: 33
Column: 1
import pandas._testing as tm
class TestIndexConstructorInference:
@pytest.mark.parametrize("na_value", [None, np.nan])
@pytest.mark.parametrize("vtype", [list, tuple, iter])
def test_construction_list_tuples_nan(self, na_value, vtype):
# GH#18505 : valid tuples containing NaN
values = [(1, "two"), (3.0, na_value)]
Reported by Pylint.
Line: 36
Column: 5
class TestIndexConstructorInference:
@pytest.mark.parametrize("na_value", [None, np.nan])
@pytest.mark.parametrize("vtype", [list, tuple, iter])
def test_construction_list_tuples_nan(self, na_value, vtype):
# GH#18505 : valid tuples containing NaN
values = [(1, "two"), (3.0, na_value)]
result = Index(vtype(values))
expected = MultiIndex.from_tuples(values)
tm.assert_index_equal(result, expected)
Reported by Pylint.
asv_bench/benchmarks/algorithms.py
65 issues
Line: 5
Column: 1
import numpy as np
import pandas as pd
from .pandas_vb_common import tm
for imp in ["pandas.util", "pandas.tools.hashing"]:
try:
Reported by Pylint.
Line: 7
Column: 1
import pandas as pd
from .pandas_vb_common import tm
for imp in ["pandas.util", "pandas.tools.hashing"]:
try:
hashing = import_module(imp)
break
Reported by Pylint.
Line: 172
Column: 1
self.array.argsort()
from .pandas_vb_common import setup # noqa: F401 isort:skip
Reported by Pylint.
Line: 36
Column: 29
]
param_names = ["unique", "sort", "dtype"]
def setup(self, unique, sort, dtype):
N = 10 ** 5
string_index = tm.makeStringIndex(N)
string_arrow = None
if dtype == "string[pyarrow]":
try:
Reported by Pylint.
Line: 44
Column: 17
try:
string_arrow = pd.array(string_index, dtype="string[pyarrow]")
except ImportError:
raise NotImplementedError
data = {
"int": pd.Int64Index(np.arange(N)),
"uint": pd.UInt64Index(np.arange(N)),
"float": pd.Float64Index(np.random.randn(N)),
Reported by Pylint.
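The finding at line 44 sits on the raise NotImplementedError inside the except ImportError handler. The message is not shown, but a bare raise inside an except block is what Pylint's raise-missing-from check targets; chaining the cause keeps the original ImportError in the traceback. A hedged sketch with an illustrative helper, not the benchmark's own code:

import pandas as pd
import pandas._testing as tm

def build_pyarrow_strings(n: int = 10**5):
    string_index = tm.makeStringIndex(n)
    try:
        return pd.array(string_index, dtype="string[pyarrow]")
    except ImportError as err:
        # ``from err`` preserves the ImportError as the cause of the skip.
        raise NotImplementedError("pyarrow is not installed") from err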
Line: 61
Column: 9
}[dtype]
if not unique:
data = data.repeat(5)
self.data = data
def time_factorize(self, unique, sort, dtype):
pd.factorize(self.data, sort=sort)
Reported by Pylint.
Line: 63
Column: 30
data = data.repeat(5)
self.data = data
def time_factorize(self, unique, sort, dtype):
pd.factorize(self.data, sort=sort)
class Duplicated:
Reported by Pylint.
Line: 63
Column: 44
data = data.repeat(5)
self.data = data
def time_factorize(self, unique, sort, dtype):
pd.factorize(self.data, sort=sort)
class Duplicated:
Reported by Pylint.
Line: 76
Column: 29
]
param_names = ["unique", "keep", "dtype"]
def setup(self, unique, keep, dtype):
N = 10 ** 5
data = {
"int": pd.Int64Index(np.arange(N)),
"uint": pd.UInt64Index(np.arange(N)),
"float": pd.Float64Index(np.random.randn(N)),
Reported by Pylint.
Line: 90
Column: 9
}[dtype]
if not unique:
data = data.repeat(5)
self.idx = data
# cache is_unique
self.idx.is_unique
def time_duplicated(self, unique, keep, dtype):
self.idx.duplicated(keep=keep)
Reported by Pylint.
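The last snippet defines self.idx inside setup and then warms pandas' cached is_unique property with a bare self.idx.is_unique expression. The message is not included, but the two complaints Pylint most often raises on asv setup methods are attribute-defined-outside-init (for self.idx = data, which is standard asv practice) and pointless-statement (for the bare warm-up line). A hedged sketch that keeps the warm-up while making the intent explicit, simplified from the parametrized benchmark:

import numpy as np
import pandas as pd

class Duplicated:
    def setup(self):
        self.idx = pd.Index(np.arange(10**5).repeat(5))
        # Bind the value so the cache warm-up is an assignment, not a bare expression.
        _ = self.idx.is_unique

    def time_duplicated(self):
        self.idx.duplicated(keep="first")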