The following issues were found:
caffe2/python/ideep/pre_convert_test.py
18 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import (
brew,
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import (
brew,
core,
Reported by Pylint.
Line: 21
Column: 22
import caffe2.python.hypothesis_test_util as hu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class PreConvertTest(hu.HypothesisTestCase):
@given(input_channels=st.integers(15, 16),
batch_size=st.integers(1, 3))
def test_preConvert(self, input_channels, batch_size):
def AddModel(model, data):
Reported by Pylint.
Line: 91
Column: 21
if not np.allclose(output_dict[blob], output_dict_cosim[blob], atol=0.001, rtol=0.0001):
print("blob {} error".format(blob))
print(np.max(np.abs(output_dict[blob] - output_dict_cosim[blob])))
self.assertTrue(False)
workspace.ResetWorkspace()
workspace.SwitchWorkspace(old_ws_name)
if __name__ == "__main__":
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
Reported by Pylint.
Line: 22
Column: 1
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class PreConvertTest(hu.HypothesisTestCase):
@given(input_channels=st.integers(15, 16),
batch_size=st.integers(1, 3))
def test_preConvert(self, input_channels, batch_size):
def AddModel(model, data):
conv1 = brew.conv(model, data, 'conv1', dim_in=input_channels,
Reported by Pylint.
Line: 25
Column: 5
class PreConvertTest(hu.HypothesisTestCase):
@given(input_channels=st.integers(15, 16),
batch_size=st.integers(1, 3))
def test_preConvert(self, input_channels, batch_size):
def AddModel(model, data):
conv1 = brew.conv(model, data, 'conv1', dim_in=input_channels,
dim_out=10, kernel=3, stride=1, pad=1, training_mode=1)
deconv1 = brew.conv_transpose(model, conv1, 'deconv1', dim_in=10, dim_out=10,
kernel=2, stride=2, pad=0, training_mode=1)
Reported by Pylint.
Line: 25
Column: 5
class PreConvertTest(hu.HypothesisTestCase):
@given(input_channels=st.integers(15, 16),
batch_size=st.integers(1, 3))
def test_preConvert(self, input_channels, batch_size):
def AddModel(model, data):
conv1 = brew.conv(model, data, 'conv1', dim_in=input_channels,
dim_out=10, kernel=3, stride=1, pad=1, training_mode=1)
deconv1 = brew.conv_transpose(model, conv1, 'deconv1', dim_in=10, dim_out=10,
kernel=2, stride=2, pad=0, training_mode=1)
Reported by Pylint.
Line: 25
Column: 5
class PreConvertTest(hu.HypothesisTestCase):
@given(input_channels=st.integers(15, 16),
batch_size=st.integers(1, 3))
def test_preConvert(self, input_channels, batch_size):
def AddModel(model, data):
conv1 = brew.conv(model, data, 'conv1', dim_in=input_channels,
dim_out=10, kernel=3, stride=1, pad=1, training_mode=1)
deconv1 = brew.conv_transpose(model, conv1, 'deconv1', dim_in=10, dim_out=10,
kernel=2, stride=2, pad=0, training_mode=1)
Reported by Pylint.
Line: 26
Column: 9
@given(input_channels=st.integers(15, 16),
batch_size=st.integers(1, 3))
def test_preConvert(self, input_channels, batch_size):
def AddModel(model, data):
conv1 = brew.conv(model, data, 'conv1', dim_in=input_channels,
dim_out=10, kernel=3, stride=1, pad=1, training_mode=1)
deconv1 = brew.conv_transpose(model, conv1, 'deconv1', dim_in=10, dim_out=10,
kernel=2, stride=2, pad=0, training_mode=1)
fc1 = brew.fc(model, deconv1, 'fc1', dim_in=10 * 56 * 56, dim_out=3)
Reported by Pylint.
benchmarks/sparse/spmv.py
18 issues
Line: 3
Column: 1
import argparse
import sys
import torch
from .utils import gen_sparse_csr, gen_sparse_coo, gen_sparse_coo_and_csr, Event
def test_sparse_csr(m, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
Reported by Pylint.
Line: 4
Column: 1
import argparse
import sys
import torch
from .utils import gen_sparse_csr, gen_sparse_coo, gen_sparse_coo_and_csr, Event
def test_sparse_csr(m, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
Reported by Pylint.
Line: 6
Column: 21
import torch
from .utils import gen_sparse_csr, gen_sparse_coo, gen_sparse_coo_and_csr, Event
def test_sparse_csr(m, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
csr = gen_sparse_csr((m, m), nnz)
vector = torch.randn(m, dtype=torch.double)
Reported by Pylint.
Line: 6
Column: 24
import torch
from .utils import gen_sparse_csr, gen_sparse_coo, gen_sparse_coo_and_csr, Event
def test_sparse_csr(m, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
csr = gen_sparse_csr((m, m), nnz)
vector = torch.randn(m, dtype=torch.double)
Reported by Pylint.
Line: 6
Column: 29
import torch
from .utils import gen_sparse_csr, gen_sparse_coo, gen_sparse_coo_and_csr, Event
def test_sparse_csr(m, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
csr = gen_sparse_csr((m, m), nnz)
vector = torch.randn(m, dtype=torch.double)
Reported by Pylint.
Line: 22
Column: 29
return sum(times) / len(times)
def test_sparse_coo(m, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
coo = gen_sparse_coo((m, m), nnz)
vector = torch.randn(m, dtype=torch.double)
Reported by Pylint.
Line: 22
Column: 21
return sum(times) / len(times)
def test_sparse_coo(m, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
coo = gen_sparse_coo((m, m), nnz)
vector = torch.randn(m, dtype=torch.double)
Reported by Pylint.
Line: 22
Column: 24
return sum(times) / len(times)
def test_sparse_coo(m, nnz, test_count):
start_timer = Event(enable_timing=True)
stop_timer = Event(enable_timing=True)
coo = gen_sparse_coo((m, m), nnz)
vector = torch.randn(m, dtype=torch.double)
Reported by Pylint.
Line: 38
Column: 29
return sum(times) / len(times)
def test_sparse_coo_and_csr(m, nnz, test_count):
start = Event(enable_timing=True)
stop = Event(enable_timing=True)
coo, csr = gen_sparse_coo_and_csr((m, m), nnz)
vector = torch.randn(m, dtype=torch.double)
Reported by Pylint.
Line: 38
Column: 37
return sum(times) / len(times)
def test_sparse_coo_and_csr(m, nnz, test_count):
start = Event(enable_timing=True)
stop = Event(enable_timing=True)
coo, csr = gen_sparse_coo_and_csr((m, m), nnz)
vector = torch.randn(m, dtype=torch.double)
Reported by Pylint.
caffe2/python/modeling/gradient_clipping.py
18 issues
Line: 27
Column: 5
GRAD_CLIP_METHODS = [BY_NORM, BY_VALUE]
CLIP_GRADIENT_NORM_TYPES = [L2_NORM, L1_NORM]
def __init__(self, grad_clip_method, clip_norm_type='l2_norm',
clip_threshold=0.1, use_parameter_norm=False,
compute_norm_ratio=False, clip_max=1, clip_min=-1,
blobs_to_include=None, blobs_to_exclude=None):
"""
Clips gradient to avoid gradient magnitude explosion or vanishing gradient.
Reported by Pylint.
Line: 69
Column: 5
self.blobs_to_include = blobs_to_include
self.blobs_to_exclude = blobs_to_exclude
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
assert grad_map is not None
CPU = core.DeviceOption(caffe2_pb2.CPU)
Reported by Pylint.
Line: 70
Column: 20
self.blobs_to_exclude = blobs_to_exclude
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
assert grad_map is not None
CPU = core.DeviceOption(caffe2_pb2.CPU)
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
from caffe2.proto import caffe2_pb2
from caffe2.python.optimizer import get_param_device
from caffe2.python.modeling.net_modifier import NetModifier
Reported by Pylint.
Line: 11
Column: 1
from caffe2.python.optimizer import get_param_device
from caffe2.python.modeling.net_modifier import NetModifier
import logging
logger = logging.getLogger(__name__)
class GradientClipping(NetModifier):
Reported by Pylint.
Line: 16
Column: 1
logger = logging.getLogger(__name__)
class GradientClipping(NetModifier):
L1_NORM = 'l1_norm'
L2_NORM = 'l2_norm'
BY_NORM = 'by_norm'
Reported by Pylint.
Line: 16
Column: 1
logger = logging.getLogger(__name__)
class GradientClipping(NetModifier):
L1_NORM = 'l1_norm'
L2_NORM = 'l2_norm'
BY_NORM = 'by_norm'
Reported by Pylint.
Line: 16
Column: 1
logger = logging.getLogger(__name__)
class GradientClipping(NetModifier):
L1_NORM = 'l1_norm'
L2_NORM = 'l2_norm'
BY_NORM = 'by_norm'
Reported by Pylint.
Line: 27
Column: 5
GRAD_CLIP_METHODS = [BY_NORM, BY_VALUE]
CLIP_GRADIENT_NORM_TYPES = [L2_NORM, L1_NORM]
def __init__(self, grad_clip_method, clip_norm_type='l2_norm',
clip_threshold=0.1, use_parameter_norm=False,
compute_norm_ratio=False, clip_max=1, clip_min=-1,
blobs_to_include=None, blobs_to_exclude=None):
"""
Clips gradient to avoid gradient magnitude explosion or vanishing gradient.
Reported by Pylint.
Line: 51
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
blobs_to_exclude: names of blobs whose gradient is not to be clipped.
"""
assert grad_clip_method in self.GRAD_CLIP_METHODS, (
"This method of clipping, {}, has not been implemented.".format(
clip_norm_type))
if clip_norm_type is not None:
assert clip_norm_type in self.CLIP_GRADIENT_NORM_TYPES, (
"This method of clipping, {}, has not been implemented.".format(
Reported by Bandit.
android/pytorch_android/src/main/java/org/pytorch/IValue.java
18 issues
Line: 24
* may return references to their internal state from {@code toX()}.
*/
@DoNotStrip
public class IValue {
private static final int TYPE_CODE_NULL = 1;
private static final int TYPE_CODE_TENSOR = 2;
private static final int TYPE_CODE_BOOL = 3;
private static final int TYPE_CODE_LONG = 4;
Reported by PMD.
Line: 24
* may return references to their internal state from {@code toX()}.
*/
@DoNotStrip
public class IValue {
private static final int TYPE_CODE_NULL = 1;
private static final int TYPE_CODE_TENSOR = 2;
private static final int TYPE_CODE_BOOL = 3;
private static final int TYPE_CODE_LONG = 4;
Reported by PMD.
Line: 24
* may return references to their internal state from {@code toX()}.
*/
@DoNotStrip
public class IValue {
private static final int TYPE_CODE_NULL = 1;
private static final int TYPE_CODE_TENSOR = 2;
private static final int TYPE_CODE_BOOL = 3;
private static final int TYPE_CODE_LONG = 4;
Reported by PMD.
Line: 43
private static final int TYPE_CODE_DICT_STRING_KEY = 13;
private static final int TYPE_CODE_DICT_LONG_KEY = 14;
@DoNotStrip private final int mTypeCode;
@DoNotStrip private Object mData;
@DoNotStrip
private IValue(int typeCode) {
this.mTypeCode = typeCode;
Reported by PMD.
Line: 44
private static final int TYPE_CODE_DICT_LONG_KEY = 14;
@DoNotStrip private final int mTypeCode;
@DoNotStrip private Object mData;
@DoNotStrip
private IValue(int typeCode) {
this.mTypeCode = typeCode;
}
Reported by PMD.
Line: 165
/** Creates a new {@code IValue} of type {@code List[bool]}. */
@DoNotStrip
public static IValue listFrom(boolean... list) {
final IValue iv = new IValue(TYPE_CODE_BOOL_LIST);
iv.mData = list;
return iv;
}
/** Creates a new {@code IValue} of type {@code List[int]}. */
Reported by PMD.
Line: 172
}
/** Creates a new {@code IValue} of type {@code List[int]}. */
@DoNotStrip
public static IValue listFrom(long... list) {
final IValue iv = new IValue(TYPE_CODE_LONG_LIST);
iv.mData = list;
return iv;
}
/** Creates a new {@code IValue} of type {@code List[float]}. */
Reported by PMD.
Line: 179
}
/** Creates a new {@code IValue} of type {@code List[float]}. */
@DoNotStrip
public static IValue listFrom(double... list) {
final IValue iv = new IValue(TYPE_CODE_DOUBLE_LIST);
iv.mData = list;
return iv;
}
Reported by PMD.
Line: 187
/** Creates a new {@code IValue} of type {@code List[Tensor]}. */
@DoNotStrip
public static IValue listFrom(Tensor... list) {
final IValue iv = new IValue(TYPE_CODE_TENSOR_LIST);
iv.mData = list;
return iv;
}
Reported by PMD.
Line: 195
/** Creates a new {@code IValue} of type {@code List[T]}. All elements must have the same type. */
@DoNotStrip
public static IValue listFrom(IValue... array) {
final int size = array.length;
if (size > 0) {
final int typeCode0 = array[0].mTypeCode;
for (int i = 1; i < size; i++) {
if (typeCode0 != array[i].mTypeCode) {
Reported by PMD.
c10/util/variant.h
18 issues
Line: 1139
Column: 11
CWE codes:
362/367!
Suggestion:
Set up the correct permissions (e.g., using setuid()) and try to open the file directly
trait<Ts, std::is_trivially_destructible, std::is_destructible>()...);
};
namespace access {
struct recursive_union {
#ifdef MPARK_RETURN_TYPE_DEDUCTION
template <typename V>
inline static constexpr auto&& get_alt(V&& v, in_place_index_t<0>) {
Reported by FlawFinder.
Line: 1205
Column: 7
CWE codes:
362/367!
Suggestion:
Set up the correct permissions (e.g., using setuid()) and try to open the file directly
template <typename Visitor, typename... Vs>
using dispatch_result_t = decltype(lib::invoke(
std::declval<Visitor>(),
access::base::get_alt<0>(std::declval<Vs>())...));
template <typename Expected>
struct expected {
template <typename Actual>
inline static constexpr bool but_got() {
Reported by FlawFinder.
Line: 1267
Column: 11
CWE codes:
362/367!
Suggestion:
Set up the correct permissions (e.g., using setuid()) and try to open the file directly
using Expected = R;
using Actual = decltype(lib::invoke(
lib::forward<F>(f),
access::base::get_alt<ITs::value>(
lib::forward<typename ITs::type>(visited_vs))...));
return visit_return_type_check<Expected, Actual>::invoke(
lib::forward<F>(f),
access::base::get_alt<ITs::value>(
lib::forward<typename ITs::type>(visited_vs))...);
Reported by FlawFinder.
Line: 1271
Column: 11
CWE codes:
362/367!
Suggestion:
Set up the correct permissions (e.g., using setuid()) and try to open the file directly
lib::forward<typename ITs::type>(visited_vs))...));
return visit_return_type_check<Expected, Actual>::invoke(
lib::forward<F>(f),
access::base::get_alt<ITs::value>(
lib::forward<typename ITs::type>(visited_vs))...);
}
template <std::size_t B, typename F, typename V, typename... Vs>
MPARK_ALWAYS_INLINE static constexpr R dispatch(
Reported by FlawFinder.
Line: 1378
Column: 11
CWE codes:
362/367!
Suggestion:
Set up the correct permissions (e.g., using setuid()) and try to open the file directly
using Expected = R;
using Actual = decltype(lib::invoke(
lib::forward<F>(f),
access::base::get_alt<I>(lib::forward<Vs>(vs))...));
return visit_return_type_check<Expected, Actual>::invoke(
lib::forward<F>(f),
access::base::get_alt<I>(lib::forward<Vs>(vs))...);
}
Reported by FlawFinder.
Line: 1381
Column: 11
CWE codes:
362/367!
Suggestion:
Set up the correct permissions (e.g., using setuid()) and try to open the file directly
access::base::get_alt<I>(lib::forward<Vs>(vs))...));
return visit_return_type_check<Expected, Actual>::invoke(
lib::forward<F>(f),
access::base::get_alt<I>(lib::forward<Vs>(vs))...);
}
template <std::size_t B, typename F, typename V, typename... Vs>
MPARK_ALWAYS_INLINE static constexpr R dispatch_at(
std::size_t index,
Reported by FlawFinder.
Line: 1504
Column: 11
CWE codes:
362/367!
Suggestion:
Set up the correct permissions (e.g., using setuid()) and try to open the file directly
using Expected = dispatch_result_t<F, Vs...>;
using Actual = decltype(lib::invoke(
lib::forward<F>(f),
access::base::get_alt<Is>(lib::forward<Vs>(vs))...));
return visit_return_type_check<Expected, Actual>::invoke(
lib::forward<F>(f),
access::base::get_alt<Is>(lib::forward<Vs>(vs))...);
}
Reported by FlawFinder.
Line: 1507
Column: 11
CWE codes:
362/367!
Suggestion:
Set up the correct permissions (e.g., using setuid()) and try to open the file directly
access::base::get_alt<Is>(lib::forward<Vs>(vs))...));
return visit_return_type_check<Expected, Actual>::invoke(
lib::forward<F>(f),
access::base::get_alt<Is>(lib::forward<Vs>(vs))...);
}
#ifdef MPARK_RETURN_TYPE_DEDUCTION
template <std::size_t... Is>
inline static constexpr auto impl(lib::index_sequence<Is...>) {
Reported by FlawFinder.
Line: 1564
Column: 11
CWE codes:
362/367!
Suggestion:
Set up the correct permissions (e.g., using setuid()) and try to open the file directly
using Expected = dispatch_result_t<F, Vs...>;
using Actual = decltype(lib::invoke(
lib::forward<F>(f),
access::base::get_alt<I>(lib::forward<Vs>(vs))...));
return visit_return_type_check<Expected, Actual>::invoke(
lib::forward<F>(f),
access::base::get_alt<I>(lib::forward<Vs>(vs))...);
}
Reported by FlawFinder.
Line: 1567
Column: 11
CWE codes:
362/367!
Suggestion:
Set up the correct permissions (e.g., using setuid()) and try to open the file directly
access::base::get_alt<I>(lib::forward<Vs>(vs))...));
return visit_return_type_check<Expected, Actual>::invoke(
lib::forward<F>(f),
access::base::get_alt<I>(lib::forward<Vs>(vs))...);
}
template <std::size_t... Is>
inline static constexpr AUTO impl(lib::index_sequence<Is...>)
AUTO_RETURN(make_farray(&dispatch<Is>...))
Reported by FlawFinder.
torch/nn/modules/distance.py
18 issues
Line: 1
Column: 1
from .module import Module
from .. import functional as F
from torch import Tensor
class PairwiseDistance(Module):
r"""
Computes the batchwise pairwise distance between vectors :math:`v_1`, :math:`v_2` using the p-norm:
Reported by Pylint.
Line: 2
Column: 1
from .module import Module
from .. import functional as F
from torch import Tensor
class PairwiseDistance(Module):
r"""
Computes the batchwise pairwise distance between vectors :math:`v_1`, :math:`v_2` using the p-norm:
Reported by Pylint.
Line: 1
Column: 1
from .module import Module
from .. import functional as F
from torch import Tensor
class PairwiseDistance(Module):
r"""
Computes the batchwise pairwise distance between vectors :math:`v_1`, :math:`v_2` using the p-norm:
Reported by Pylint.
Line: 4
Column: 1
from .module import Module
from .. import functional as F
from torch import Tensor
class PairwiseDistance(Module):
r"""
Computes the batchwise pairwise distance between vectors :math:`v_1`, :math:`v_2` using the p-norm:
Reported by Pylint.
Line: 7
Column: 1
from torch import Tensor
class PairwiseDistance(Module):
r"""
Computes the batchwise pairwise distance between vectors :math:`v_1`, :math:`v_2` using the p-norm:
.. math ::
\Vert x \Vert _p = \left( \sum_{i=1}^n \vert x_i \vert ^ p \right) ^ {1/p}.
Reported by Pylint.
Line: 9
Column: 1
class PairwiseDistance(Module):
r"""
Computes the batchwise pairwise distance between vectors :math:`v_1`, :math:`v_2` using the p-norm:
.. math ::
\Vert x \Vert _p = \left( \sum_{i=1}^n \vert x_i \vert ^ p \right) ^ {1/p}.
Args:
Reported by Pylint.
Line: 35
Column: 5
eps: float
keepdim: bool
def __init__(self, p: float = 2., eps: float = 1e-6, keepdim: bool = False) -> None:
super(PairwiseDistance, self).__init__()
self.norm = p
self.eps = eps
self.keepdim = keepdim
Reported by Pylint.
Line: 36
Column: 9
keepdim: bool
def __init__(self, p: float = 2., eps: float = 1e-6, keepdim: bool = False) -> None:
super(PairwiseDistance, self).__init__()
self.norm = p
self.eps = eps
self.keepdim = keepdim
def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
Reported by Pylint.
Line: 41
Column: 5
self.eps = eps
self.keepdim = keepdim
def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
return F.pairwise_distance(x1, x2, self.norm, self.eps, self.keepdim)
class CosineSimilarity(Module):
r"""Returns cosine similarity between :math:`x_1` and :math:`x_2`, computed along `dim`.
Reported by Pylint.
Line: 41
Column: 5
self.eps = eps
self.keepdim = keepdim
def forward(self, x1: Tensor, x2: Tensor) -> Tensor:
return F.pairwise_distance(x1, x2, self.norm, self.eps, self.keepdim)
class CosineSimilarity(Module):
r"""Returns cosine similarity between :math:`x_1` and :math:`x_2`, computed along `dim`.
Reported by Pylint.
torch/utils/data/_utils/collate.py
18 issues
Line: 27
Column: 16
if elem_type.__name__ == 'ndarray' \
and np_str_obj_array_pattern.search(data.dtype.str) is not None:
return data
return torch.as_tensor(data)
elif isinstance(data, collections.abc.Mapping):
return {key: default_convert(data[key]) for key in data}
elif isinstance(data, tuple) and hasattr(data, '_fields'): # namedtuple
return elem_type(*(default_convert(d) for d in data))
elif isinstance(data, collections.abc.Sequence) and not isinstance(data, string_classes):
Reported by Pylint.
Line: 56
Column: 16
numel = sum(x.numel() for x in batch)
storage = elem.storage()._new_shared(numel)
out = elem.new(storage)
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':
# array of string classes and object
if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
Reported by Pylint.
Line: 64
Column: 37
if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
raise TypeError(default_collate_err_msg_format.format(elem.dtype))
return default_collate([torch.as_tensor(b) for b in batch])
elif elem.shape == (): # scalars
return torch.as_tensor(batch)
elif isinstance(elem, float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(elem, int):
Reported by Pylint.
Line: 66
Column: 20
return default_collate([torch.as_tensor(b) for b in batch])
elif elem.shape == (): # scalars
return torch.as_tensor(batch)
elif isinstance(elem, float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(elem, int):
return torch.tensor(batch)
elif isinstance(elem, string_classes):
Reported by Pylint.
Line: 68
Column: 16
elif elem.shape == (): # scalars
return torch.as_tensor(batch)
elif isinstance(elem, float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(elem, int):
return torch.tensor(batch)
elif isinstance(elem, string_classes):
return batch
elif isinstance(elem, collections.abc.Mapping):
Reported by Pylint.
Line: 68
Column: 42
elif elem.shape == (): # scalars
return torch.as_tensor(batch)
elif isinstance(elem, float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(elem, int):
return torch.tensor(batch)
elif isinstance(elem, string_classes):
return batch
elif isinstance(elem, collections.abc.Mapping):
Reported by Pylint.
Line: 70
Column: 16
elif isinstance(elem, float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(elem, int):
return torch.tensor(batch)
elif isinstance(elem, string_classes):
return batch
elif isinstance(elem, collections.abc.Mapping):
return {key: default_collate([d[key] for d in batch]) for key in elem}
elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple
Reported by Pylint.
Line: 54
Column: 23
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum(x.numel() for x in batch)
storage = elem.storage()._new_shared(numel)
out = elem.new(storage)
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':
Reported by Pylint.
Line: 9
Column: 1
"""
import torch
import re
import collections
from torch._six import string_classes
np_str_obj_array_pattern = re.compile(r'[SaUO]')
Reported by Pylint.
Line: 10
Column: 1
import torch
import re
import collections
from torch._six import string_classes
np_str_obj_array_pattern = re.compile(r'[SaUO]')
Reported by Pylint.
torch/onnx/symbolic_opset7.py
18 issues
Line: 29
Column: 1
# NOTE: max, min, sum, mean: broadcasting is not supported in opset 7.
# torch.max (same for torch.min) actually has two interfaces smashed together:
# torch.max(x, dim, keepdim) and torch.max(x, y)
def max(g, self, dim_or_y=None, keepdim=None):
# torch.max(input, other)
if keepdim is None and dim_or_y is not None:
warnings.warn("Multidirectional broadcasting is not supported in opset 7. "
"This might cause the onnx model to be incorrect, if inputs to max operators "
"have different shapes")
Reported by Pylint.
Line: 38
Column: 1
return sym_opset9.max(g, self, dim_or_y, keepdim)
def min(g, self, dim_or_y=None, keepdim=None):
# torch.min(input, other)
if keepdim is None and dim_or_y is not None:
warnings.warn("Multidirectional broadcasting is not supported in opset 7. "
"This might cause the onnx model to be incorrect, if inputs to min operators "
"have different shapes")
Reported by Pylint.
Line: 59
Column: 16
if rounding_mode == "floor":
return _floor_divide(g, self, other)
else:
return sym_opset9._div_rounding_mode(g, self, other, rounding_mode)
def _floor_divide(g, self, other):
if sym_help._is_fp(self) or sym_help._is_fp(other):
out = sym_opset9.true_divide(g, self, other)
Reported by Pylint.
Line: 63
Column: 33
def _floor_divide(g, self, other):
if sym_help._is_fp(self) or sym_help._is_fp(other):
out = sym_opset9.true_divide(g, self, other)
return g.op("Floor", out)
else:
raise RuntimeError("Integer floor division requires ONNX opset 9 or greater")
Reported by Pylint.
Line: 63
Column: 8
def _floor_divide(g, self, other):
if sym_help._is_fp(self) or sym_help._is_fp(other):
out = sym_opset9.true_divide(g, self, other)
return g.op("Floor", out)
else:
raise RuntimeError("Integer floor division requires ONNX opset 9 or greater")
Reported by Pylint.
Line: 1
Column: 1
from torch.onnx.symbolic_helper import _block_list_in_opset, parse_args
import torch.onnx.symbolic_helper as sym_help
import torch.onnx.symbolic_opset9 as sym_opset9
import warnings
# Note [ONNX operators that are added/updated from opset 7 to opset 8]
Reported by Pylint.
Line: 6
Column: 1
import torch.onnx.symbolic_opset9 as sym_opset9
import warnings
# Note [ONNX operators that are added/updated from opset 7 to opset 8]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# New operators:
Reported by Pylint.
Line: 29
Column: 1
# NOTE: max, min, sum, mean: broadcasting is not supported in opset 7.
# torch.max (same for torch.min) actually has two interfaces smashed together:
# torch.max(x, dim, keepdim) and torch.max(x, y)
def max(g, self, dim_or_y=None, keepdim=None):
# torch.max(input, other)
if keepdim is None and dim_or_y is not None:
warnings.warn("Multidirectional broadcasting is not supported in opset 7. "
"This might cause the onnx model to be incorrect, if inputs to max operators "
"have different shapes")
Reported by Pylint.
Line: 29
Column: 1
# NOTE: max, min, sum, mean: broadcasting is not supported in opset 7.
# torch.max (same for torch.min) actually has two interfaces smashed together:
# torch.max(x, dim, keepdim) and torch.max(x, y)
def max(g, self, dim_or_y=None, keepdim=None):
# torch.max(input, other)
if keepdim is None and dim_or_y is not None:
warnings.warn("Multidirectional broadcasting is not supported in opset 7. "
"This might cause the onnx model to be incorrect, if inputs to max operators "
"have different shapes")
Reported by Pylint.
Line: 38
Column: 1
return sym_opset9.max(g, self, dim_or_y, keepdim)
def min(g, self, dim_or_y=None, keepdim=None):
# torch.min(input, other)
if keepdim is None and dim_or_y is not None:
warnings.warn("Multidirectional broadcasting is not supported in opset 7. "
"This might cause the onnx model to be incorrect, if inputs to min operators "
"have different shapes")
Reported by Pylint.
torch/package/importer.py
18 issues
Line: 8
Column: 1
from types import ModuleType
from typing import Any, List, Optional, Tuple, Dict
from ._mangling import demangle, get_mangle_prefix, is_mangled
class ObjNotFoundError(Exception):
"""Raised when an importer cannot find an object by searching for its name."""
Reported by Pylint.
Line: 14
Column: 5
class ObjNotFoundError(Exception):
"""Raised when an importer cannot find an object by searching for its name."""
pass
class ObjMismatchError(Exception):
"""Raised when an importer found a different object with the same name as the user-provided one."""
Reported by Pylint.
Line: 20
Column: 5
class ObjMismatchError(Exception):
"""Raised when an importer found a different object with the same name as the user-provided one."""
pass
class Importer(ABC):
"""Represents an environment to import modules from.
Reported by Pylint.
Line: 52
Column: 9
The contract is the same as for importlib.import_module.
"""
pass
def get_name(self, obj: Any, name: Optional[str] = None) -> Tuple[str, str]:
"""Given an object, return a name that can be used to retrieve the
object from this environment.
Reported by Pylint.
Line: 76
Column: 3
if name is None and obj and _Pickler.dispatch.get(type(obj)) is None:
# Honor the string return variant of __reduce__, which will give us
# a global name to search for in this environment.
# TODO: I guess we should do copyreg too?
reduce = getattr(obj, "__reduce__", None)
if reduce is not None:
try:
rv = reduce()
if isinstance(rv, str):
Reported by Pylint.
Line: 83
Column: 24
rv = reduce()
if isinstance(rv, str):
name = rv
except Exception:
pass
if name is None:
name = getattr(obj, "__qualname__", None)
if name is None:
name = obj.__name__
Reported by Pylint.
Line: 1
Column: 1
import importlib
from abc import ABC, abstractmethod
from pickle import _getattribute, _Pickler # type: ignore[attr-defined]
from pickle import whichmodule as _pickle_whichmodule # type: ignore[attr-defined]
from types import ModuleType
from typing import Any, List, Optional, Tuple, Dict
from ._mangling import demangle, get_mangle_prefix, is_mangled
Reported by Pylint.
Line: 3
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b403-import-pickle
import importlib
from abc import ABC, abstractmethod
from pickle import _getattribute, _Pickler # type: ignore[attr-defined]
from pickle import whichmodule as _pickle_whichmodule # type: ignore[attr-defined]
from types import ModuleType
from typing import Any, List, Optional, Tuple, Dict
from ._mangling import demangle, get_mangle_prefix, is_mangled
Reported by Bandit.
Line: 4
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b403-import-pickle
import importlib
from abc import ABC, abstractmethod
from pickle import _getattribute, _Pickler # type: ignore[attr-defined]
from pickle import whichmodule as _pickle_whichmodule # type: ignore[attr-defined]
from types import ModuleType
from typing import Any, List, Optional, Tuple, Dict
from ._mangling import demangle, get_mangle_prefix, is_mangled
Reported by Bandit.
Line: 18
Column: 1
class ObjMismatchError(Exception):
"""Raised when an importer found a different object with the same name as the user-provided one."""
pass
class Importer(ABC):
Reported by Pylint.
torch/nn/parameter.py
18 issues
Line: 25
Column: 20
"""
def __new__(cls, data=None, requires_grad=True):
if data is None:
data = torch.tensor([])
return torch.Tensor._make_subclass(cls, data, requires_grad)
def __deepcopy__(self, memo):
if id(self) in memo:
return memo[id(self)]
Reported by Pylint.
Line: 32
Column: 63
if id(self) in memo:
return memo[id(self)]
else:
result = type(self)(self.data.clone(memory_format=torch.preserve_format), self.requires_grad)
memo[id(self)] = result
return result
def __repr__(self):
return 'Parameter containing:\n' + super(Parameter, self).__repr__()
Reported by Pylint.
Line: 66
Column: 9
torch.Tensor.cpu,
torch.Tensor.to,
torch.Tensor.get_device,
torch._has_compatible_shallow_copy_type,
]
def materialize(self, shape, device=None, dtype=None):
r"""Create a Parameter or Tensor with the same properties of the uninitialized one.
Given a shape, it materializes a parameter in the same device
Reported by Pylint.
Line: 86
Column: 21
device = self.data.device
if dtype is None:
dtype = self.data.dtype
self.data = torch.empty(shape, device=device, dtype=dtype)
self.__class__ = self.cls_to_become
@property
def shape(self):
raise RuntimeError(
Reported by Pylint.
Line: 153
Column: 16
def __new__(cls, requires_grad=True, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
data = torch.tensor([], **factory_kwargs)
return torch.Tensor._make_subclass(cls, data, requires_grad)
class UninitializedBuffer(UninitializedTensorMixin, torch.Tensor):
r"""A buffer that is not initialized.
Reported by Pylint.
Line: 177
Column: 16
def __new__(cls, requires_grad=False, device=None, dtype=None) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
data = torch.tensor([], **factory_kwargs)
return torch.Tensor._make_subclass(cls, data, requires_grad)
Reported by Pylint.
Line: 66
Column: 9
torch.Tensor.cpu,
torch.Tensor.to,
torch.Tensor.get_device,
torch._has_compatible_shallow_copy_type,
]
def materialize(self, shape, device=None, dtype=None):
r"""Create a Parameter or Tensor with the same properties of the uninitialized one.
Given a shape, it materializes a parameter in the same device
Reported by Pylint.
Line: 1
Column: 1
import torch
from torch._C import _disabled_torch_function_impl
from collections import OrderedDict
class Parameter(torch.Tensor):
r"""A kind of Tensor that is to be considered a module parameter.
Parameters are :class:`~torch.Tensor` subclasses, that have a
Reported by Pylint.
Line: 3
Column: 1
import torch
from torch._C import _disabled_torch_function_impl
from collections import OrderedDict
class Parameter(torch.Tensor):
r"""A kind of Tensor that is to be considered a module parameter.
Parameters are :class:`~torch.Tensor` subclasses, that have a
Reported by Pylint.
Line: 29
Column: 9
return torch.Tensor._make_subclass(cls, data, requires_grad)
def __deepcopy__(self, memo):
if id(self) in memo:
return memo[id(self)]
else:
result = type(self)(self.data.clone(memory_format=torch.preserve_format), self.requires_grad)
memo[id(self)] = result
return result
Reported by Pylint.