The following issues were found:
tools/code_coverage/package/tool/parser/llvm_coverage_segment.py
7 issues
Line: 45
Column: 9
assert (
len(raw_segment) == 5 or len(raw_segment) == 6
), "list is not compatible with llvmcom export:"
" Expected to have 5 or 6 elements"
if len(raw_segment) == 5:
ret.append(
LlvmCoverageSegment(
raw_segment[0],
raw_segment[1],
Reported by Pylint.
Line: 1
Column: 1
from typing import List, NamedTuple, Optional, Tuple
class LlvmCoverageSegment(NamedTuple):
line: int
col: int
segment_count: int
has_count: int
is_region_entry: int
Reported by Pylint.
Line: 4
Column: 1
from typing import List, NamedTuple, Optional, Tuple
class LlvmCoverageSegment(NamedTuple):
line: int
col: int
segment_count: int
has_count: int
is_region_entry: int
Reported by Pylint.
Line: 13
Column: 5
is_gap_entry: Optional[int]
@property
def has_coverage(self) -> bool:
return self.segment_count > 0
@property
def is_executable(self) -> bool:
return self.has_count > 0
Reported by Pylint.
Line: 17
Column: 5
return self.segment_count > 0
@property
def is_executable(self) -> bool:
return self.has_count > 0
def get_coverage(
self, prev_segment: "LlvmCoverageSegment"
) -> Tuple[List[int], List[int]]:
Reported by Pylint.
Line: 20
Column: 5
def is_executable(self) -> bool:
return self.has_count > 0
def get_coverage(
self, prev_segment: "LlvmCoverageSegment"
) -> Tuple[List[int], List[int]]:
# Code adapted from testpilot.testinfra.runners.gtestcoveragerunner.py
if not prev_segment.is_executable:
return [], []
Reported by Pylint.
Line: 42
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
"""
ret: List[LlvmCoverageSegment] = []
for raw_segment in raw_segments:
assert (
len(raw_segment) == 5 or len(raw_segment) == 6
), "list is not compatible with llvmcom export:"
" Expected to have 5 or 6 elements"
if len(raw_segment) == 5:
ret.append(
Reported by Bandit.
torch/distributed/_sharding_spec/_internals.py
7 issues
Line: 32
Column: 30
def __post_init__(self):
if isinstance(self.placement, str):
self.placement = torch.distributed._remote_device(self.placement)
if len(self.shard_offsets) != len(self.shard_lengths):
raise ValueError(
f'shard_offsets and shard_lengths should have '
f'the same number of elements, found {len(self.shard_offsets)} '
Reported by Pylint.
Line: 76
Column: 3
Raises:
``ValueError`` if there's overlap in any two shards.
"""
# TODO: evaluate optimizing this if needed.
for i in range(len(shards)):
for j in range(i + 1, len(shards)):
if _check_shard_metadata_pair_overlap(shards[i], shards[j]):
raise ValueError(f'Shards {shards[i]} and {shards[j]} overlap')
Reported by Pylint.
Line: 120
Column: 3
tensor_volume *= size
if total_shard_volume != tensor_volume:
# TODO: Can we improve this error message to point out the gaps?
raise ValueError(
f'Total volume of shards: {total_shard_volume} '
f'does not match tensor volume: {tensor_volume}, in other words '
f'all the individual shards do not cover the entire tensor')
Reported by Pylint.
Line: 1
Column: 1
from typing import List, Union
from dataclasses import dataclass
from torch.distributed.remote_device import _remote_device
import torch
@dataclass
class ShardMetadata(object):
"""
Reported by Pylint.
Line: 8
Column: 1
import torch
@dataclass
class ShardMetadata(object):
"""
Represents a shard of the overall Tensor including its
offsets, lengths and device placement.
Args:
Reported by Pylint.
Line: 77
Column: 5
``ValueError`` if there's overlap in any two shards.
"""
# TODO: evaluate optimizing this if needed.
for i in range(len(shards)):
for j in range(i + 1, len(shards)):
if _check_shard_metadata_pair_overlap(shards[i], shards[j]):
raise ValueError(f'Shards {shards[i]} and {shards[j]} overlap')
Reported by Pylint.
Line: 112
Column: 1
if shard.shard_offsets[i] + shard.shard_lengths[i] > tensor_dims[i]:
raise ValueError(
f'Shard offset {shard.shard_offsets[i]} and length '
f'{shard.shard_lengths[i]} exceeds tensor dim: {tensor_dims[i]} for shard {shard}')
total_shard_volume += shard_volume
tensor_volume = 1
for size in tensor_dims:
tensor_volume *= size
Reported by Pylint.
torch/distributed/elastic/events/api.py
6 issues
Line: 1
Column: 1
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
Reported by Pylint.
Line: 49
Column: 5
return self.serialize()
@staticmethod
def deserialize(data: Union[str, "Event"]) -> "Event":
if isinstance(data, Event):
return data
if isinstance(data, str):
data_dict = json.loads(data)
data_dict["source"] = EventSource[data_dict["source"]]
Reported by Pylint.
Line: 57
Column: 5
data_dict["source"] = EventSource[data_dict["source"]]
return Event(**data_dict)
def serialize(self) -> str:
return json.dumps(asdict(self))
class NodeState(str, Enum):
"""
Reported by Pylint.
Line: 73
Column: 1
@dataclass
class RdzvEvent:
"""
Dataclass to represent any rendezvous event.
Args:
name: Event name. (E.g. Current action being performed)
Reported by Pylint.
Line: 105
Column: 5
return self.serialize()
@staticmethod
def deserialize(data: Union[str, "RdzvEvent"]) -> "RdzvEvent":
if isinstance(data, RdzvEvent):
return data
if isinstance(data, str):
data_dict = json.loads(data)
data_dict["node_state"] = NodeState[data_dict["node_state"]]
Reported by Pylint.
Line: 113
Column: 5
data_dict["node_state"] = NodeState[data_dict["node_state"]]
return RdzvEvent(**data_dict)
def serialize(self) -> str:
return json.dumps(asdict(self))
Reported by Pylint.
test/typing/reveal/torch_optim.py
6 issues
Line: 1
Column: 1
import torch
def foo(opt: torch.optim.Optimizer) -> None:
opt.zero_grad()
opt_adagrad = torch.optim.Adagrad([torch.tensor(0.0)])
reveal_type(opt_adagrad) # E: {Adagrad}
foo(opt_adagrad)
Reported by Pylint.
Line: 8
Column: 1
opt.zero_grad()
opt_adagrad = torch.optim.Adagrad([torch.tensor(0.0)])
reveal_type(opt_adagrad) # E: {Adagrad}
foo(opt_adagrad)
opt_adam = torch.optim.Adam([torch.tensor(0.0)], lr=1e-2, eps=1e-6)
reveal_type(opt_adam) # E: {Adam}
foo(opt_adam)
Reported by Pylint.
Line: 12
Column: 1
foo(opt_adagrad)
opt_adam = torch.optim.Adam([torch.tensor(0.0)], lr=1e-2, eps=1e-6)
reveal_type(opt_adam) # E: {Adam}
foo(opt_adam)
Reported by Pylint.
Line: 1
Column: 1
import torch
def foo(opt: torch.optim.Optimizer) -> None:
opt.zero_grad()
opt_adagrad = torch.optim.Adagrad([torch.tensor(0.0)])
reveal_type(opt_adagrad) # E: {Adagrad}
foo(opt_adagrad)
Reported by Pylint.
Line: 4
Column: 1
import torch
def foo(opt: torch.optim.Optimizer) -> None:
opt.zero_grad()
opt_adagrad = torch.optim.Adagrad([torch.tensor(0.0)])
reveal_type(opt_adagrad) # E: {Adagrad}
foo(opt_adagrad)
Reported by Pylint.
Line: 4
Column: 1
import torch
def foo(opt: torch.optim.Optimizer) -> None:
opt.zero_grad()
opt_adagrad = torch.optim.Adagrad([torch.tensor(0.0)])
reveal_type(opt_adagrad) # E: {Adagrad}
foo(opt_adagrad)
Reported by Pylint.
test/package/package_a/__init__.py
6 issues
Line: 1
Column: 1
result = "package_a"
class PackageAObject:
__slots__ = ["obj"]
def __init__(self, obj):
self.obj = obj
Reported by Pylint.
Line: 1
Column: 1
result = "package_a"
class PackageAObject:
__slots__ = ["obj"]
def __init__(self, obj):
self.obj = obj
Reported by Pylint.
Line: 4
Column: 1
result = "package_a"
class PackageAObject:
__slots__ = ["obj"]
def __init__(self, obj):
self.obj = obj
Reported by Pylint.
Line: 4
Column: 1
result = "package_a"
class PackageAObject:
__slots__ = ["obj"]
def __init__(self, obj):
self.obj = obj
Reported by Pylint.
Line: 10
Column: 5
def __init__(self, obj):
self.obj = obj
def return_result(self):
return result
Reported by Pylint.
Line: 10
Column: 5
def __init__(self, obj):
self.obj = obj
def return_result(self):
return result
Reported by Pylint.
test/package/package_b/subpackage_2.py
6 issues
Line: 1
Column: 1
__import__("math", fromlist=[])
__import__("xml.sax.xmlreader")
result = "subpackage_2"
class PackageBSubpackage2Object_0:
pass
Reported by Pylint.
Line: 4
Column: 1
__import__("math", fromlist=[])
__import__("xml.sax.xmlreader")
result = "subpackage_2"
class PackageBSubpackage2Object_0:
pass
Reported by Pylint.
Line: 7
Column: 1
result = "subpackage_2"
class PackageBSubpackage2Object_0:
pass
def dynamic_import_test(name: str):
__import__(name)
Reported by Pylint.
Line: 7
Column: 1
result = "subpackage_2"
class PackageBSubpackage2Object_0:
pass
def dynamic_import_test(name: str):
__import__(name)
Reported by Pylint.
Line: 7
Column: 1
result = "subpackage_2"
class PackageBSubpackage2Object_0:
pass
def dynamic_import_test(name: str):
__import__(name)
Reported by Pylint.
Line: 11
Column: 1
pass
def dynamic_import_test(name: str):
__import__(name)
Reported by Pylint.
test/package/test_trace_dep/__init__.py
6 issues
Line: 1
Column: 1
import torch
import yaml
class SumMod(torch.nn.Module):
def forward(self, inp):
return torch.sum(inp)
Reported by Pylint.
Line: 1
Column: 1
import torch
import yaml
class SumMod(torch.nn.Module):
def forward(self, inp):
return torch.sum(inp)
Reported by Pylint.
Line: 5
Column: 1
import yaml
class SumMod(torch.nn.Module):
def forward(self, inp):
return torch.sum(inp)
Reported by Pylint.
Line: 5
Column: 1
import yaml
class SumMod(torch.nn.Module):
def forward(self, inp):
return torch.sum(inp)
Reported by Pylint.
Line: 6
Column: 5
class SumMod(torch.nn.Module):
def forward(self, inp):
return torch.sum(inp)
Reported by Pylint.
Line: 6
Column: 5
class SumMod(torch.nn.Module):
def forward(self, inp):
return torch.sum(inp)
Reported by Pylint.
torch/ao/nn/sparse/quantized/utils.py
6 issues
Line: 1
Column: 1
import threading
def is_valid_linear_block_sparse_pattern(row_block_size, col_block_size):
return (row_block_size == 1 and col_block_size == 4) or \
(row_block_size == 8 and col_block_size == 1)
# This is a stop-gap measure as current flow does not allow module
# specific block sparse pattern.
# Infact there is no way to convey sparse pattern via module config
Reported by Pylint.
Line: 3
Column: 1
import threading
def is_valid_linear_block_sparse_pattern(row_block_size, col_block_size):
return (row_block_size == 1 and col_block_size == 4) or \
(row_block_size == 8 and col_block_size == 1)
# This is a stop-gap measure as current flow does not allow module
# specific block sparse pattern.
# Infact there is no way to convey sparse pattern via module config
Reported by Pylint.
Line: 13
Column: 1
# of quantization flow. Thus using the global context to convey
# sparsity pattern.
# Once the flow supports it, this should be removed.
class LinearBlockSparsePattern:
rlock = threading.RLock()
row_block_size = 1
col_block_size = 4
prev_row_block_size = 1
prev_col_block_size = 4
Reported by Pylint.
Line: 21
Column: 1
prev_col_block_size = 4
def __init__(self, row_block_size=1, col_block_size=4):
assert(is_valid_linear_block_sparse_pattern(row_block_size, col_block_size))
LinearBlockSparsePattern.rlock.acquire()
LinearBlockSparsePattern.prev_row_block_size = LinearBlockSparsePattern.row_block_size
LinearBlockSparsePattern.prev_col_block_size = LinearBlockSparsePattern.col_block_size
LinearBlockSparsePattern.row_block_size = row_block_size
LinearBlockSparsePattern.col_block_size = col_block_size
Reported by Pylint.
Line: 21
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
prev_col_block_size = 4
def __init__(self, row_block_size=1, col_block_size=4):
assert(is_valid_linear_block_sparse_pattern(row_block_size, col_block_size))
LinearBlockSparsePattern.rlock.acquire()
LinearBlockSparsePattern.prev_row_block_size = LinearBlockSparsePattern.row_block_size
LinearBlockSparsePattern.prev_col_block_size = LinearBlockSparsePattern.col_block_size
LinearBlockSparsePattern.row_block_size = row_block_size
LinearBlockSparsePattern.col_block_size = col_block_size
Reported by Bandit.
Line: 37
Column: 5
LinearBlockSparsePattern.rlock.release()
@staticmethod
def block_size():
return LinearBlockSparsePattern.row_block_size, LinearBlockSparsePattern.col_block_size
Reported by Pylint.
torch/distributions/half_normal.py
6 issues
Line: 10
Column: 1
from torch.distributions.transformed_distribution import TransformedDistribution
class HalfNormal(TransformedDistribution):
r"""
Creates a half-normal distribution parameterized by `scale` where::
X ~ Normal(0, scale)
Y = |X| ~ HalfNormal(scale)
Reported by Pylint.
Line: 63
Column: 5
self._validate_sample(value)
return 2 * self.base_dist.cdf(value) - 1
def icdf(self, prob):
return self.base_dist.icdf((prob + 1) / 2)
def entropy(self):
return self.base_dist.entropy() - math.log(2)
Reported by Pylint.
Line: 1
Column: 1
import math
from torch._six import inf
from torch.distributions import constraints
from torch.distributions.transforms import AbsTransform
from torch.distributions.normal import Normal
from torch.distributions.transformed_distribution import TransformedDistribution
Reported by Pylint.
Line: 32
Column: 9
def __init__(self, scale, validate_args=None):
base_dist = Normal(0, scale, validate_args=False)
super(HalfNormal, self).__init__(base_dist, AbsTransform(),
validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(HalfNormal, _instance)
return super(HalfNormal, self).expand(batch_shape, _instance=new)
Reported by Pylint.
Line: 37
Column: 16
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(HalfNormal, _instance)
return super(HalfNormal, self).expand(batch_shape, _instance=new)
@property
def scale(self):
return self.base_dist.scale
Reported by Pylint.
Line: 40
Column: 5
return super(HalfNormal, self).expand(batch_shape, _instance=new)
@property
def scale(self):
return self.base_dist.scale
@property
def mean(self):
return self.scale * math.sqrt(2 / math.pi)
Reported by Pylint.
torch/csrc/jit/tensorexpr/block_codegen.cpp
6 issues
Line: 222
Column: 14
CWE codes:
120
20
void BlockPrinter::PrintBufferInfo(const std::unordered_set<BufPtr>& bufs) {
emitIndent();
os() << "buffers {";
for (auto& read : bufs) {
os() << std::endl;
emitIndent();
emitIndent();
os() << block_analysis_->getFlatInputName(read) << " = ";
os() << "{{"
Reported by FlawFinder.
Line: 226
Column: 47
CWE codes:
120
20
os() << std::endl;
emitIndent();
emitIndent();
os() << block_analysis_->getFlatInputName(read) << " = ";
os() << "{{"
<< "bs_DPE"
<< "}}";
}
os() << std::endl;
Reported by FlawFinder.
Line: 283
Column: 14
CWE codes:
120
20
}
void BlockPrinter::PrintDMAs(const std::unordered_set<BufPtr>& bufs) {
for (auto& read : bufs) {
emitIndent();
os() << "dma_in(";
os() << block_analysis_->getFlatInputName(read);
os() << ")" << std::endl;
}
Reported by FlawFinder.
Line: 286
Column: 47
CWE codes:
120
20
for (auto& read : bufs) {
emitIndent();
os() << "dma_in(";
os() << block_analysis_->getFlatInputName(read);
os() << ")" << std::endl;
}
}
void BlockPrinter::PrintAdjustBuffers(const std::unordered_set<BufPtr>& bufs) {
for (auto& read : bufs) {
Reported by FlawFinder.
Line: 291
Column: 14
CWE codes:
120
20
}
}
void BlockPrinter::PrintAdjustBuffers(const std::unordered_set<BufPtr>& bufs) {
for (auto& read : bufs) {
emitIndent();
os() << "adjust_buffer(";
os() << block_analysis_->getFlatInputName(read);
os() << ")" << std::endl;
}
Reported by FlawFinder.
Line: 294
Column: 47
CWE codes:
120
20
for (auto& read : bufs) {
emitIndent();
os() << "adjust_buffer(";
os() << block_analysis_->getFlatInputName(read);
os() << ")" << std::endl;
}
}
void BlockPrinter::visit(LoadPtr v) {
Reported by FlawFinder.