The following issues were found:
caffe2/python/layers/layer_normalization.py
11 issues
Line: 72
Column: 27
output_blobs = [net.NextScopedBlob('ln_output'), net.NextScopedBlob('ln_mean'),
net.NextScopedBlob('ln_stdev')]
normalized, mean, stdev = net.LayerNorm(input_blob,
output_blobs,
axis=self.axis,
epsilon=self.epsilon)
scaled = net.Mul(
Reported by Pylint.
Line: 72
Column: 21
output_blobs = [net.NextScopedBlob('ln_output'), net.NextScopedBlob('ln_mean'),
net.NextScopedBlob('ln_stdev')]
normalized, mean, stdev = net.LayerNorm(input_blob,
output_blobs,
axis=self.axis,
epsilon=self.epsilon)
scaled = net.Mul(
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
Reported by Pylint.
Line: 12
Column: 1
import numpy as np
class LayerNormalization(ModelLayer):
def __init__(
self,
model,
input_record,
name='layer_normalization',
Reported by Pylint.
Line: 13
Column: 5
class LayerNormalization(ModelLayer):
def __init__(
self,
model,
input_record,
name='layer_normalization',
scale_optim=None,
Reported by Pylint.
Line: 26
Column: 9
scale_init_value=1.0,
**kwargs
):
super(LayerNormalization, self).__init__(
model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), (
"Incorrect input type: {}".format(input_record))
Reported by Pylint.
Line: 29
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
super(LayerNormalization, self).__init__(
model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), (
"Incorrect input type: {}".format(input_record))
self.input_shape = input_record.field_type().shape
self.axis = axis
Reported by Bandit.
Line: 35
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
self.input_shape = input_record.field_type().shape
self.axis = axis
assert len(self.input_shape) >= 1, (
"This layer supports only >= 2D tensors")
input_dims = self.input_shape[0]
self.output_schema = schema.Scalar(
(np.float32, self.input_shape),
Reported by Bandit.
Line: 57
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
if self.use_layer_norm_op:
self.epsilon = epsilon
else:
assert len(self.input_shape) == 1, (
"When using alternative implementation, "
"input data can only be 2D"
)
self.epsilon = model.maybe_add_global_constant(
"%s_epsilon" % self.name, float(epsilon)
Reported by Bandit.
Line: 65
Column: 5
"%s_epsilon" % self.name, float(epsilon)
)
def add_ops_with_layer_norm_op(self, net):
input_blob = self.input_record.field_blobs()
ln_output = self.output_schema.field_blobs()
output_blobs = [net.NextScopedBlob('ln_output'), net.NextScopedBlob('ln_mean'),
net.NextScopedBlob('ln_stdev')]
Reported by Pylint.
caffe2/python/layers/sampling_train.py
11 issues
Line: 70
Column: 18
return
log_q = net.Log(self.input_record.sampling_prob(),
net.NextScopedBlob("log_q"))
net.Sub([self.output_schema(), log_q], self.output_schema(),
broadcast=1, use_grad_hack=1)
Reported by Pylint.
Line: 70
Column: 48
return
log_q = net.Log(self.input_record.sampling_prob(),
net.NextScopedBlob("log_q"))
net.Sub([self.output_schema(), log_q], self.output_schema(),
broadcast=1, use_grad_hack=1)
Reported by Pylint.
Line: 1
Column: 1
## @package sampling_train
# Module caffe2.python.layers.sampling_train
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer, get_layer_class
Reported by Pylint.
Line: 13
Column: 1
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
class SamplingTrain(ModelLayer):
def __init__(
self,
model,
input_record,
prediction_layer,
Reported by Pylint.
Line: 14
Column: 5
class SamplingTrain(ModelLayer):
def __init__(
self,
model,
input_record,
prediction_layer,
output_dims,
Reported by Pylint.
Line: 24
Column: 9
name='sampling_train',
**kwargs
):
super(SamplingTrain, self).__init__(
model, name, input_record, **kwargs
)
layer_class = get_layer_class(prediction_layer)
assert issubclass(layer_class, SamplingTrainableMixin)
Reported by Pylint.
Line: 29
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
)
layer_class = get_layer_class(prediction_layer)
assert issubclass(layer_class, SamplingTrainableMixin)
assert 'indices' in input_record
assert isinstance(input_record.indices, schema.Scalar),\
"input_record.indices is expected to be a schema.Scalar"
assert 'input' in input_record
Reported by Bandit.
Line: 31
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
layer_class = get_layer_class(prediction_layer)
assert issubclass(layer_class, SamplingTrainableMixin)
assert 'indices' in input_record
assert isinstance(input_record.indices, schema.Scalar),\
"input_record.indices is expected to be a schema.Scalar"
assert 'input' in input_record
self.subtract_log_odd = subtract_log_odd
Reported by Bandit.
Line: 32
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
assert issubclass(layer_class, SamplingTrainableMixin)
assert 'indices' in input_record
assert isinstance(input_record.indices, schema.Scalar),\
"input_record.indices is expected to be a schema.Scalar"
assert 'input' in input_record
self.subtract_log_odd = subtract_log_odd
if self.subtract_log_odd:
Reported by Bandit.
Line: 34
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
assert 'indices' in input_record
assert isinstance(input_record.indices, schema.Scalar),\
"input_record.indices is expected to be a schema.Scalar"
assert 'input' in input_record
self.subtract_log_odd = subtract_log_odd
if self.subtract_log_odd:
assert 'sampling_prob' in input_record
Reported by Bandit.
caffe2/python/functional.py
11 issues
Line: 12
Column: 12
from collections import namedtuple
from six import string_types
OpSchema = workspace.C.OpSchema
def namedtupledict(typename, field_names, *args, **kwargs):
field_names_map = {n: i for i, n in enumerate(field_names)}
# Some output names are invalid python identifier, e.g. "0"
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
from caffe2.python.onnx.workspace import Workspace
from collections import namedtuple
Reported by Pylint.
Line: 9
Column: 1
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
from caffe2.python.onnx.workspace import Workspace
from collections import namedtuple
from six import string_types
OpSchema = workspace.C.OpSchema
Reported by Pylint.
Line: 15
Column: 1
OpSchema = workspace.C.OpSchema
def namedtupledict(typename, field_names, *args, **kwargs):
field_names_map = {n: i for i, n in enumerate(field_names)}
# Some output names are invalid python identifier, e.g. "0"
kwargs.setdefault('rename', True)
data = namedtuple(typename, field_names, *args, **kwargs)
Reported by Pylint.
Line: 30
Column: 1
return data
class _Functional(object):
def __getattribute__(self, op_type):
def op_func(*inputs, **args):
ws = Workspace()
schema = OpSchema.get(op_type)
input_prefix = 'input_'
Reported by Pylint.
Line: 30
Column: 1
return data
class _Functional(object):
def __getattribute__(self, op_type):
def op_func(*inputs, **args):
ws = Workspace()
schema = OpSchema.get(op_type)
input_prefix = 'input_'
Reported by Pylint.
Line: 32
Column: 9
class _Functional(object):
def __getattribute__(self, op_type):
def op_func(*inputs, **args):
ws = Workspace()
schema = OpSchema.get(op_type)
input_prefix = 'input_'
output_prefix = 'output_'
Reported by Pylint.
Line: 33
Column: 13
class _Functional(object):
def __getattribute__(self, op_type):
def op_func(*inputs, **args):
ws = Workspace()
schema = OpSchema.get(op_type)
input_prefix = 'input_'
output_prefix = 'output_'
def get_name_list(prefix, num, max_num):
Reported by Pylint.
Line: 94
Column: 13
# There could be input-output inplace enforcement; replace the
# output names with input ones if such enforcements exist
for i in range(len(input_names)):
for j in range(len(output_names)):
if schema.inplace_enforced(i, j):
output_names[j] = input_names[i]
op = core.CreateOperator(
Reported by Pylint.
Line: 95
Column: 17
# There could be input-output inplace enforcement; replace the
# output names with input ones if such enforcements exist
for i in range(len(input_names)):
for j in range(len(output_names)):
if schema.inplace_enforced(i, j):
output_names[j] = input_names[i]
op = core.CreateOperator(
op_type, input_names, output_names, **args
Reported by Pylint.
benchmarks/framework_overhead_benchmark/utils.py
11 issues
Line: 3
Column: 1
import time
from collections import namedtuple
from torch.utils import ThroughputBenchmark
NUM_LOOP_ITERS = 1000
BenchmarkConfig = namedtuple('BenchmarkConfig', 'num_warmup_iters num_iters')
ModuleConfig = namedtuple('ModuleConfig', 'pt_fn c2_op num_params graph_mode')
def ms_to_us(time_ms):
Reported by Pylint.
Line: 1
Column: 1
import time
from collections import namedtuple
from torch.utils import ThroughputBenchmark
NUM_LOOP_ITERS = 1000
BenchmarkConfig = namedtuple('BenchmarkConfig', 'num_warmup_iters num_iters')
ModuleConfig = namedtuple('ModuleConfig', 'pt_fn c2_op num_params graph_mode')
def ms_to_us(time_ms):
Reported by Pylint.
Line: 9
Column: 1
BenchmarkConfig = namedtuple('BenchmarkConfig', 'num_warmup_iters num_iters')
ModuleConfig = namedtuple('ModuleConfig', 'pt_fn c2_op num_params graph_mode')
def ms_to_us(time_ms):
return (time_ms * 1e3)
def secs_to_us(time_s):
return (time_s * 1e6)
Reported by Pylint.
Line: 10
Column: 1
ModuleConfig = namedtuple('ModuleConfig', 'pt_fn c2_op num_params graph_mode')
def ms_to_us(time_ms):
return (time_ms * 1e3)
def secs_to_us(time_s):
return (time_s * 1e6)
def secs_to_ms(time_s):
Reported by Pylint.
Line: 12
Column: 1
def ms_to_us(time_ms):
return (time_ms * 1e3)
def secs_to_us(time_s):
return (time_s * 1e6)
def secs_to_ms(time_s):
return (time_s * 1e3)
Reported by Pylint.
Line: 13
Column: 1
return (time_ms * 1e3)
def secs_to_us(time_s):
return (time_s * 1e6)
def secs_to_ms(time_s):
return (time_s * 1e3)
def benchmark_using_throughput_benchmark(config, module):
Reported by Pylint.
Line: 15
Column: 1
def secs_to_us(time_s):
return (time_s * 1e6)
def secs_to_ms(time_s):
return (time_s * 1e3)
def benchmark_using_throughput_benchmark(config, module):
print("Benchmarking via ThroughputBenchmark")
bench = ThroughputBenchmark(module.module)
Reported by Pylint.
Line: 16
Column: 1
return (time_s * 1e6)
def secs_to_ms(time_s):
return (time_s * 1e3)
def benchmark_using_throughput_benchmark(config, module):
print("Benchmarking via ThroughputBenchmark")
bench = ThroughputBenchmark(module.module)
bench.add_input(*module.tensor_inputs)
Reported by Pylint.
Line: 18
Column: 1
def secs_to_ms(time_s):
return (time_s * 1e3)
def benchmark_using_throughput_benchmark(config, module):
print("Benchmarking via ThroughputBenchmark")
bench = ThroughputBenchmark(module.module)
bench.add_input(*module.tensor_inputs)
stats = bench.benchmark(1, config.num_warmup_iters, config.num_iters)
return stats.latency_avg_ms / NUM_LOOP_ITERS
Reported by Pylint.
Line: 25
Column: 1
stats = bench.benchmark(1, config.num_warmup_iters, config.num_iters)
return stats.latency_avg_ms / NUM_LOOP_ITERS
def benchmark_module(config, module, use_throughput_benchmark=False):
if use_throughput_benchmark:
return benchmark_using_throughput_benchmark(config, module)
module.forward(config.num_warmup_iters)
print("Running module for {} iterations".format(config.num_iters))
start = time.time()
Reported by Pylint.
benchmarks/functional_autograd_benchmark/utils.py
11 issues
Line: 1
Column: 1
import torch
from collections import defaultdict
from torch import nn, Tensor
from typing import List, Tuple, Dict, Union, Callable
# Type helpers
InputsType = Union[Tensor, Tuple[Tensor, ...]]
Reported by Pylint.
Line: 5
Column: 1
from collections import defaultdict
from torch import nn, Tensor
from typing import List, Tuple, Dict, Union, Callable
# Type helpers
InputsType = Union[Tensor, Tuple[Tensor, ...]]
# A Getter takes in a device and returns a callable and the inputs to that callable
Reported by Pylint.
Line: 56
Column: 15
orig_params = tuple(mod.parameters())
# Remove all the parameters in the model
names = []
for name, p in list(mod.named_parameters()):
_del_nested_attr(mod, name.split("."))
names.append(name)
# Make params regular Tensors instead of nn.Parameter
params = tuple(p.detach().requires_grad_() for p in orig_params)
Reported by Pylint.
Line: 1
Column: 1
import torch
from collections import defaultdict
from torch import nn, Tensor
from typing import List, Tuple, Dict, Union, Callable
# Type helpers
InputsType = Union[Tensor, Tuple[Tensor, ...]]
Reported by Pylint.
Line: 3
Column: 1
import torch
from collections import defaultdict
from torch import nn, Tensor
from typing import List, Tuple, Dict, Union, Callable
# Type helpers
InputsType = Union[Tensor, Tuple[Tensor, ...]]
Reported by Pylint.
Line: 6
Column: 1
from collections import defaultdict
from torch import nn, Tensor
from typing import List, Tuple, Dict, Union, Callable
# Type helpers
InputsType = Union[Tensor, Tuple[Tensor, ...]]
# A Getter takes in a device and returns a callable and the inputs to that callable
GetterReturnType = Tuple[Callable[..., Tensor], InputsType]
Reported by Pylint.
Line: 16
Column: 1
# V here refers to the v in either vjp, jvp, vhp or hvp
VType = Union[None, Tensor, Tuple[Tensor, ...]]
# Type used to store timing results. The first key is the model name, the second key
# is the task name, the result is a Tuple of: speedup, mean_before, var_before, mean_after, var_after.
TimingResultType = Dict[str, Dict[str, Tuple[float, ...]]]
# Utilities to make nn.Module "functional"
# In particular the goal is to be able to provide a function that takes as input
# the parameters and evaluate the nn.Module using fixed inputs.
Reported by Pylint.
Line: 56
Column: 15
orig_params = tuple(mod.parameters())
# Remove all the parameters in the model
names = []
for name, p in list(mod.named_parameters()):
_del_nested_attr(mod, name.split("."))
names.append(name)
# Make params regular Tensors instead of nn.Parameter
params = tuple(p.detach().requires_grad_() for p in orig_params)
Reported by Pylint.
Line: 70
Column: 15
Note that the `params` are regular Tensors (that can have history) and so are left
as Tensors. This means that mod.parameters() will still be empty after this call.
"""
for name, p in zip(names, params):
_set_nested_attr(mod, name.split("."), p)
# Utilities to read/write markdown table-like content.
def to_markdown_table(res: TimingResultType, header: Tuple[str, ...] = None) -> str:
if header is None:
Reported by Pylint.
Line: 74
Column: 1
_set_nested_attr(mod, name.split("."), p)
# Utilities to read/write markdown table-like content.
def to_markdown_table(res: TimingResultType, header: Tuple[str, ...] = None) -> str:
if header is None:
header = ("model", "task", "mean", "var")
out = ""
def write_line(*args):
Reported by Pylint.
caffe2/python/layers/feature_sparse_to_dense.py
10 issues
Line: 167
Column: 3
"Unsupported input type: {0}".format(feature_specs.feature_type)
)
# TODO(amalevich): This schema is producing ranges. And thus if there is
# something using it it should support ranges as well. It might be
# confusing, if we don't add better support for ranges/have it as a
# first layer
self.output_schema = schema.Struct(*outputs)
Reported by Pylint.
Line: 173
Column: 3
# first layer
self.output_schema = schema.Struct(*outputs)
# TODO(amalevich): Consider moving this data to schema, instead
# Structs doesn't support attaching metadata to them and clonning
# will break things badly, but this is the most elegant way to pass
# this info around. Should we change it or it'll be too much work and
# not worse it?
for field, feature_specs in input_specs:
Reported by Pylint.
Line: 221
Column: 3
record[field].values.items(), self.output_schema[field].values()
)
elif feature_specs.feature_type == "ID_SCORE_LIST":
# TODO: merge this to the case above?
id_list_ranges = net.LengthsToRanges(
record[field].values.lengths(),
net.NextScopedBlob("id_score_list_ranges"),
)
net.SparseToDenseMask(
Reported by Pylint.
Line: 1
Column: 1
# @package sparse_to_dense
# Module caffe2.python.layers.sparse_to_dense
from collections import defaultdict
import numpy as np
from caffe2.python import schema
from caffe2.python.layers.layers import AccessedFeatures, ModelLayer
Reported by Pylint.
Line: 12
Column: 1
from caffe2.python.layers.layers import AccessedFeatures, ModelLayer
class FeatureSparseToDense(ModelLayer):
def __init__(
self,
model,
input_record,
input_specs,
Reported by Pylint.
Line: 13
Column: 5
class FeatureSparseToDense(ModelLayer):
def __init__(
self,
model,
input_record,
input_specs,
name="feature_sparse_to_dense",
Reported by Pylint.
Line: 29
Column: 9
Default_dense_value can only be 0.0 or float("NaN"). Any input that isn't
None will be NaN.
"""
super(FeatureSparseToDense, self).__init__(model, name, input_record, **kwargs)
if default_dense_value is None:
default_dense_value = 0.0
default_dense_value = float(default_dense_value)
assert (
np.isnan(default_dense_value) or default_dense_value == 0.0
Reported by Pylint.
Line: 33
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
if default_dense_value is None:
default_dense_value = 0.0
default_dense_value = float(default_dense_value)
assert (
np.isnan(default_dense_value) or default_dense_value == 0.0
), "default_dense_value can only be 0.0 or NaN"
self.input_specs = input_specs
self.default_float_value = (
Reported by Bandit.
Line: 47
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
outputs = []
for field, feature_specs in self.input_specs:
assert len(feature_specs.feature_names) == len(feature_specs.feature_ids)
if feature_specs.feature_type == "FLOAT":
outputs.append(
(
field,
schema.Scalar(
Reported by Bandit.
Line: 300
Column: 5
# generated based on the inputSpecs.
net.Alias(value_values_blob, self.output_schema[field].values())
def get_metadata(self):
metadata = []
for field, feature_specs in self.input_specs:
metadata.append(
(
{
Reported by Pylint.
caffe2/python/docs/parser.py
10 issues
Line: 1
Column: 1
## @package parser
# Module caffe2.python.docs.parser
import re
Reported by Pylint.
Line: 10
Column: 1
import re
class Parser(object):
# List of tuples (regex_str, lambda(regex_match, formatter))
# If a lambda returns True it will be called repeatedly with replacement
# otherwise it will only be called on text that hasn't been parsed yet.
regexes = [
# Code blocks of various formats
Reported by Pylint.
Line: 10
Column: 1
import re
class Parser(object):
# List of tuples (regex_str, lambda(regex_match, formatter))
# If a lambda returns True it will be called repeatedly with replacement
# otherwise it will only be called on text that hasn't been parsed yet.
regexes = [
# Code blocks of various formats
Reported by Pylint.
Line: 49
Column: 5
self.lines = []
self.formatter = formatter
def parseText(self):
UNPARSED = 0
PARSED = 1
parsed_block = [(UNPARSED, self.text)]
for regex, func in self.regexes:
index = 0
Reported by Pylint.
Line: 49
Column: 5
self.lines = []
self.formatter = formatter
def parseText(self):
UNPARSED = 0
PARSED = 1
parsed_block = [(UNPARSED, self.text)]
for regex, func in self.regexes:
index = 0
Reported by Pylint.
Line: 50
Column: 9
self.formatter = formatter
def parseText(self):
UNPARSED = 0
PARSED = 1
parsed_block = [(UNPARSED, self.text)]
for regex, func in self.regexes:
index = 0
while index < len(parsed_block):
Reported by Pylint.
Line: 51
Column: 9
def parseText(self):
UNPARSED = 0
PARSED = 1
parsed_block = [(UNPARSED, self.text)]
for regex, func in self.regexes:
index = 0
while index < len(parsed_block):
label, text = parsed_block[index]
Reported by Pylint.
Line: 59
Column: 1
label, text = parsed_block[index]
# Already been parsed
if (label == PARSED):
index += 1
continue
match = re.search(regex, text)
if match:
Reported by Pylint.
Line: 69
Column: 21
start = match.start(0)
end = match.end(0)
f = self.formatter.clone()
merge = func(match, f)
if merge:
merged = text[:start] + f.dump() + text[end:]
parsed_block.insert(index, (UNPARSED, merged))
Reported by Pylint.
Line: 94
Column: 5
self.lines += [i for _, i in parsed_block]
self.text = ' '.join(self.lines)
def parse(self):
self.parseText()
return self.text
Reported by Pylint.
caffe2/python/models/seq2seq/seq2seq_model_helper.py
10 issues
Line: 70
Column: 39
if namescope is None:
namescope = scope.CurrentNameScope()
else:
if not namescope.endswith(scope._NAMESCOPE_SEPARATOR):
namescope += scope._NAMESCOPE_SEPARATOR
if namescope == '':
return self.non_trainable_params[:]
else:
Reported by Pylint.
Line: 71
Column: 30
namescope = scope.CurrentNameScope()
else:
if not namescope.endswith(scope._NAMESCOPE_SEPARATOR):
namescope += scope._NAMESCOPE_SEPARATOR
if namescope == '':
return self.non_trainable_params[:]
else:
return [
Reported by Pylint.
Line: 1
Column: 1
## @package seq2seq_model_helper
# Module caffe2.python.models.seq2seq.seq2seq_model_helper
from caffe2.python import scope
from caffe2.python.model_helper import ModelHelper
Reported by Pylint.
Line: 12
Column: 1
from caffe2.python.model_helper import ModelHelper
class Seq2SeqModelHelper(ModelHelper):
def __init__(self, init_params=True, **kwargs):
arg_scope = {
'use_cudnn': kwargs.pop('use_cudnn', True),
'cudnn_exhaustive_search': kwargs.pop('cudnn_exhaustive_search', False),
Reported by Pylint.
Line: 23
Column: 9
if kwargs.get('ws_nbytes_limit', None):
arg_scope['ws_nbytes_limit'] = kwargs.pop('ws_nbytes_limit')
super(Seq2SeqModelHelper, self).__init__(
init_params=init_params,
arg_scope=arg_scope,
**kwargs
)
self.non_trainable_params = []
Reported by Pylint.
Line: 30
Column: 5
)
self.non_trainable_params = []
def AddParam(self, name, init=None, init_value=None, trainable=True):
"""Adds a parameter to the model's net and it's initializer if needed
Args:
init: a tuple (<initialization_op_name>, <initialization_op_kwargs>)
init_value: int, float or str. Can be used instead of `init` as a
Reported by Pylint.
Line: 40
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
trainable: bool, whether to compute gradient for this param or not
"""
if init_value is not None:
assert init is None
assert type(init_value) in [int, float, str]
init = ('ConstantFill', dict(
shape=[1],
value=init_value,
))
Reported by Bandit.
Line: 41
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
"""
if init_value is not None:
assert init is None
assert type(init_value) in [int, float, str]
init = ('ConstantFill', dict(
shape=[1],
value=init_value,
))
Reported by Bandit.
Line: 63
Column: 5
return param
def GetNonTrainableParams(self, namescope=None):
'''
Returns the params in current namescope
'''
if namescope is None:
namescope = scope.CurrentNameScope()
Reported by Pylint.
Line: 73
Column: 9
if not namescope.endswith(scope._NAMESCOPE_SEPARATOR):
namescope += scope._NAMESCOPE_SEPARATOR
if namescope == '':
return self.non_trainable_params[:]
else:
return [
p for p in self.non_trainable_params
if p.GetNameScope() == namescope
Reported by Pylint.
caffe2/python/modeling/initializers_test.py
10 issues
Line: 29
Column: 9
)
# operator name set, no initializer class set
fc4 = brew.fc(model, fc3, "fc4", dim_in=1, dim_out=1,
WeightInitializer=None,
weight_init=("ConstantFill", {})
)
@unittest.skipIf(not workspace.has_gpu_support, 'No GPU support')
Reported by Pylint.
Line: 46
Column: 9
)
# specified operator, PseudoFP16Initializer
fc3 = brew.fc(model, fc2, "fc3", dim_in=1, dim_out=1,
weight_init=("ConstantFill", {}),
WeightInitializer=PseudoFP16Initializer
)
def test_fc_external_initializer(self):
Reported by Pylint.
Line: 54
Column: 9
def test_fc_external_initializer(self):
model = model_helper.ModelHelper(name="test", init_params=False)
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=1, dim_out=1) # noqa
self.assertEqual(len(model.net.Proto().op), 1)
self.assertEqual(len(model.param_init_net.Proto().op), 0)
Reported by Pylint.
Line: 1
Column: 1
import unittest
from caffe2.python import brew, model_helper, workspace
from caffe2.python.modeling.initializers import (
Initializer, PseudoFP16Initializer)
Reported by Pylint.
Line: 12
Column: 1
Initializer, PseudoFP16Initializer)
class InitializerTest(unittest.TestCase):
def test_fc_initializer(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=1, dim_out=1)
Reported by Pylint.
Line: 13
Column: 5
class InitializerTest(unittest.TestCase):
def test_fc_initializer(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=1, dim_out=1)
# no operator name set, will use default
Reported by Pylint.
Line: 13
Column: 5
class InitializerTest(unittest.TestCase):
def test_fc_initializer(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=1, dim_out=1)
# no operator name set, will use default
Reported by Pylint.
Line: 35
Column: 5
)
@unittest.skipIf(not workspace.has_gpu_support, 'No GPU support')
def test_fc_fp16_initializer(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=1, dim_out=1)
# default operator, PseudoFP16Initializer
Reported by Pylint.
Line: 35
Column: 5
)
@unittest.skipIf(not workspace.has_gpu_support, 'No GPU support')
def test_fc_fp16_initializer(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=1, dim_out=1)
# default operator, PseudoFP16Initializer
Reported by Pylint.
Line: 51
Column: 5
WeightInitializer=PseudoFP16Initializer
)
def test_fc_external_initializer(self):
model = model_helper.ModelHelper(name="test", init_params=False)
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=1, dim_out=1) # noqa
self.assertEqual(len(model.net.Proto().op), 1)
self.assertEqual(len(model.param_init_net.Proto().op), 0)
Reported by Pylint.
.github/scripts/generate_binary_build_matrix.py
10 issues
Line: 39
Column: 3
WHEEL_CONTAINER_IMAGES = {
**{
# TODO: Re-do manylinux CUDA image tagging scheme to be similar to
# ROCM so we don't have to do this replacement
gpu_arch: f"pytorch/manylinux-cuda{gpu_arch.replace('.', '')}"
for gpu_arch in CUDA_ARCHES
},
**{
Reported by Pylint.
Line: 61
Column: 3
LIBTORCH_CONTAINER_IMAGES = {
**{
# TODO: Re-do manylinux CUDA image tagging scheme to be similar to
# ROCM so we don't have to do this replacement
(gpu_arch, "pre-cxx11"): f"pytorch/manylinux-cuda{gpu_arch.replace('.', '')}"
for gpu_arch in CUDA_ARCHES
},
**{
Reported by Pylint.
Line: 28
Column: 1
]
def arch_type(arch_version: str) -> str:
if arch_version in CUDA_ARCHES:
return "cuda"
elif arch_version in ROCM_ARCHES:
return "rocm"
else: # arch_version should always be "cpu" in this case
Reported by Pylint.
Line: 29
Column: 5
def arch_type(arch_version: str) -> str:
if arch_version in CUDA_ARCHES:
return "cuda"
elif arch_version in ROCM_ARCHES:
return "rocm"
else: # arch_version should always be "cpu" in this case
return "cpu"
Reported by Pylint.
Line: 82
Column: 1
]
def is_pull_request() -> bool:
return False
# return os.environ.get("GITHUB_HEAD_REF")
def snip_if(is_pr: bool, versions: List[str]) -> List[str]:
Reported by Pylint.
Line: 94
Column: 1
return [versions[-1]] if is_pr else versions
def generate_conda_matrix(is_pr: bool) -> List[Dict[str, str]]:
return [
{
"python_version": python_version,
"gpu_arch_type": arch_type(arch_version),
"gpu_arch_version": arch_version,
Reported by Pylint.
Line: 108
Column: 1
]
def generate_libtorch_matrix(is_pr: bool) -> List[Dict[str, str]]:
libtorch_variants = [
"shared-with-deps",
"shared-without-deps",
"static-with-deps",
"static-without-deps",
Reported by Pylint.
Line: 133
Column: 1
]
def generate_wheels_matrix(is_pr: bool) -> List[Dict[str, str]]:
arches = ["cpu"]
arches += snip_if(is_pr, CUDA_ARCHES)
arches += snip_if(is_pr, ROCM_ARCHES)
return [
{
Reported by Pylint.
Line: 149
Column: 1
]
def from_includes(includes: List[Dict[str, str]]) -> str:
return json.dumps({"include": includes})
def main() -> None:
parser = argparse.ArgumentParser()
Reported by Pylint.
Line: 153
Column: 1
return json.dumps({"include": includes})
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument('mode', choices=['conda', 'libtorch', 'wheels'])
args = parser.parse_args()
is_pr = is_pull_request()
Reported by Pylint.