The following issues were found:
torch/nn/modules/linear.py
35 issues
Line: 6
Column: 1
import torch
from torch import Tensor
from torch.nn.parameter import Parameter, UninitializedParameter
from .. import functional as F
from .. import init
from .module import Module
from .lazy import LazyModuleMixin
Reported by Pylint.
Line: 7
Column: 1
from torch import Tensor
from torch.nn.parameter import Parameter, UninitializedParameter
from .. import functional as F
from .. import init
from .module import Module
from .lazy import LazyModuleMixin
class Identity(Module):
Reported by Pylint.
Line: 8
Column: 1
from torch.nn.parameter import Parameter, UninitializedParameter
from .. import functional as F
from .. import init
from .module import Module
from .lazy import LazyModuleMixin
class Identity(Module):
r"""A placeholder identity operator that is argument-insensitive.
Reported by Pylint.
Line: 9
Column: 1
from .. import functional as F
from .. import init
from .module import Module
from .lazy import LazyModuleMixin
class Identity(Module):
r"""A placeholder identity operator that is argument-insensitive.
Reported by Pylint.
Line: 81
Column: 33
super(Linear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
if bias:
self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
else:
self.register_parameter('bias', None)
self.reset_parameters()
Reported by Pylint.
Line: 83
Column: 35
self.out_features = out_features
self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs))
if bias:
self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
Reported by Pylint.
Line: 170
Column: 33
self.in1_features = in1_features
self.in2_features = in2_features
self.out_features = out_features
self.weight = Parameter(torch.empty((out_features, in1_features, in2_features), **factory_kwargs))
if bias:
self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
else:
self.register_parameter('bias', None)
Reported by Pylint.
Line: 173
Column: 35
self.weight = Parameter(torch.empty((out_features, in1_features, in2_features), **factory_kwargs))
if bias:
self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self) -> None:
Reported by Pylint.
Line: 28
Column: 1
torch.Size([128, 20])
"""
def __init__(self, *args, **kwargs):
super(Identity, self).__init__()
def forward(self, input: Tensor) -> Tensor:
return input
Reported by Pylint.
Line: 28
Column: 1
torch.Size([128, 20])
"""
def __init__(self, *args, **kwargs):
super(Identity, self).__init__()
def forward(self, input: Tensor) -> Tensor:
return input
Reported by Pylint.
caffe2/python/operator_test/upsample_op_test.py
35 issues
Line: 23
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
Reported by Pylint.
Line: 24
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
class TestUpSample(serial.SerializedTestCase):
Reported by Pylint.
Line: 71
Column: 24
]
for op, inputs in ops:
def ref(X, scales=None):
output_height = np.int32(height * height_scale)
output_width = np.int32(width * width_scale)
Y = np.random.rand(
batch_size, num_channels, output_height,
Reported by Pylint.
Line: 159
Column: 28
]
for op, inputs in ops:
def ref(dY, X, scales=None):
dX = np.zeros_like(X)
rheight = ((height - 1) / (output_height - 1)
if output_height > 1
else float(0))
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
Reported by Pylint.
Line: 26
Column: 1
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
class TestUpSample(serial.SerializedTestCase):
@given(height_scale=st.floats(1.0, 4.0) | st.just(2.0),
width_scale=st.floats(1.0, 4.0) | st.just(2.0),
Reported by Pylint.
Line: 29
Column: 1
import unittest
class TestUpSample(serial.SerializedTestCase):
@given(height_scale=st.floats(1.0, 4.0) | st.just(2.0),
width_scale=st.floats(1.0, 4.0) | st.just(2.0),
height=st.integers(4, 32),
width=st.integers(4, 32),
num_channels=st.integers(1, 4),
Reported by Pylint.
Line: 39
Column: 5
seed=st.integers(0, 65535),
**hu.gcs)
@settings(max_examples=50, deadline=None)
def test_upsample(self, height_scale, width_scale, height, width,
num_channels, batch_size, seed,
gc, dc):
np.random.seed(seed)
Reported by Pylint.
Line: 39
Column: 5
seed=st.integers(0, 65535),
**hu.gcs)
@settings(max_examples=50, deadline=None)
def test_upsample(self, height_scale, width_scale, height, width,
num_channels, batch_size, seed,
gc, dc):
np.random.seed(seed)
Reported by Pylint.
Line: 39
Column: 5
seed=st.integers(0, 65535),
**hu.gcs)
@settings(max_examples=50, deadline=None)
def test_upsample(self, height_scale, width_scale, height, width,
num_channels, batch_size, seed,
gc, dc):
np.random.seed(seed)
Reported by Pylint.
test/onnx/test_pytorch_onnx_shape_inference.py
35 issues
Line: 2
Column: 1
import unittest
import torch
import numpy as np
def expect_tensor(scalar_type, shape=None):
def verify(actual_type):
np.testing.assert_equal(actual_type.scalarType(), scalar_type)
# if shape is not None:
Reported by Pylint.
Line: 16
Column: 5
return verify
class TestONNXShapeInference(unittest.TestCase):
from torch.onnx.symbolic_helper import _onnx_main_opset
opset_version = _onnx_main_opset
def run_test(self, g, n, type_assertion_funcs):
if not isinstance(type_assertion_funcs, list):
type_assertion_funcs = [type_assertion_funcs]
Reported by Pylint.
Line: 23
Column: 9
if not isinstance(type_assertion_funcs, list):
type_assertion_funcs = [type_assertion_funcs]
torch._C._jit_pass_onnx_graph_shape_type_inference(g, {}, self.opset_version)
for out, type_assertion_func in zip(n.outputs(), type_assertion_funcs):
type_assertion_func(out.type())
def create_empty_graph(self):
g = torch._C.Graph()
Reported by Pylint.
Line: 23
Column: 9
if not isinstance(type_assertion_funcs, list):
type_assertion_funcs = [type_assertion_funcs]
torch._C._jit_pass_onnx_graph_shape_type_inference(g, {}, self.opset_version)
for out, type_assertion_func in zip(n.outputs(), type_assertion_funcs):
type_assertion_func(out.type())
def create_empty_graph(self):
g = torch._C.Graph()
Reported by Pylint.
Line: 28
Column: 13
type_assertion_func(out.type())
def create_empty_graph(self):
g = torch._C.Graph()
# kick off initialization for ConstantMap.
torch._C._jit_pass_onnx_graph_shape_type_inference(g, {}, self.opset_version)
return g
def insert_tensor_constant(self, g, tensor):
Reported by Pylint.
Line: 30
Column: 9
def create_empty_graph(self):
g = torch._C.Graph()
# kick off initialization for ConstantMap.
torch._C._jit_pass_onnx_graph_shape_type_inference(g, {}, self.opset_version)
return g
def insert_tensor_constant(self, g, tensor):
return g.op("Constant", value_t=tensor)
Reported by Pylint.
Line: 30
Column: 9
def create_empty_graph(self):
g = torch._C.Graph()
# kick off initialization for ConstantMap.
torch._C._jit_pass_onnx_graph_shape_type_inference(g, {}, self.opset_version)
return g
def insert_tensor_constant(self, g, tensor):
return g.op("Constant", value_t=tensor)
Reported by Pylint.
Line: 39
Column: 9
def test_cast(self):
# Test cast with input of unknown scalar type.
g = self.create_empty_graph()
input = g.addInput()
cast_out = g.op("Cast", input, to_i=1)
self.run_test(g, cast_out.node(), expect_tensor("Float"))
def test_constant_of_shape(self):
# Test ConstantOfShape with input of onnx::Shape node.
Reported by Pylint.
Line: 57
Column: 23
g = self.create_empty_graph()
constants = [self.insert_tensor_constant(g, torch.tensor(i + 1)) for i in range(rank)]
shape = g.op("prim::ListConstruct", *constants)
shape.setType(torch._C.ListType.ofInts())
constant_of_shape = g.op("ConstantOfShape", shape, value_t=torch.tensor([2.0]))
self.run_test(g, constant_of_shape.node(), expect_tensor("Float", shape=(1, 2, 3, 4)))
def test_constant_of_shape_dynamic(self):
# Test ConstantOfShape with input of prim::ListConstruct of dynamic tensor
Reported by Pylint.
Line: 67
Column: 23
g = self.create_empty_graph()
inputs = [g.addInput() for i in range(rank)]
shape = g.op("prim::ListConstruct", *inputs)
shape.setType(torch._C.ListType.ofInts())
constant_of_shape = g.op("ConstantOfShape", shape, value_t=torch.tensor([2.0]))
self.run_test(g, constant_of_shape.node(), expect_tensor("Float", shape=(None, None, None, None)))
def test_reshape(self):
g = self.create_empty_graph()
Reported by Pylint.
caffe2/python/visualize.py
35 issues
Line: 48
Column: 20
raise ValueError("The input patch shape isn't correct.")
# determine color
if len(patch.shape) == 2 and cmap is None:
cmap = cm.gray
pyplot.imshow(patch, cmap=cmap)
return patch
def ShowMultiple(self, patches, ncols=None, cmap=None, bg_func=np.mean):
"""Visualize multiple patches.
Reported by Pylint.
Line: 76
Column: 28
patches = patches.reshape(patches.shape[:-1])
image_shape = tuple(image_size)
if cmap is None:
cmap = cm.gray
elif patches.shape[3] == 3:
# color patches
image_shape = tuple(image_size) + (3, )
else:
raise ValueError("The input patch shape isn't expected.")
Reported by Pylint.
Line: 85
Column: 24
else:
image_shape = tuple(image_size)
if cmap is None:
cmap = cm.gray
image = np.ones(image_shape) * bg_func(patches)
for pid in range(num_patches):
row = pid // ncols * patch_size_expand[0]
col = pid % ncols * patch_size_expand[1]
image[row:row+patches.shape[1], col:col+patches.shape[2]] = \
Reported by Pylint.
Line: 16
Column: 1
from matplotlib import cm, pyplot
def ChannelFirst(arr):
"""Convert a HWC array to CHW."""
ndim = arr.ndim
return arr.swapaxes(ndim - 1, ndim - 2).swapaxes(ndim - 2, ndim - 3)
Reported by Pylint.
Line: 22
Column: 1
return arr.swapaxes(ndim - 1, ndim - 2).swapaxes(ndim - 2, ndim - 3)
def ChannelLast(arr):
"""Convert a CHW array to HWC."""
ndim = arr.ndim
return arr.swapaxes(ndim - 3, ndim - 2).swapaxes(ndim - 2, ndim - 1)
Reported by Pylint.
Line: 28
Column: 1
return arr.swapaxes(ndim - 3, ndim - 2).swapaxes(ndim - 2, ndim - 1)
class PatchVisualizer(object):
"""PatchVisualizer visualizes patches.
"""
def __init__(self, gap=1):
self.gap = gap
Reported by Pylint.
Line: 35
Column: 5
def __init__(self, gap=1):
self.gap = gap
def ShowSingle(self, patch, cmap=None):
"""Visualizes one single patch.
The input patch could be a vector (in which case we try to infer the shape
of the patch), a 2-D matrix, or a 3-D matrix whose 3rd dimension has 3
channels.
Reported by Pylint.
Line: 52
Column: 5
pyplot.imshow(patch, cmap=cmap)
return patch
def ShowMultiple(self, patches, ncols=None, cmap=None, bg_func=np.mean):
"""Visualize multiple patches.
In the passed in patches matrix, each row is a patch, in the shape of either
n*n, n*n*1 or n*n*3, either in a flattened format (so patches would be a
2-D array), or a multi-dimensional tensor. We will try our best to figure
Reported by Pylint.
Line: 96
Column: 5
pyplot.axis('off')
return image
def ShowImages(self, patches, *args, **kwargs):
"""Similar to ShowMultiple, but always normalize the values between 0 and 1
for better visualization of image-type data.
"""
patches = patches - np.min(patches)
patches /= np.max(patches) + np.finfo(np.float64).eps
Reported by Pylint.
Line: 104
Column: 5
patches /= np.max(patches) + np.finfo(np.float64).eps
return self.ShowMultiple(patches, *args, **kwargs)
def ShowChannels(self, patch, cmap=None, bg_func=np.mean):
""" This function shows the channels of a patch.
The incoming patch should have shape [w, h, num_channels], and each channel
will be visualized as a separate gray patch.
"""
Reported by Pylint.
test/cpp/jit/tests_setup.py
35 issues
Line: 3
Column: 1
import sys
import os
import torch
class Setup(object):
def setup(self):
raise NotImplementedError()
Reported by Pylint.
Line: 20
Column: 13
def shutdown(self):
if os.path.exists(self.path):
os.remove(self.path)
pass
class EvalModeForLoadedModule(FileSetup):
path = 'dropout_model.pt'
Reported by Pylint.
Line: 1
Column: 1
import sys
import os
import torch
class Setup(object):
def setup(self):
raise NotImplementedError()
Reported by Pylint.
Line: 6
Column: 1
import torch
class Setup(object):
def setup(self):
raise NotImplementedError()
def shutdown(self):
raise NotImplementedError()
Reported by Pylint.
Line: 6
Column: 1
import torch
class Setup(object):
def setup(self):
raise NotImplementedError()
def shutdown(self):
raise NotImplementedError()
Reported by Pylint.
Line: 7
Column: 5
class Setup(object):
def setup(self):
raise NotImplementedError()
def shutdown(self):
raise NotImplementedError()
Reported by Pylint.
Line: 10
Column: 5
def setup(self):
raise NotImplementedError()
def shutdown(self):
raise NotImplementedError()
class FileSetup(object):
path = None
Reported by Pylint.
Line: 14
Column: 1
raise NotImplementedError()
class FileSetup(object):
path = None
def shutdown(self):
if os.path.exists(self.path):
os.remove(self.path)
Reported by Pylint.
Line: 14
Column: 1
raise NotImplementedError()
class FileSetup(object):
path = None
def shutdown(self):
if os.path.exists(self.path):
os.remove(self.path)
Reported by Pylint.
Line: 14
Column: 1
raise NotImplementedError()
class FileSetup(object):
path = None
def shutdown(self):
if os.path.exists(self.path):
os.remove(self.path)
Reported by Pylint.
caffe2/python/regularizer_test.py
35 issues
Line: 4
Column: 1
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import numpy.testing as npt
from caffe2.python import core, layer_model_instantiator, regularizer, schema, workspace
from caffe2.python.layer_test_util import LayersTestCase
from caffe2.python.optimizer import SgdOptimizer
Reported by Pylint.
Line: 12
Column: 1
from caffe2.python.optimizer import SgdOptimizer
from caffe2.python.regularizer import L1Norm, RegularizationBy
from caffe2.python.regularizer_context import RegularizerContext, UseRegularizer
from hypothesis import given
class TestRegularizerContext(LayersTestCase):
@given(X=hu.arrays(dims=[2, 5]))
def test_regularizer_context(self, X):
Reported by Pylint.
Line: 94
Column: 81
lb=hu.floats(min_value=-1.0, max_value=1.0),
**hu.gcs_cpu_only
)
def test_bounded_grad_proj(self, X, left_open, right_open, eps, ub, lb, gc, dc):
if ub - (eps if right_open else 0.) < lb + (eps if left_open else 0.):
return
param = core.BlobReference("X")
workspace.FeedBlob(param, X)
train_init_net, train_net = self.get_training_nets()
Reported by Pylint.
Line: 94
Column: 77
lb=hu.floats(min_value=-1.0, max_value=1.0),
**hu.gcs_cpu_only
)
def test_bounded_grad_proj(self, X, left_open, right_open, eps, ub, lb, gc, dc):
if ub - (eps if right_open else 0.) < lb + (eps if left_open else 0.):
return
param = core.BlobReference("X")
workspace.FeedBlob(param, X)
train_init_net, train_net = self.get_training_nets()
Reported by Pylint.
Line: 1
Column: 1
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import numpy.testing as npt
from caffe2.python import core, layer_model_instantiator, regularizer, schema, workspace
from caffe2.python.layer_test_util import LayersTestCase
from caffe2.python.optimizer import SgdOptimizer
Reported by Pylint.
Line: 15
Column: 1
from hypothesis import given
class TestRegularizerContext(LayersTestCase):
@given(X=hu.arrays(dims=[2, 5]))
def test_regularizer_context(self, X):
weight_reg_out = L1Norm(0.2)
bias_reg_out = L1Norm(0)
regularizers = {"WEIGHT": weight_reg_out, "BIAS": bias_reg_out}
Reported by Pylint.
Line: 17
Column: 5
class TestRegularizerContext(LayersTestCase):
@given(X=hu.arrays(dims=[2, 5]))
def test_regularizer_context(self, X):
weight_reg_out = L1Norm(0.2)
bias_reg_out = L1Norm(0)
regularizers = {"WEIGHT": weight_reg_out, "BIAS": bias_reg_out}
output_dims = 2
Reported by Pylint.
Line: 17
Column: 5
class TestRegularizerContext(LayersTestCase):
@given(X=hu.arrays(dims=[2, 5]))
def test_regularizer_context(self, X):
weight_reg_out = L1Norm(0.2)
bias_reg_out = L1Norm(0)
regularizers = {"WEIGHT": weight_reg_out, "BIAS": bias_reg_out}
output_dims = 2
Reported by Pylint.
Line: 31
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
bias_reg = RegularizerContext.current().get_regularizer("BIAS")
optim = SgdOptimizer(0.15)
assert (
weight_reg == weight_reg_out
), "fail to get correct weight reg from context"
assert bias_reg == bias_reg_out, "fail to get correct bias reg from context"
fc_output = self.model.FC(
input_record,
Reported by Bandit.
Line: 34
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
assert (
weight_reg == weight_reg_out
), "fail to get correct weight reg from context"
assert bias_reg == bias_reg_out, "fail to get correct bias reg from context"
fc_output = self.model.FC(
input_record,
output_dims,
weight_optim=optim,
bias_optim=optim,
Reported by Bandit.
torch/testing/_internal/distributed/rpc/examples/parameter_server_test.py
35 issues
Line: 40
Column: 22
self.curr_update_size = 0
self.optimizer = optim.SGD(self.model.parameters(), lr=0.001, momentum=0.9)
for p in self.model.parameters():
p.grad = torch.zeros_like(p)
def get_model(self):
return self.model
@staticmethod
Reported by Pylint.
Line: 77
Column: 22
def get_next_batch(self):
for _ in range(num_batches):
inputs = torch.randn(batch_size, in_features)
labels = torch.zeros(batch_size, out_features)
yield inputs, labels
def train(self):
name = rpc.get_worker_info().name
Reported by Pylint.
Line: 78
Column: 22
def get_next_batch(self):
for _ in range(num_batches):
inputs = torch.randn(batch_size, in_features)
labels = torch.zeros(batch_size, out_features)
yield inputs, labels
def train(self):
name = rpc.get_worker_info().name
m = self.ps_rref.rpc_sync().get_model()
Reported by Pylint.
Line: 121
Column: 12
@dist_init(setup_rpc=False)
def test_batch_updating_parameter_server(self):
if self.rank != 0:
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
Reported by Pylint.
Line: 123
Column: 34
if self.rank != 0:
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
Reported by Pylint.
Line: 125
Column: 22
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
else:
rpc.init_rpc(
Reported by Pylint.
Line: 131
Column: 34
)
else:
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
Reported by Pylint.
Line: 133
Column: 22
rpc.init_rpc(
name=worker_name(self.rank),
backend=self.rpc_backend,
rank=self.rank,
world_size=self.world_size,
rpc_backend_options=self.rpc_backend_options,
)
run_ps([f"{worker_name(r)}" for r in range(1, self.world_size)])
Reported by Pylint.
Line: 1
Column: 1
# If you need to modify this file to make this test pass, please also apply same edits accordingly to
# https://github.com/pytorch/examples/blob/master/distributed/rpc/batch/parameter_server.py
# and https://pytorch.org/tutorials/intermediate/rpc_async_execution.html#batch-updating-parameter-server
import threading
from datetime import datetime
from time import perf_counter
import torch
Reported by Pylint.
Line: 1
Column: 1
# If you need to modify this file to make this test pass, please also apply same edits accordingly to
# https://github.com/pytorch/examples/blob/master/distributed/rpc/batch/parameter_server.py
# and https://pytorch.org/tutorials/intermediate/rpc_async_execution.html#batch-updating-parameter-server
import threading
from datetime import datetime
from time import perf_counter
import torch
Reported by Pylint.
caffe2/experiments/python/convnet_benchmarks.py
35 issues
Line: 87
Column: 1
from caffe2.python import cnn, workspace, core
import caffe2.python.SparseTransformer as SparseTransformer # type: ignore[import]
def MLP(order):
model = cnn.CNNModelHelper()
d = 256
Reported by Pylint.
Line: 87
Column: 1
from caffe2.python import cnn, workspace, core
import caffe2.python.SparseTransformer as SparseTransformer # type: ignore[import]
def MLP(order):
model = cnn.CNNModelHelper()
d = 256
Reported by Pylint.
Line: 90
Column: 9
import caffe2.python.SparseTransformer as SparseTransformer # type: ignore[import]
def MLP(order):
model = cnn.CNNModelHelper()
d = 256
depth = 20
width = 3
for i in range(depth):
Reported by Pylint.
Line: 612
Column: 9
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
for i in range(arg.warmup_iterations):
workspace.RunNet(model.net.Proto().name)
plan = core.Plan("plan")
plan.AddStep(core.ExecutionStep("run", model.net, arg.iterations))
start = time.time()
Reported by Pylint.
Line: 90
Column: 1
import caffe2.python.SparseTransformer as SparseTransformer # type: ignore[import]
def MLP(order):
model = cnn.CNNModelHelper()
d = 256
depth = 20
width = 3
for i in range(depth):
Reported by Pylint.
Line: 90
Column: 1
import caffe2.python.SparseTransformer as SparseTransformer # type: ignore[import]
def MLP(order):
model = cnn.CNNModelHelper()
d = 256
depth = 20
width = 3
for i in range(depth):
Reported by Pylint.
Line: 92
Column: 5
def MLP(order):
model = cnn.CNNModelHelper()
d = 256
depth = 20
width = 3
for i in range(depth):
for j in range(width):
current = "fc_{}_{}".format(i, j) if i > 0 else "data"
Reported by Pylint.
Line: 115
Column: 1
return model, d
def AlexNet(order):
model = cnn.CNNModelHelper(order, name="alexnet",
use_cudnn=True, cudnn_exhaustive_search=True)
conv1 = model.Conv(
"data",
"conv1",
Reported by Pylint.
Line: 115
Column: 1
return model, d
def AlexNet(order):
model = cnn.CNNModelHelper(order, name="alexnet",
use_cudnn=True, cudnn_exhaustive_search=True)
conv1 = model.Conv(
"data",
"conv1",
Reported by Pylint.
Line: 115
Column: 1
return model, d
def AlexNet(order):
model = cnn.CNNModelHelper(order, name="alexnet",
use_cudnn=True, cudnn_exhaustive_search=True)
conv1 = model.Conv(
"data",
"conv1",
Reported by Pylint.
test/quantization/eager/test_bias_correction_eager.py
35 issues
Line: 1
Column: 1
import torch
import torch.nn as nn
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.quantization import default_qconfig
from torch.quantization import QuantWrapper
import torch.quantization._numeric_suite as ns
Reported by Pylint.
Line: 2
Column: 1
import torch
import torch.nn as nn
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.quantization import default_qconfig
from torch.quantization import QuantWrapper
import torch.quantization._numeric_suite as ns
Reported by Pylint.
Line: 3
Column: 1
import torch
import torch.nn as nn
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.quantization import default_qconfig
from torch.quantization import QuantWrapper
import torch.quantization._numeric_suite as ns
Reported by Pylint.
Line: 4
Column: 1
import torch
import torch.nn as nn
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.quantization import default_qconfig
from torch.quantization import QuantWrapper
import torch.quantization._numeric_suite as ns
Reported by Pylint.
Line: 6
Column: 1
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.quantization import default_qconfig
from torch.quantization import QuantWrapper
import torch.quantization._numeric_suite as ns
from torch.quantization._correct_bias import (
_supported_modules,
Reported by Pylint.
Line: 7
Column: 1
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.quantization import default_qconfig
from torch.quantization import QuantWrapper
import torch.quantization._numeric_suite as ns
from torch.quantization._correct_bias import (
_supported_modules,
_supported_modules_quantized,
Reported by Pylint.
Line: 8
Column: 1
from torch.quantization import default_qconfig
from torch.quantization import QuantWrapper
import torch.quantization._numeric_suite as ns
from torch.quantization._correct_bias import (
_supported_modules,
_supported_modules_quantized,
bias_correction,
Reported by Pylint.
Line: 10
Column: 1
from torch.quantization import QuantWrapper
import torch.quantization._numeric_suite as ns
from torch.quantization._correct_bias import (
_supported_modules,
_supported_modules_quantized,
bias_correction,
get_module,
get_param,
Reported by Pylint.
Line: 54
Column: 17
if isinstance(submodule, ns.Shadow):
parent_name, child_name = parent_child_names(name)
parent = get_module(artificial_model, parent_name)
parent._modules[child_name] = submodule.orig_module
for name, artificial_submodule in artificial_model.named_modules():
if type(artificial_submodule) in _supported_modules_quantized:
submodule = get_module(float_model, name)
float_bias = get_param(submodule, 'bias')
Reported by Pylint.
Line: 1
Column: 1
import torch
import torch.nn as nn
from torch.testing._internal.common_quantization import QuantizationTestCase
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.quantization import default_qconfig
from torch.quantization import QuantWrapper
import torch.quantization._numeric_suite as ns
Reported by Pylint.
caffe2/python/operator_test/mkl_packed_fc_op_test.py
35 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 22
Column: 26
K=st.integers(128, 1024),
N=st.integers(128, 1024),
**hu.gcs_cpu_only)
@unittest.skipIf(not core.C.builtin_cpu_supports_avx2(),
"Intel MKL sgemm_pack has a known numerical issue with "
"non-avx2 machines that will be fixed in a later build.")
def test_packed_fc(self, seed, M, K, N, gc, dc):
np.random.seed(seed)
X = np.random.rand(M, K).astype(np.float32) - 0.5
Reported by Pylint.
Line: 47
Column: 26
)
self.assertReferenceChecks(gc, op, [X, W, b], ref)
@unittest.skipIf(not core.C.builtin_cpu_supports_avx2(),
"Intel MKL sgemm_pack has a known numerical issue with "
"non-avx2 machines that will be fixed in a later build.")
@given(axis=st.integers(min_value=1, max_value=4),
num_output=st.integers(min_value=4, max_value=8),
**hu.gcs_cpu_only)
Reported by Pylint.
Line: 25
Column: 49
@unittest.skipIf(not core.C.builtin_cpu_supports_avx2(),
"Intel MKL sgemm_pack has a known numerical issue with "
"non-avx2 machines that will be fixed in a later build.")
def test_packed_fc(self, seed, M, K, N, gc, dc):
np.random.seed(seed)
X = np.random.rand(M, K).astype(np.float32) - 0.5
W = np.random.rand(N, K).astype(np.float32) - 0.5
b = np.random.rand(N).astype(np.float32) - 0.5
Reported by Pylint.
Line: 53
Column: 57
@given(axis=st.integers(min_value=1, max_value=4),
num_output=st.integers(min_value=4, max_value=8),
**hu.gcs_cpu_only)
def test_packed_fc_axis(self, axis, num_output, gc, dc):
np.random.seed(1701)
X = np.random.randn(1, 2, 3, 2, 1).astype(np.float32)
K = np.prod(X.shape[axis:])
N = num_output
W = np.random.randn(N, K).astype(np.float32)
Reported by Pylint.
Line: 75
Column: 5
self.assertReferenceChecks(gc, op, [X, W, b], ref)
if __name__ == "__main__":
import unittest
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
Reported by Pylint.
Line: 16
Column: 1
@unittest.skipIf(not core.IsOperator("PackedFC"),
"PackedFC is not supported in this caffe2 build.")
class PackedFCTest(hu.HypothesisTestCase):
@given(seed=st.integers(0, 65536),
M=st.integers(16, 32),
K=st.integers(128, 1024),
N=st.integers(128, 1024),
**hu.gcs_cpu_only)
Reported by Pylint.
Line: 24
Column: 5
**hu.gcs_cpu_only)
@unittest.skipIf(not core.C.builtin_cpu_supports_avx2(),
"Intel MKL sgemm_pack has a known numerical issue with "
"non-avx2 machines that will be fixed in a later build.")
def test_packed_fc(self, seed, M, K, N, gc, dc):
np.random.seed(seed)
X = np.random.rand(M, K).astype(np.float32) - 0.5
W = np.random.rand(N, K).astype(np.float32) - 0.5
b = np.random.rand(N).astype(np.float32) - 0.5
Reported by Pylint.