The following issues were found:
caffe2/python/ideep/operator_fallback_op_test.py
19 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 16
Column: 22
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestFallbackOps(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(8, 10),
Reported by Pylint.
Line: 29
Column: 52
**mu.gcs)
def test_in_place(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, use_bias, gc, dc):
# To expose fallback in-place potential issue, the fallback op
# following ideep op must be run at least two iterations.
conv = core.CreateOperator(
"Conv",
["X", "w", "b"] if use_bias else ["X", "w"],
Reported by Pylint.
Line: 93
Column: 13
print(Y1.flatten())
print(Y0.flatten())
print(np.max(np.abs(Y1 - Y0)))
self.assertTrue(False)
workspace.SwitchWorkspace(old_ws_name)
if __name__ == "__main__":
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
Reported by Pylint.
Line: 17
Column: 1
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestFallbackOps(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(8, 10),
input_channels=st.integers(1, 3),
Reported by Pylint.
Line: 27
Column: 5
batch_size=st.integers(1, 3),
use_bias=st.booleans(),
**mu.gcs)
def test_in_place(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, use_bias, gc, dc):
# To expose fallback in-place potential issue, the fallback op
# following ideep op must be run at least two iterations.
conv = core.CreateOperator(
Reported by Pylint.
Line: 27
Column: 5
batch_size=st.integers(1, 3),
use_bias=st.booleans(),
**mu.gcs)
def test_in_place(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, use_bias, gc, dc):
# To expose fallback in-place potential issue, the fallback op
# following ideep op must be run at least two iterations.
conv = core.CreateOperator(
Reported by Pylint.
Line: 27
Column: 5
batch_size=st.integers(1, 3),
use_bias=st.booleans(),
**mu.gcs)
def test_in_place(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, use_bias, gc, dc):
# To expose fallback in-place potential issue, the fallback op
# following ideep op must be run at least two iterations.
conv = core.CreateOperator(
Reported by Pylint.
.circleci/ecr_gc_docker/docker_hub.py
19 issues
Line: 5
Column: 1
from collections import namedtuple
import boto3
import requests
import os
IMAGE_INFO = namedtuple(
Reported by Pylint.
Line: 15
Column: 34
)
def build_access_token(username, passwordtr):
r = requests.post(
"https://hub.docker.com/v2/users/login/",
data={"username": username, "password": password},
)
r.raise_for_status()
Reported by Pylint.
Line: 15
Column: 24
)
def build_access_token(username, passwordtr):
r = requests.post(
"https://hub.docker.com/v2/users/login/",
data={"username": username, "password": password},
)
r.raise_for_status()
Reported by Pylint.
Line: 21
Column: 5
data={"username": username, "password": password},
)
r.raise_for_status()
token = r.json().get("token")
return {"Authorization": "JWT " + token}
def list_repos(user, token):
r = requests.get("https://hub.docker.com/v2/repositories/" + user, headers=token)
Reported by Pylint.
Line: 25
Column: 22
return {"Authorization": "JWT " + token}
def list_repos(user, token):
r = requests.get("https://hub.docker.com/v2/repositories/" + user, headers=token)
r.raise_for_status()
ret = sorted(
repo["user"] + "/" + repo["name"] for repo in r.json().get("results", [])
)
Reported by Pylint.
Line: 37
Column: 15
return ret
def list_tags(repo, token):
r = requests.get(
"https://hub.docker.com/v2/repositories/" + repo + "/tags", headers=token
)
r.raise_for_status()
return [
Reported by Pylint.
Line: 37
Column: 21
return ret
def list_tags(repo, token):
r = requests.get(
"https://hub.docker.com/v2/repositories/" + repo + "/tags", headers=token
)
r.raise_for_status()
return [
Reported by Pylint.
Line: 54
Column: 16
]
def save_to_s3(tags):
table_content = ""
client = boto3.client("s3")
for t in tags:
table_content += (
"<tr><td>{repo}</td><td>{tag}</td><td>{size}</td>"
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
from collections import namedtuple
import boto3
import requests
import os
Reported by Pylint.
Line: 7
Column: 1
import boto3
import requests
import os
IMAGE_INFO = namedtuple(
"IMAGE_INFO", ("repo", "tag", "size", "last_updated_at", "last_updated_by")
)
Reported by Pylint.
benchmarks/distributed/rpc/parameter_server/server/server.py
19 issues
Line: 9
Column: 1
from metrics.MetricsLogger import MetricsLogger
from utils import sparse_rpc_format_to_tensor, sparse_tensor_to_rpc_format
import torch
import torch.distributed.rpc as rpc
class ParameterServerBase(ABC):
Reported by Pylint.
Line: 10
Column: 1
from utils import sparse_rpc_format_to_tensor, sparse_tensor_to_rpc_format
import torch
import torch.distributed.rpc as rpc
class ParameterServerBase(ABC):
PARAMETER_SERVER_BATCH_METRIC = "parameter_server_batch_metric"
Reported by Pylint.
Line: 54
Column: 28
"""
return
def record_start(self, type, key, name, cuda=True):
r"""
A method that records the start event for a metric.
Args:
type (str): group id for metric
key (str): unique id for metric within a group
Reported by Pylint.
Line: 70
Column: 26
cuda
)
def record_end(self, type, key):
r"""
A method that records the end event for a metric
Args:
type (str): group id for metric
key (str): unique id for metric within a group
Reported by Pylint.
Line: 142
Column: 29
)
@staticmethod
def record_method(name, type="method_metric", cuda=True):
r"""
A decorator that records a metric for the decorated method.
Args:
name (str): description of the metric
type (str): group id for metric
Reported by Pylint.
Line: 154
Column: 17
@functools.wraps(function)
def wrapper(self, *args):
key = time.time()
self.__metrics_logger.record_start(type, key, name, cuda)
result = function(self, *args)
self.__metrics_logger.record_end(type, key)
return result
return wrapper
return decorator
Reported by Pylint.
Line: 156
Column: 17
key = time.time()
self.__metrics_logger.record_start(type, key, name, cuda)
result = function(self, *args)
self.__metrics_logger.record_end(type, key)
return result
return wrapper
return decorator
@staticmethod
Reported by Pylint.
Line: 169
Column: 16
server_rref (RRef): remote reference to the server
"""
self = server_rref.local_value()
return self.__metrics_logger.get_processed_metrics()
def clear_metrics(self):
r"""
A method that clears __metrics_logger recorded metrics.
"""
Reported by Pylint.
Line: 210
Column: 5
self.gradient_dict = {}
@staticmethod
def reset_state(server_rref):
r"""
A method that clears the state of the server.
Args:
server_rref (RRef): remote reference to the server
"""
Reported by Pylint.
Line: 239
Column: 5
self.futures.clear()
self.gradient_dict.clear()
def process_gradient(self, gradient, param_loc):
r"""
Stores the gradient if param_loc is not in gradient_dict.
Adds the gradient to param_loc if it is in gradient_dict.
Args:
gradient (torch.Tensor): tensor sent from trainer
Reported by Pylint.
caffe2/python/operator_test/apmeter_test.py
19 issues
Line: 7
Column: 1
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
Reported by Pylint.
Line: 9
Column: 1
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
def calculate_ap(predictions, labels):
N, D = predictions.shape
Reported by Pylint.
Line: 38
Column: 63
elements=st.integers(min_value=0,
max_value=1)),
**hu.gcs_cpu_only)
def test_average_precision(self, predictions, labels, gc, dc):
op = core.CreateOperator(
"APMeter",
["predictions", "labels"],
["AP"],
buffer_size=10,
Reported by Pylint.
Line: 66
Column: 76
elements=st.integers(min_value=0,
max_value=1)),
**hu.gcs_cpu_only)
def test_average_precision_small_buffer(self, predictions, labels, gc, dc):
op_small_buffer = core.CreateOperator(
"APMeter",
["predictions", "labels"],
["AP"],
buffer_size=5,
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
Reported by Pylint.
Line: 13
Column: 1
import numpy as np
def calculate_ap(predictions, labels):
N, D = predictions.shape
ap = np.zeros(D)
num_range = np.arange((N), dtype=np.float32) + 1
for k in range(D):
scores = predictions[:N, k]
Reported by Pylint.
Line: 14
Column: 5
def calculate_ap(predictions, labels):
N, D = predictions.shape
ap = np.zeros(D)
num_range = np.arange((N), dtype=np.float32) + 1
for k in range(D):
scores = predictions[:N, k]
label = labels[:N, k]
Reported by Pylint.
Line: 14
Column: 8
def calculate_ap(predictions, labels):
N, D = predictions.shape
ap = np.zeros(D)
num_range = np.arange((N), dtype=np.float32) + 1
for k in range(D):
scores = predictions[:N, k]
label = labels[:N, k]
Reported by Pylint.
Line: 15
Column: 5
def calculate_ap(predictions, labels):
N, D = predictions.shape
ap = np.zeros(D)
num_range = np.arange((N), dtype=np.float32) + 1
for k in range(D):
scores = predictions[:N, k]
label = labels[:N, k]
sortind = np.argsort(-scores, kind='mergesort')
Reported by Pylint.
Line: 27
Column: 1
return ap
class TestAPMeterOps(hu.HypothesisTestCase):
@given(predictions=hu.arrays(dims=[10, 3],
elements=hu.floats(allow_nan=False,
allow_infinity=False,
min_value=0.1,
max_value=1)),
Reported by Pylint.
caffe2/python/transformations.py
19 issues
Line: 31
Column: 12
@classmethod
def runTransform(cls, transform_name, net):
pb = net.Proto().SerializeToString()
if C.transform_exists(transform_name):
output = C.run_transform(transform_name, pb)
elif C.workspace_transform_exists(transform_name):
output = C.run_workspace_transform(transform_name, pb)
else:
raise AttributeError('Transformation {} not found.'.format(transform_name))
Reported by Pylint.
Line: 32
Column: 22
def runTransform(cls, transform_name, net):
pb = net.Proto().SerializeToString()
if C.transform_exists(transform_name):
output = C.run_transform(transform_name, pb)
elif C.workspace_transform_exists(transform_name):
output = C.run_workspace_transform(transform_name, pb)
else:
raise AttributeError('Transformation {} not found.'.format(transform_name))
net.Proto().ParseFromString(output)
Reported by Pylint.
Line: 33
Column: 14
pb = net.Proto().SerializeToString()
if C.transform_exists(transform_name):
output = C.run_transform(transform_name, pb)
elif C.workspace_transform_exists(transform_name):
output = C.run_workspace_transform(transform_name, pb)
else:
raise AttributeError('Transformation {} not found.'.format(transform_name))
net.Proto().ParseFromString(output)
Reported by Pylint.
Line: 34
Column: 22
if C.transform_exists(transform_name):
output = C.run_transform(transform_name, pb)
elif C.workspace_transform_exists(transform_name):
output = C.run_workspace_transform(transform_name, pb)
else:
raise AttributeError('Transformation {} not found.'.format(transform_name))
net.Proto().ParseFromString(output)
def __getattr__(self, transform_name):
Reported by Pylint.
Line: 45
Column: 9
def fuseNNPACKConvRelu(net):
net.Proto().ParseFromString(
C.transform_fuseNNPACKConvRelu(net.Proto().SerializeToString())
)
def optimizeForMKLDNN(net, training_mode = False):
net.Proto().ParseFromString(
Reported by Pylint.
Line: 51
Column: 9
def optimizeForMKLDNN(net, training_mode = False):
net.Proto().ParseFromString(
C.transform_optimizeForMKLDNN(net.Proto().SerializeToString(), training_mode)
)
def fuseConvBN(net):
net.Proto().ParseFromString(
Reported by Pylint.
Line: 57
Column: 9
def fuseConvBN(net):
net.Proto().ParseFromString(
C.transform_fuseConvBN(net.Proto().SerializeToString())
)
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
Reported by Pylint.
Line: 24
Column: 1
import caffe2.python._import_c_extension as C
class Transformer(object):
def __init__(self):
pass
@classmethod
def runTransform(cls, transform_name, net):
Reported by Pylint.
Line: 24
Column: 1
import caffe2.python._import_c_extension as C
class Transformer(object):
def __init__(self):
pass
@classmethod
def runTransform(cls, transform_name, net):
Reported by Pylint.
test/onnx/model_defs/dcgan.py
19 issues
Line: 1
Column: 1
import torch
import torch.nn as nn
# configurable
bsz = 64
imgsz = 64
nz = 100
ngf = 64
Reported by Pylint.
Line: 2
Column: 1
import torch
import torch.nn as nn
# configurable
bsz = 64
imgsz = 64
nz = 100
ngf = 64
Reported by Pylint.
Line: 51
Column: 23
# state size. (nc) x 64 x 64
)
def forward(self, input):
if self.ngpu > 1 and isinstance(input.data, torch.cuda.FloatTensor):
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output
Reported by Pylint.
Line: 84
Column: 23
nn.Sigmoid()
)
def forward(self, input):
if self.ngpu > 1 and isinstance(input.data, torch.cuda.FloatTensor):
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
Reported by Pylint.
Line: 1
Column: 1
import torch
import torch.nn as nn
# configurable
bsz = 64
imgsz = 64
nz = 100
ngf = 64
Reported by Pylint.
Line: 6
Column: 1
# configurable
bsz = 64
imgsz = 64
nz = 100
ngf = 64
ndf = 64
nc = 3
Reported by Pylint.
Line: 7
Column: 1
# configurable
bsz = 64
imgsz = 64
nz = 100
ngf = 64
ndf = 64
nc = 3
Reported by Pylint.
Line: 8
Column: 1
# configurable
bsz = 64
imgsz = 64
nz = 100
ngf = 64
ndf = 64
nc = 3
Reported by Pylint.
Line: 9
Column: 1
bsz = 64
imgsz = 64
nz = 100
ngf = 64
ndf = 64
nc = 3
# custom weights initialization called on netG and netD
Reported by Pylint.
Line: 10
Column: 1
imgsz = 64
nz = 100
ngf = 64
ndf = 64
nc = 3
# custom weights initialization called on netG and netD
def weights_init(m):
Reported by Pylint.
caffe2/python/parallel_workers_test.py
19 issues
Line: 24
Column: 9
# does RunOperatorOnce instead of CreateNet+RunNet, we have to precreate
# all blobs beforehand
for i in range(100):
workspace.C.Workspace.current.create_blob("blob_" + str(i))
workspace.C.Workspace.current.create_blob("status_blob_" + str(i))
workspace.C.Workspace.current.create_blob("dequeue_blob")
workspace.C.Workspace.current.create_blob("status_blob")
return queue
Reported by Pylint.
Line: 25
Column: 9
# all blobs beforehand
for i in range(100):
workspace.C.Workspace.current.create_blob("blob_" + str(i))
workspace.C.Workspace.current.create_blob("status_blob_" + str(i))
workspace.C.Workspace.current.create_blob("dequeue_blob")
workspace.C.Workspace.current.create_blob("status_blob")
return queue
Reported by Pylint.
Line: 26
Column: 5
for i in range(100):
workspace.C.Workspace.current.create_blob("blob_" + str(i))
workspace.C.Workspace.current.create_blob("status_blob_" + str(i))
workspace.C.Workspace.current.create_blob("dequeue_blob")
workspace.C.Workspace.current.create_blob("status_blob")
return queue
Reported by Pylint.
Line: 27
Column: 5
workspace.C.Workspace.current.create_blob("blob_" + str(i))
workspace.C.Workspace.current.create_blob("status_blob_" + str(i))
workspace.C.Workspace.current.create_blob("dequeue_blob")
workspace.C.Workspace.current.create_blob("status_blob")
return queue
def create_worker(queue, get_blob_data):
Reported by Pylint.
Line: 63
Column: 45
workspace.ResetWorkspace()
queue = create_queue()
dummy_worker = create_worker(queue, lambda worker_id: str(worker_id))
worker_coordinator = parallel_workers.init_workers(dummy_worker)
worker_coordinator.start()
for _ in range(10):
value = dequeue_value(queue)
Reported by Pylint.
Line: 84
Column: 42
)
workspace.FeedBlob('data', 'not initialized')
def init_fun(worker_coordinator, global_coordinator):
workspace.FeedBlob('data', 'initialized')
worker_coordinator = parallel_workers.init_workers(
dummy_worker, init_fun=init_fun
)
Reported by Pylint.
Line: 84
Column: 22
)
workspace.FeedBlob('data', 'not initialized')
def init_fun(worker_coordinator, global_coordinator):
workspace.FeedBlob('data', 'initialized')
worker_coordinator = parallel_workers.init_workers(
dummy_worker, init_fun=init_fun
)
Reported by Pylint.
Line: 105
Column: 45
workspace.ResetWorkspace()
queue = create_queue()
dummy_worker = create_worker(queue, lambda worker_id: str(worker_id))
workspace.FeedBlob('data', 'not shutdown')
def shutdown_fun():
workspace.FeedBlob('data', 'shutdown')
Reported by Pylint.
Line: 1
Column: 1
import unittest
from caffe2.python import workspace, core
import caffe2.python.parallel_workers as parallel_workers
Reported by Pylint.
Line: 12
Column: 1
import caffe2.python.parallel_workers as parallel_workers
def create_queue():
queue = 'queue'
workspace.RunOperatorOnce(
core.CreateOperator(
"CreateBlobsQueue", [], [queue], num_blobs=1, capacity=1000
Reported by Pylint.
test/distributed/argparse_util_test.py
19 issues
Line: 12
Column: 1
import unittest
from argparse import ArgumentParser
from torch.distributed.argparse_util import check_env, env
class ArgParseUtilTest(unittest.TestCase):
def setUp(self):
# remove any lingering environment variables
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
Reported by Pylint.
Line: 15
Column: 1
from torch.distributed.argparse_util import check_env, env
class ArgParseUtilTest(unittest.TestCase):
def setUp(self):
# remove any lingering environment variables
for e in os.environ.keys():
if e.startswith("PET_"):
del os.environ[e]
Reported by Pylint.
Line: 18
Column: 13
class ArgParseUtilTest(unittest.TestCase):
def setUp(self):
# remove any lingering environment variables
for e in os.environ.keys():
if e.startswith("PET_"):
del os.environ[e]
def test_env_string_arg_no_env(self):
parser = ArgumentParser()
Reported by Pylint.
Line: 18
Column: 18
class ArgParseUtilTest(unittest.TestCase):
def setUp(self):
# remove any lingering environment variables
for e in os.environ.keys():
if e.startswith("PET_"):
del os.environ[e]
def test_env_string_arg_no_env(self):
parser = ArgumentParser()
Reported by Pylint.
Line: 22
Column: 5
if e.startswith("PET_"):
del os.environ[e]
def test_env_string_arg_no_env(self):
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env, default="bar")
self.assertEqual("bar", parser.parse_args([]).foo)
self.assertEqual("baz", parser.parse_args(["-f", "baz"]).foo)
Reported by Pylint.
Line: 30
Column: 5
self.assertEqual("baz", parser.parse_args(["-f", "baz"]).foo)
self.assertEqual("baz", parser.parse_args(["--foo", "baz"]).foo)
def test_env_string_arg_env(self):
os.environ["PET_FOO"] = "env_baz"
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env, default="bar")
self.assertEqual("env_baz", parser.parse_args([]).foo)
Reported by Pylint.
Line: 39
Column: 5
self.assertEqual("baz", parser.parse_args(["-f", "baz"]).foo)
self.assertEqual("baz", parser.parse_args(["--foo", "baz"]).foo)
def test_env_int_arg_no_env(self):
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env, default=1, type=int)
self.assertEqual(1, parser.parse_args([]).foo)
self.assertEqual(2, parser.parse_args(["-f", "2"]).foo)
Reported by Pylint.
Line: 47
Column: 5
self.assertEqual(2, parser.parse_args(["-f", "2"]).foo)
self.assertEqual(2, parser.parse_args(["--foo", "2"]).foo)
def test_env_int_arg_env(self):
os.environ["PET_FOO"] = "3"
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env, default=1, type=int)
self.assertEqual(3, parser.parse_args([]).foo)
Reported by Pylint.
Line: 56
Column: 5
self.assertEqual(2, parser.parse_args(["-f", "2"]).foo)
self.assertEqual(2, parser.parse_args(["--foo", "2"]).foo)
def test_env_no_default_no_env(self):
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env)
self.assertIsNone(parser.parse_args([]).foo)
self.assertEqual("baz", parser.parse_args(["-f", "baz"]).foo)
Reported by Pylint.
test/jit/test_attr.py
19 issues
Line: 1
Column: 1
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
import torch
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 2
Column: 1
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
import torch
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 3
Column: 1
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
import torch
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 22
Column: 21
def forward(self, x):
y = getattr(self, "init_attr_val") # noqa: B009
w : list[float] = [1.0]
z = getattr(self, "missing", w) # noqa: B009
z.append(y)
return z
result = A().forward(0.0)
Reported by Pylint.
Line: 20
Column: 31
super(A, self).__init__()
self.init_attr_val = 1.0
def forward(self, x):
y = getattr(self, "init_attr_val") # noqa: B009
w : list[float] = [1.0]
z = getattr(self, "missing", w) # noqa: B009
z.append(y)
return z
Reported by Pylint.
Line: 1
Column: 1
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
import torch
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
Reported by Pylint.
Line: 12
Column: 1
"instead.")
class TestGetDefaultAttr(JitTestCase):
def test_getattr_with_default(self):
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
Reported by Pylint.
Line: 12
Column: 1
"instead.")
class TestGetDefaultAttr(JitTestCase):
def test_getattr_with_default(self):
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
Reported by Pylint.
Line: 13
Column: 5
class TestGetDefaultAttr(JitTestCase):
def test_getattr_with_default(self):
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.init_attr_val = 1.0
Reported by Pylint.
Line: 15
Column: 9
class TestGetDefaultAttr(JitTestCase):
def test_getattr_with_default(self):
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.init_attr_val = 1.0
def forward(self, x):
Reported by Pylint.
scripts/release_notes/namespace_check.py
19 issues
Line: 2
Column: 1
import argparse
import torch
from os import path
import json
# Import all utils so that getattr below can find them
from torch.utils import bottleneck, checkpoint, model_zoo
all_submod_list = [
Reported by Pylint.
Line: 7
Column: 1
import json
# Import all utils so that getattr below can find them
from torch.utils import bottleneck, checkpoint, model_zoo
all_submod_list = [
"",
"nn",
"nn.functional",
Reported by Pylint.
Line: 7
Column: 1
import json
# Import all utils so that getattr below can find them
from torch.utils import bottleneck, checkpoint, model_zoo
all_submod_list = [
"",
"nn",
"nn.functional",
Reported by Pylint.
Line: 7
Column: 1
import json
# Import all utils so that getattr below can find them
from torch.utils import bottleneck, checkpoint, model_zoo
all_submod_list = [
"",
"nn",
"nn.functional",
Reported by Pylint.
Line: 7
Column: 1
import json
# Import all utils so that getattr below can find them
from torch.utils import bottleneck, checkpoint, model_zoo
all_submod_list = [
"",
"nn",
"nn.functional",
Reported by Pylint.
Line: 1
Column: 1
import argparse
import torch
from os import path
import json
# Import all utils so that getattr below can find them
from torch.utils import bottleneck, checkpoint, model_zoo
all_submod_list = [
Reported by Pylint.
Line: 3
Column: 1
import argparse
import torch
from os import path
import json
# Import all utils so that getattr below can find them
from torch.utils import bottleneck, checkpoint, model_zoo
all_submod_list = [
Reported by Pylint.
Line: 4
Column: 1
import argparse
import torch
from os import path
import json
# Import all utils so that getattr below can find them
from torch.utils import bottleneck, checkpoint, model_zoo
all_submod_list = [
Reported by Pylint.
Line: 32
Column: 1
"utils.model_zoo",
]
def get_content(submod):
mod = torch
if submod:
submod = submod.split(".")
for name in submod:
mod = getattr(mod, name)
Reported by Pylint.
Line: 41
Column: 1
content = dir(mod)
return content
def namespace_filter(data):
out = set(d for d in data if d[0] != "_")
return out
def run(args, submod):
print(f"## Processing torch.{submod}")
Reported by Pylint.