The following issues were found:
caffe2/python/operator_test/split_op_cost_test.py
24 issues
Line: 7
Column: 28
class TestSplitOpCost(TestCase):
def _verify_cost(self, workspace, split_op):
flops, bytes_written, bytes_read = workspace.GetOperatorCost(
split_op, split_op.input
)
self.assertEqual(flops, 0)
self.assertEqual(
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
class TestSplitOpCost(TestCase):
def _verify_cost(self, workspace, split_op):
flops, bytes_written, bytes_read = workspace.GetOperatorCost(
split_op, split_op.input
Reported by Pylint.
Line: 6
Column: 1
from caffe2.python.test_util import TestCase
class TestSplitOpCost(TestCase):
def _verify_cost(self, workspace, split_op):
flops, bytes_written, bytes_read = workspace.GetOperatorCost(
split_op, split_op.input
)
self.assertEqual(flops, 0)
Reported by Pylint.
Line: 21
Column: 5
sum(workspace.FetchBlob(b).nbytes for b in split_op.output),
)
def test_columnwise_equal_outputSplit(self):
workspace.ResetWorkspace()
workspace.FeedBlob("input", np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))
split_op = core.CreateOperator(
"Split",
["input"],
Reported by Pylint.
Line: 21
Column: 5
sum(workspace.FetchBlob(b).nbytes for b in split_op.output),
)
def test_columnwise_equal_outputSplit(self):
workspace.ResetWorkspace()
workspace.FeedBlob("input", np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))
split_op = core.CreateOperator(
"Split",
["input"],
Reported by Pylint.
Line: 43
Column: 5
self._verify_cost(workspace, split_op)
def test_rowwise_equal_outputSplit(self):
workspace.ResetWorkspace()
workspace.FeedBlob("input", np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))
split_op = core.CreateOperator(
"Split",
["input"],
Reported by Pylint.
Line: 43
Column: 5
self._verify_cost(workspace, split_op)
def test_rowwise_equal_outputSplit(self):
workspace.ResetWorkspace()
workspace.FeedBlob("input", np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))
split_op = core.CreateOperator(
"Split",
["input"],
Reported by Pylint.
Line: 63
Column: 5
self._verify_cost(workspace, split_op)
def test_columnwise_equal_outputSplit_columnRemoved(self):
workspace.ResetWorkspace()
workspace.FeedBlob("input", np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))
# To be able to use 'add_axis' (which should have been called 'remove_axis') on 'axis',
# the dimensions of split tensors must match on 'axis'
split_op = core.CreateOperator(
Reported by Pylint.
Line: 63
Column: 5
self._verify_cost(workspace, split_op)
def test_columnwise_equal_outputSplit_columnRemoved(self):
workspace.ResetWorkspace()
workspace.FeedBlob("input", np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))
# To be able to use 'add_axis' (which should have been called 'remove_axis') on 'axis',
# the dimensions of split tensors must match on 'axis'
split_op = core.CreateOperator(
Reported by Pylint.
Line: 89
Column: 5
self._verify_cost(workspace, split_op)
def test_rowwise_equal_outputSplit_rowRemoved(self):
workspace.ResetWorkspace()
workspace.FeedBlob("input", np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))
split_op = core.CreateOperator(
"Split",
["input"],
Reported by Pylint.
test/distributed/nn/jit/test_instantiator.py
24 issues
Line: 7
Column: 1
import unittest
from typing import Tuple
import torch
from torch import Tensor, nn
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
Reported by Pylint.
Line: 8
Column: 1
from typing import Tuple
import torch
from torch import Tensor, nn
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
Reported by Pylint.
Line: 9
Column: 1
import torch
from torch import Tensor, nn
import torch.distributed as dist
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
Reported by Pylint.
Line: 15
Column: 1
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
from torch.distributed.nn.jit import instantiator
from torch.testing._internal.common_utils import run_tests
@torch.jit.interface
class MyModuleInterface:
Reported by Pylint.
Line: 16
Column: 1
sys.exit(0)
from torch.distributed.nn.jit import instantiator
from torch.testing._internal.common_utils import run_tests
@torch.jit.interface
class MyModuleInterface:
def forward(
Reported by Pylint.
Line: 50
Column: 39
dir_path = pathlib.Path(instantiator.INSTANTIATED_TEMPLATE_DIR_PATH)
# Cleanup.
file_paths = dir_path.glob(f"{instantiator._FILE_PREFIX}*.py")
for file_path in file_paths:
file_path.unlink()
# Check before run.
file_paths = dir_path.glob(f"{instantiator._FILE_PREFIX}*.py")
Reported by Pylint.
Line: 55
Column: 39
file_path.unlink()
# Check before run.
file_paths = dir_path.glob(f"{instantiator._FILE_PREFIX}*.py")
num_files_before = len(list(file_paths))
self.assertEqual(num_files_before, 0)
generated_module = instantiator.instantiate_scriptable_remote_module_template(
MyModuleInterface
Reported by Pylint.
Line: 66
Column: 39
self.assertTrue(hasattr(generated_module, "_generated_methods"))
# Check after run.
file_paths = dir_path.glob(f"{instantiator._FILE_PREFIX}*.py")
num_files_after = len(list(file_paths))
self.assertEqual(num_files_after, 1)
def test_instantiate_non_scripted_remote_module_template(self):
dir_path = pathlib.Path(instantiator.INSTANTIATED_TEMPLATE_DIR_PATH)
Reported by Pylint.
Line: 74
Column: 39
dir_path = pathlib.Path(instantiator.INSTANTIATED_TEMPLATE_DIR_PATH)
# Cleanup.
file_paths = dir_path.glob(f"{instantiator._FILE_PREFIX}*.py")
for file_path in file_paths:
file_path.unlink()
# Check before run.
file_paths = dir_path.glob(f"{instantiator._FILE_PREFIX}*.py")
Reported by Pylint.
Line: 79
Column: 39
file_path.unlink()
# Check before run.
file_paths = dir_path.glob(f"{instantiator._FILE_PREFIX}*.py")
num_files_before = len(list(file_paths))
self.assertEqual(num_files_before, 0)
generated_module = (
instantiator.instantiate_non_scriptable_remote_module_template()
Reported by Pylint.
caffe2/python/operator_test/flexible_top_k_test.py
24 issues
Line: 11
Column: 1
import caffe2.python.serialized_test.serialized_test_util as serial
from collections import OrderedDict
from hypothesis import given, settings
import numpy as np
class TestFlexibleTopK(serial.SerializedTestCase):
def flexible_top_k_ref(self, X, k):
Reported by Pylint.
Line: 44
Column: 42
@given(X=hu.tensor(min_dim=2), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_flexible_top_k(self, X, gc, dc):
X = X.astype(dtype=np.float32)
k_shape = (int(X.size / X.shape[-1]), )
k = np.random.randint(1, high=X.shape[-1] + 1, size=k_shape)
output_list = ["Values", "Indices"]
Reported by Pylint.
Line: 61
Column: 47
@given(X=hu.tensor(min_dim=2), **hu.gcs_cpu_only)
@settings(deadline=10000)
def test_flexible_top_k_grad(self, X, gc, dc):
X = X.astype(np.float32)
k_shape = (int(X.size / X.shape[-1]), )
k = np.random.randint(1, high=X.shape[-1] + 1, size=k_shape)
# this try to make sure adding stepsize (0.05)
Reported by Pylint.
Line: 1
Column: 1
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
Reported by Pylint.
Line: 10
Column: 1
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from collections import OrderedDict
from hypothesis import given, settings
import numpy as np
class TestFlexibleTopK(serial.SerializedTestCase):
Reported by Pylint.
Line: 15
Column: 1
import numpy as np
class TestFlexibleTopK(serial.SerializedTestCase):
def flexible_top_k_ref(self, X, k):
X_flat = X.reshape((-1, X.shape[-1]))
indices_ref = np.ndarray(shape=sum(k), dtype=np.int32)
values_ref = np.ndarray(shape=sum(k), dtype=np.float32)
offset = 0
Reported by Pylint.
Line: 16
Column: 5
class TestFlexibleTopK(serial.SerializedTestCase):
def flexible_top_k_ref(self, X, k):
X_flat = X.reshape((-1, X.shape[-1]))
indices_ref = np.ndarray(shape=sum(k), dtype=np.int32)
values_ref = np.ndarray(shape=sum(k), dtype=np.float32)
offset = 0
for i in range(X_flat.shape[0]):
Reported by Pylint.
Line: 16
Column: 5
class TestFlexibleTopK(serial.SerializedTestCase):
def flexible_top_k_ref(self, X, k):
X_flat = X.reshape((-1, X.shape[-1]))
indices_ref = np.ndarray(shape=sum(k), dtype=np.int32)
values_ref = np.ndarray(shape=sum(k), dtype=np.float32)
offset = 0
for i in range(X_flat.shape[0]):
Reported by Pylint.
Line: 16
Column: 5
class TestFlexibleTopK(serial.SerializedTestCase):
def flexible_top_k_ref(self, X, k):
X_flat = X.reshape((-1, X.shape[-1]))
indices_ref = np.ndarray(shape=sum(k), dtype=np.int32)
values_ref = np.ndarray(shape=sum(k), dtype=np.float32)
offset = 0
for i in range(X_flat.shape[0]):
Reported by Pylint.
Line: 17
Column: 9
class TestFlexibleTopK(serial.SerializedTestCase):
def flexible_top_k_ref(self, X, k):
X_flat = X.reshape((-1, X.shape[-1]))
indices_ref = np.ndarray(shape=sum(k), dtype=np.int32)
values_ref = np.ndarray(shape=sum(k), dtype=np.float32)
offset = 0
for i in range(X_flat.shape[0]):
od = OrderedDict()
Reported by Pylint.
.github/scripts/run_torchbench.py
24 issues
Line: 34
Column: 53
timeout: 720
tests:"""
def gen_abtest_config(control: str, treatment: str, models: List[str]) -> str:
d = {}
d["control"] = control
d["treatment"] = treatment
config = ABTEST_CONFIG_TEMPLATE.format(**d)
if models == ["ALL"]:
Reported by Pylint.
Line: 46
Column: 30
config = config + "\n"
return config
def deploy_torchbench_config(output_dir: str, config: str) -> None:
# Create test dir if needed
pathlib.Path(output_dir).mkdir(exist_ok=True)
# TorchBench config file name
config_path = os.path.join(output_dir, TORCHBENCH_CONFIG_NAME)
with open(config_path, "w") as fp:
Reported by Pylint.
Line: 74
Column: 61
return []
return model_list
def run_torchbench(pytorch_path: str, torchbench_path: str, output_dir: str) -> None:
# Copy system environment so that we will not override
env = dict(os.environ)
command = ["python", "bisection.py", "--work-dir", output_dir,
"--pytorch-src", pytorch_path, "--torchbench-src", torchbench_path,
"--config", os.path.join(output_dir, "config.yaml"),
Reported by Pylint.
Line: 18
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b404-import-subprocess
import os
import pathlib
import argparse
import subprocess
from typing import List
CUDA_VERSION = "cu102"
PYTHON_VERSION = "3.7"
Reported by Bandit.
Line: 34
Column: 1
timeout: 720
tests:"""
def gen_abtest_config(control: str, treatment: str, models: List[str]) -> str:
d = {}
d["control"] = control
d["treatment"] = treatment
config = ABTEST_CONFIG_TEMPLATE.format(**d)
if models == ["ALL"]:
Reported by Pylint.
Line: 35
Column: 5
tests:"""
def gen_abtest_config(control: str, treatment: str, models: List[str]) -> str:
d = {}
d["control"] = control
d["treatment"] = treatment
config = ABTEST_CONFIG_TEMPLATE.format(**d)
if models == ["ALL"]:
return config + "\n"
Reported by Pylint.
Line: 46
Column: 1
config = config + "\n"
return config
def deploy_torchbench_config(output_dir: str, config: str) -> None:
# Create test dir if needed
pathlib.Path(output_dir).mkdir(exist_ok=True)
# TorchBench config file name
config_path = os.path.join(output_dir, TORCHBENCH_CONFIG_NAME)
with open(config_path, "w") as fp:
Reported by Pylint.
Line: 51
Column: 36
pathlib.Path(output_dir).mkdir(exist_ok=True)
# TorchBench config file name
config_path = os.path.join(output_dir, TORCHBENCH_CONFIG_NAME)
with open(config_path, "w") as fp:
fp.write(config)
def extract_models_from_pr(torchbench_path: str, prbody_file: str) -> List[str]:
model_list = []
with open(prbody_file, "r") as pf:
Reported by Pylint.
Line: 54
Column: 1
with open(config_path, "w") as fp:
fp.write(config)
def extract_models_from_pr(torchbench_path: str, prbody_file: str) -> List[str]:
model_list = []
with open(prbody_file, "r") as pf:
lines = map(lambda x: x.strip(), pf.read().splitlines())
magic_lines = list(filter(lambda x: x.startswith(MAGIC_PREFIX), lines))
if magic_lines:
Reported by Pylint.
Line: 56
Column: 36
def extract_models_from_pr(torchbench_path: str, prbody_file: str) -> List[str]:
model_list = []
with open(prbody_file, "r") as pf:
lines = map(lambda x: x.strip(), pf.read().splitlines())
magic_lines = list(filter(lambda x: x.startswith(MAGIC_PREFIX), lines))
if magic_lines:
# Only the first magic line will be respected.
model_list = list(map(lambda x: x.strip(), magic_lines[0][len(MAGIC_PREFIX):].split(",")))
Reported by Pylint.
.circleci/ecr_gc_docker/gc.py
24 issues
Line: 4
Column: 1
#!/usr/bin/env python3
import argparse
import boto3
import datetime
import pytz
import re
import sys
Reported by Pylint.
Line: 6
Column: 1
import argparse
import boto3
import datetime
import pytz
import re
import sys
def save_to_s3(project, data):
Reported by Pylint.
Line: 13
Column: 5
def save_to_s3(project, data):
table_content = ""
client = boto3.client("s3")
for repo, tag, window, age, pushed in data:
table_content += "<tr><td>{repo}</td><td>{tag}</td><td>{window}</td><td>{age}</td><td>{pushed}</td></tr>".format(
repo=repo, tag=tag, window=window, age=age, pushed=pushed
)
html_body = """
Reported by Pylint.
Line: 14
Column: 9
def save_to_s3(project, data):
table_content = ""
client = boto3.client("s3")
for repo, tag, window, age, pushed in data:
table_content += "<tr><td>{repo}</td><td>{tag}</td><td>{window}</td><td>{age}</td><td>{pushed}</td></tr>".format(
repo=repo, tag=tag, window=window, age=age, pushed=pushed
)
html_body = """
<html>
Reported by Pylint.
Line: 14
Column: 15
def save_to_s3(project, data):
table_content = ""
client = boto3.client("s3")
for repo, tag, window, age, pushed in data:
table_content += "<tr><td>{repo}</td><td>{tag}</td><td>{window}</td><td>{age}</td><td>{pushed}</td></tr>".format(
repo=repo, tag=tag, window=window, age=age, pushed=pushed
)
html_body = """
<html>
Reported by Pylint.
Line: 14
Column: 28
def save_to_s3(project, data):
table_content = ""
client = boto3.client("s3")
for repo, tag, window, age, pushed in data:
table_content += "<tr><td>{repo}</td><td>{tag}</td><td>{window}</td><td>{age}</td><td>{pushed}</td></tr>".format(
repo=repo, tag=tag, window=window, age=age, pushed=pushed
)
html_body = """
<html>
Reported by Pylint.
Line: 14
Column: 20
def save_to_s3(project, data):
table_content = ""
client = boto3.client("s3")
for repo, tag, window, age, pushed in data:
table_content += "<tr><td>{repo}</td><td>{tag}</td><td>{window}</td><td>{age}</td><td>{pushed}</td></tr>".format(
repo=repo, tag=tag, window=window, age=age, pushed=pushed
)
html_body = """
<html>
Reported by Pylint.
Line: 69
Column: 11
)
def repos(client):
paginator = client.get_paginator("describe_repositories")
pages = paginator.paginate(registryId="308535385114")
for page in pages:
for repo in page["repositories"]:
yield repo
Reported by Pylint.
Line: 73
Column: 13
paginator = client.get_paginator("describe_repositories")
pages = paginator.paginate(registryId="308535385114")
for page in pages:
for repo in page["repositories"]:
yield repo
def images(client, repository):
paginator = client.get_paginator("describe_images")
Reported by Pylint.
Line: 77
Column: 12
yield repo
def images(client, repository):
paginator = client.get_paginator("describe_images")
pages = paginator.paginate(
registryId="308535385114", repositoryName=repository["repositoryName"]
)
for page in pages:
Reported by Pylint.
caffe2/python/mkl/rewrite_graph.py
24 issues
Line: 19
Column: 5
for (i, op) in reversed(list(enumerate(ops))):
if blob in op.output:
return i
raise ValueError("Failed to find last producer of blob, %s", blob)
def fix_BoxWithNMSLimit(net):
outputs = set()
for op in net.op:
Reported by Pylint.
Line: 1
Column: 1
import copy
from caffe2.proto import caffe2_pb2
from caffe2.python import core
Reported by Pylint.
Line: 11
Column: 1
from caffe2.python import core
def rewrite_init_net_simple(net):
for op in net.op:
op.device_option.device_type = caffe2_pb2.IDEEP
def last_producer(ops, blob):
for (i, op) in reversed(list(enumerate(ops))):
Reported by Pylint.
Line: 12
Column: 9
def rewrite_init_net_simple(net):
for op in net.op:
op.device_option.device_type = caffe2_pb2.IDEEP
def last_producer(ops, blob):
for (i, op) in reversed(list(enumerate(ops))):
if blob in op.output:
Reported by Pylint.
Line: 15
Column: 1
for op in net.op:
op.device_option.device_type = caffe2_pb2.IDEEP
def last_producer(ops, blob):
for (i, op) in reversed(list(enumerate(ops))):
if blob in op.output:
return i
raise ValueError("Failed to find last producer of blob, %s", blob)
Reported by Pylint.
Line: 16
Column: 13
op.device_option.device_type = caffe2_pb2.IDEEP
def last_producer(ops, blob):
for (i, op) in reversed(list(enumerate(ops))):
if blob in op.output:
return i
raise ValueError("Failed to find last producer of blob, %s", blob)
Reported by Pylint.
Line: 22
Column: 1
raise ValueError("Failed to find last producer of blob, %s", blob)
def fix_BoxWithNMSLimit(net):
outputs = set()
for op in net.op:
if op.type == 'BoxWithNMSLimit':
outputs.add(op.output[0])
outputs.add(op.output[1])
Reported by Pylint.
Line: 22
Column: 1
raise ValueError("Failed to find last producer of blob, %s", blob)
def fix_BoxWithNMSLimit(net):
outputs = set()
for op in net.op:
if op.type == 'BoxWithNMSLimit':
outputs.add(op.output[0])
outputs.add(op.output[1])
Reported by Pylint.
Line: 24
Column: 9
def fix_BoxWithNMSLimit(net):
outputs = set()
for op in net.op:
if op.type == 'BoxWithNMSLimit':
outputs.add(op.output[0])
outputs.add(op.output[1])
outputs.add(op.output[2])
for op in net.op:
Reported by Pylint.
Line: 29
Column: 9
outputs.add(op.output[0])
outputs.add(op.output[1])
outputs.add(op.output[2])
for op in net.op:
if op.type == 'CopyIDEEPToCPU':
if op.input[0] in outputs:
print("Chaning CopyIDEEPToCPU to Copy for {}".format(op.input[0]))
op.type = 'Copy'
op.device_option.device_type = caffe2_pb2.CPU
Reported by Pylint.
benchmarks/distributed/rpc/rl/coordinator.py
24 issues
Line: 4
Column: 1
import numpy as np
import time
import torch
import torch.distributed.rpc as rpc
from agent import AgentBase
from observer import ObserverBase
Reported by Pylint.
Line: 5
Column: 1
import time
import torch
import torch.distributed.rpc as rpc
from agent import AgentBase
from observer import ObserverBase
COORDINATOR_NAME = "coordinator"
Reported by Pylint.
Line: 73
Column: 13
print(f"Episode {ep} - ", end='')
n_steps = episode_steps
agent_start_time = time.time()
futs = []
for ob_rref in self.ob_rrefs:
futs.append(ob_rref.rpc_async().run_ob_episode(
self.agent_rref, n_steps))
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import time
import torch
import torch.distributed.rpc as rpc
from agent import AgentBase
from observer import ObserverBase
Reported by Pylint.
Line: 2
Column: 1
import numpy as np
import time
import torch
import torch.distributed.rpc as rpc
from agent import AgentBase
from observer import ObserverBase
Reported by Pylint.
Line: 17
Column: 1
EPISODE_STEPS = 100
class CoordinatorBase:
def __init__(self, batch_size, batch, state_size, nlayers, out_features):
r"""
Coordinator object to run on worker. Only one coordinator exists. Responsible
for facilitating communication between agent and observers and recording benchmark
throughput and latency data.
Reported by Pylint.
Line: 17
Column: 1
EPISODE_STEPS = 100
class CoordinatorBase:
def __init__(self, batch_size, batch, state_size, nlayers, out_features):
r"""
Coordinator object to run on worker. Only one coordinator exists. Responsible
for facilitating communication between agent and observers and recording benchmark
throughput and latency data.
Reported by Pylint.
Line: 18
Column: 5
class CoordinatorBase:
def __init__(self, batch_size, batch, state_size, nlayers, out_features):
r"""
Coordinator object to run on worker. Only one coordinator exists. Responsible
for facilitating communication between agent and observers and recording benchmark
throughput and latency data.
Args:
Reported by Pylint.
Line: 25
Column: 1
throughput and latency data.
Args:
batch_size (int): Number of observer requests to process in a batch
batch (bool): Whether to process and respond to observer requests as a batch or 1 at a time
state_size (list): List of ints dictating the dimensions of the state
nlayers (int): Number of layers in the model
out_features (int): Number of out features in the model
"""
self.batch_size = batch_size
Reported by Pylint.
Line: 49
Column: 5
self.agent_rref.rpc_sync().set_world(
batch_size, state_size, nlayers, out_features, self.batch)
def run_coordinator(self, episodes, episode_steps, queue):
r"""
Runs n benchmark episodes. Each episode is started by coordinator telling each
observer to contact the agent. Each episode is concluded by coordinator telling agent
to finish the episode, and then the coordinator records benchmark data
Args:
Reported by Pylint.
caffe2/python/models/resnet.py
24 issues
Line: 11
Column: 1
from caffe2.python import brew
import logging
'''
Utility for creating ResNe(X)t
"Deep Residual Learning for Image Recognition" by He, Zhang et. al. 2015
"Aggregated Residual Transformations for Deep Neural Networks" by Xie et. al. 2016
'''
Reported by Pylint.
Line: 86
Column: 5
)
return self.prev_blob
'''
Add a "bottleneck" component as described in He et. al. Figure 3 (right)
'''
def add_bottleneck(
self,
Reported by Pylint.
Line: 306
Column: 5
conv1_kernel=7,
conv1_stride=2,
final_avg_kernel=7,
log=None,
bn_epsilon=1e-5,
bn_momentum=0.9,
):
if num_layers not in RESNEXT_BLOCK_CONFIG:
log.error("{}-layer is invalid for resnext config".format(num_layers))
Reported by Pylint.
Line: 1
Column: 1
## @package resnet
# Module caffe2.python.models.resnet
from caffe2.python import brew
import logging
Reported by Pylint.
Line: 9
Column: 1
from caffe2.python import brew
import logging
'''
Utility for creating ResNe(X)t
"Deep Residual Learning for Image Recognition" by He, Zhang et. al. 2015
"Aggregated Residual Transformations for Deep Neural Networks" by Xie et. al. 2016
Reported by Pylint.
Line: 18
Column: 1
'''
class ResNetBuilder():
'''
Helper class for constructing residual blocks.
'''
def __init__(
Reported by Pylint.
Line: 23
Column: 5
Helper class for constructing residual blocks.
'''
def __init__(
self,
model,
prev_blob,
no_bias,
is_test,
Reported by Pylint.
Line: 41
Column: 5
self.bn_momentum = bn_momentum
self.no_bias = 1 if no_bias else 0
def add_conv(
self,
in_filters,
out_filters,
kernel,
stride=1,
Reported by Pylint.
Line: 41
Column: 5
self.bn_momentum = bn_momentum
self.no_bias = 1 if no_bias else 0
def add_conv(
self,
in_filters,
out_filters,
kernel,
stride=1,
Reported by Pylint.
Line: 66
Column: 5
)
return self.prev_blob
def add_relu(self):
self.prev_blob = brew.relu(
self.model,
self.prev_blob,
self.prev_blob, # in-place
)
Reported by Pylint.
torch/optim/_multi_tensor/radam.py
24 issues
Line: 2
Column: 1
import torch
from . import _functional as F
from ..optimizer import Optimizer
from collections import defaultdict
class RAdam(Optimizer):
r"""Implements RAdam algorithm with multi tensor APIs.
It has been proposed in `On the variance of the adaptive learning rate and beyond`_.
Reported by Pylint.
Line: 3
Column: 1
import torch
from . import _functional as F
from ..optimizer import Optimizer
from collections import defaultdict
class RAdam(Optimizer):
r"""Implements RAdam algorithm with multi tensor APIs.
It has been proposed in `On the variance of the adaptive learning rate and beyond`_.
Reported by Pylint.
Line: 75
Column: 40
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avg.append(state['exp_avg'])
exp_avg_sq.append(state['exp_avg_sq'])
Reported by Pylint.
Line: 75
Column: 74
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avg.append(state['exp_avg'])
exp_avg_sq.append(state['exp_avg_sq'])
Reported by Pylint.
Line: 77
Column: 43
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avg.append(state['exp_avg'])
exp_avg_sq.append(state['exp_avg_sq'])
state['step'] += 1
Reported by Pylint.
Line: 77
Column: 77
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avg.append(state['exp_avg'])
exp_avg_sq.append(state['exp_avg_sq'])
state['step'] += 1
Reported by Pylint.
Line: 119
Column: 21
for _, per_dtype_grads in per_device_and_dtype_grads.items():
for grads in per_dtype_grads.values():
torch._foreach_zero_(grads)
Reported by Pylint.
Line: 98
Column: 3
return loss
# TODO: refactor to a base class once foreach ops are in a good shape.
def zero_grad(self, set_to_none: bool = False):
per_device_and_dtype_grads = defaultdict(lambda: defaultdict(list))
for group in self.param_groups:
for p in group['params']:
if p.grad is not None:
Reported by Pylint.
Line: 119
Column: 21
for _, per_dtype_grads in per_device_and_dtype_grads.items():
for grads in per_dtype_grads.values():
torch._foreach_zero_(grads)
Reported by Pylint.
Line: 1
Column: 1
import torch
from . import _functional as F
from ..optimizer import Optimizer
from collections import defaultdict
class RAdam(Optimizer):
r"""Implements RAdam algorithm with multi tensor APIs.
It has been proposed in `On the variance of the adaptive learning rate and beyond`_.
Reported by Pylint.
torch/quantization/fx/qconfig_utils.py
24 issues
Line: 12
Column: 1
Graph,
)
from .utils import _parent_name
def get_flattened_qconfig_dict(qconfig_dict):
""" flatten the global, object_type and module_name qconfig
to the same qconfig_dict so that it can be used by
Reported by Pylint.
Line: 134
Column: 9
def generate_qconfig_map(
root: torch.nn.Module,
modules: Dict[str, torch.nn.Module],
input_graph: Graph,
qconfig_dict: Any,
node_name_to_scope: Dict[str, Tuple[str, type]]) -> Dict[str, QConfigAny]:
global_qconfig = qconfig_dict.get("", None)
Reported by Pylint.
Line: 1
Column: 1
import torch
from collections import OrderedDict, defaultdict
from typing import Union, Callable, Any, Dict, Tuple, Set
from torch.quantization.qconfig import add_module_to_qconfig_obs_ctr, QConfigAny
import re
from torch.fx.graph import (
Graph,
Reported by Pylint.
Line: 2
Column: 1
import torch
from collections import OrderedDict, defaultdict
from typing import Union, Callable, Any, Dict, Tuple, Set
from torch.quantization.qconfig import add_module_to_qconfig_obs_ctr, QConfigAny
import re
from torch.fx.graph import (
Graph,
Reported by Pylint.
Line: 3
Column: 1
import torch
from collections import OrderedDict, defaultdict
from typing import Union, Callable, Any, Dict, Tuple, Set
from torch.quantization.qconfig import add_module_to_qconfig_obs_ctr, QConfigAny
import re
from torch.fx.graph import (
Graph,
Reported by Pylint.
Line: 6
Column: 1
from typing import Union, Callable, Any, Dict, Tuple, Set
from torch.quantization.qconfig import add_module_to_qconfig_obs_ctr, QConfigAny
import re
from torch.fx.graph import (
Graph,
)
Reported by Pylint.
Line: 66
Column: 1
return qconfig_dict
def get_object_type_qconfig(
qconfig_dict: Any,
object_type: Union[Callable, str],
fallback_qconfig: QConfigAny) -> QConfigAny:
# object_type can be
# 1. module type (call_module)
Reported by Pylint.
Line: 78
Column: 1
object_type, fallback_qconfig)
def get_module_name_regex_qconfig(qconfig_dict, module_name, fallback_qconfig):
for regex_pattern, qconfig in \
qconfig_dict['module_name_regex'].items():
if re.match(regex_pattern, module_name):
# first match wins
return qconfig
Reported by Pylint.
Line: 87
Column: 1
return fallback_qconfig
def maybe_adjust_qconfig_for_module_name_object_type_order(
qconfig_dict: Any,
cur_module_path: str,
cur_object_type: Callable,
cur_object_type_idx: int,
fallback_qconfig: QConfigAny,
Reported by Pylint.
Line: 108
Column: 1
return fallback_qconfig
def get_module_name_qconfig(qconfig_dict, module_name, fallback_qconfig):
if module_name == '':
# module name qconfig not found
return fallback_qconfig
if module_name in qconfig_dict['module_name']:
return qconfig_dict['module_name'][module_name]
Reported by Pylint.