The following issues were found:
benchmarks/distributed/rpc/parameter_server/trainer/trainer.py
15 issues
Line: 7
Column: 1
from metrics.MetricsLogger import MetricsLogger
import torch
class TrainerBase(ABC):
BATCH_LEVEL_METRIC = "batch_level_metric"
Reported by Pylint.
Line: 34
Column: 28
"""
return
def record_start(self, type, key, name, cuda=True):
r"""
A method that records the start event for a metric.
Args:
type (str): group id for metric
key (str): unique id for metric within a group
Reported by Pylint.
Line: 50
Column: 26
cuda
)
def record_end(self, type, key):
r"""
A method that records the end event for a metric.
Args:
type (str): group id for metric
key (str): unique id for metric within a group
Reported by Pylint.
Line: 150
Column: 28
)
@staticmethod
def methodmetric(name, type="method_metric", cuda=True):
r"""
A decorator that records a metric for the decorated method.
Args:
name (str): description of the metric
type (str): group id for metric
Reported by Pylint.
Line: 162
Column: 17
@functools.wraps(function)
def wrapper(self, *args):
key = time.time()
self.__metrics_logger.record_start(type, key, name, cuda)
result = function(self, *args)
self.__metrics_logger.record_end(type, key)
return result
return wrapper
return decorator
Reported by Pylint.
Line: 164
Column: 17
key = time.time()
self.__metrics_logger.record_start(type, key, name, cuda)
result = function(self, *args)
self.__metrics_logger.record_end(type, key)
return result
return wrapper
return decorator
def get_metrics(self):
Reported by Pylint.
Line: 242
Column: 5
"""
return f"{epoch},{index}"
def train(self, model, data):
r"""
A method that implements the training algorithm.
Args:
model (nn.Module): neural network model
data (list): training examples
Reported by Pylint.
Line: 1
Column: 1
import functools
import time
from abc import ABC, abstractmethod
from metrics.MetricsLogger import MetricsLogger
import torch
Reported by Pylint.
Line: 10
Column: 1
import torch
class TrainerBase(ABC):
BATCH_LEVEL_METRIC = "batch_level_metric"
BATCH_ALL = "batch_all"
FORWARD_METRIC = "forward_metric"
FORWARD_PASS = "forward_pass"
Reported by Pylint.
Line: 15
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b105_hardcoded_password_string.html
BATCH_LEVEL_METRIC = "batch_level_metric"
BATCH_ALL = "batch_all"
FORWARD_METRIC = "forward_metric"
FORWARD_PASS = "forward_pass"
BACKWARD_METRIC = "backward_metric"
BACKWARD = "backward"
def __init__(self, rank):
r"""
Reported by Bandit.
caffe2/python/ideep/conv_transpose_test.py
15 issues
Line: 7
Column: 1
import unittest
import numpy as np
from hypothesis import assume, given, settings
import hypothesis.strategies as st
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import numpy as np
from hypothesis import assume, given, settings
import hypothesis.strategies as st
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
Reported by Pylint.
Line: 15
Column: 22
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class ConvTransposeTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 2),
pad=st.integers(0, 3),
kernel=st.integers(1, 5),
adj=st.integers(0, 2),
Reported by Pylint.
Line: 1
Column: 1
import unittest
import numpy as np
from hypothesis import assume, given, settings
import hypothesis.strategies as st
Reported by Pylint.
Line: 16
Column: 1
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class ConvTransposeTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 2),
pad=st.integers(0, 3),
kernel=st.integers(1, 5),
adj=st.integers(0, 2),
size=st.integers(7, 10),
Reported by Pylint.
Line: 30
Column: 5
compute_dX=st.booleans(),
**mu.gcs)
@settings(max_examples=2, timeout=100)
def test_convolution_transpose_gradients(self, stride, pad, kernel, adj,
size, input_channels,
output_channels, batch_size,
use_bias, training_mode,
compute_dX, gc, dc):
training = 1 if training_mode else 0
Reported by Pylint.
Line: 30
Column: 5
compute_dX=st.booleans(),
**mu.gcs)
@settings(max_examples=2, timeout=100)
def test_convolution_transpose_gradients(self, stride, pad, kernel, adj,
size, input_channels,
output_channels, batch_size,
use_bias, training_mode,
compute_dX, gc, dc):
training = 1 if training_mode else 0
Reported by Pylint.
Line: 30
Column: 5
compute_dX=st.booleans(),
**mu.gcs)
@settings(max_examples=2, timeout=100)
def test_convolution_transpose_gradients(self, stride, pad, kernel, adj,
size, input_channels,
output_channels, batch_size,
use_bias, training_mode,
compute_dX, gc, dc):
training = 1 if training_mode else 0
Reported by Pylint.
Line: 30
Column: 5
compute_dX=st.booleans(),
**mu.gcs)
@settings(max_examples=2, timeout=100)
def test_convolution_transpose_gradients(self, stride, pad, kernel, adj,
size, input_channels,
output_channels, batch_size,
use_bias, training_mode,
compute_dX, gc, dc):
training = 1 if training_mode else 0
Reported by Pylint.
Line: 30
Column: 5
compute_dX=st.booleans(),
**mu.gcs)
@settings(max_examples=2, timeout=100)
def test_convolution_transpose_gradients(self, stride, pad, kernel, adj,
size, input_channels,
output_channels, batch_size,
use_bias, training_mode,
compute_dX, gc, dc):
training = 1 if training_mode else 0
Reported by Pylint.
benchmarks/record_function_benchmark/record_function_bench.py
15 issues
Line: 3
Column: 1
import argparse
import sys
import torch
import torch.utils.benchmark as benchmark_utils
try:
from benchmarks.fastrnns.factory import lstm_creator
except ImportError:
Reported by Pylint.
Line: 4
Column: 1
import argparse
import sys
import torch
import torch.utils.benchmark as benchmark_utils
try:
from benchmarks.fastrnns.factory import lstm_creator
except ImportError:
Reported by Pylint.
Line: 13
Column: 1
from caffe2.benchmarks.fastrnns.factory import lstm_creator
from torchvision.models import resnet50
def prepare_lstm_jit(bench_args):
model_def = lstm_creator(
script=True,
seqLength=bench_args.lstmSeqLength,
Reported by Pylint.
Line: 26
Column: 26
device='cpu')
return model_def.inputs, model_def.forward
def prepare_resnet50_jit(bench_args):
model = resnet50()
inputs = (torch.randn(32, 3, 224, 224),)
model = torch.jit.trace(model, inputs)
return inputs, model
Reported by Pylint.
Line: 27
Column: 5
return model_def.inputs, model_def.forward
def prepare_resnet50_jit(bench_args):
model = resnet50()
inputs = (torch.randn(32, 3, 224, 224),)
model = torch.jit.trace(model, inputs)
return inputs, model
MODELS = {
Reported by Pylint.
Line: 43
Column: 17
results = []
for model_name in model_names:
model_creator = MODELS[model_name]
inputs, model = model_creator(bench_args)
print("Benchmarking RecordFunction overhead for", model_name)
print("Running warmup...", end=" ")
sys.stdout.flush()
for _ in range(bench_args.warmup):
Reported by Pylint.
Line: 54
Column: 17
for num_threads in NUM_THREADS:
for with_rec_fn in [True, False]:
torch.autograd._enable_record_function(with_rec_fn)
torch.autograd._clear_callbacks()
if with_rec_fn:
torch.autograd._set_empty_test_observer(True, 0.0001)
print("Running {} RecordFunction, num threads {} ...".format(
Reported by Pylint.
Line: 55
Column: 17
for num_threads in NUM_THREADS:
for with_rec_fn in [True, False]:
torch.autograd._enable_record_function(with_rec_fn)
torch.autograd._clear_callbacks()
if with_rec_fn:
torch.autograd._set_empty_test_observer(True, 0.0001)
print("Running {} RecordFunction, num threads {} ...".format(
"with" if with_rec_fn else "without", num_threads), end=" ")
Reported by Pylint.
Line: 57
Column: 21
torch.autograd._enable_record_function(with_rec_fn)
torch.autograd._clear_callbacks()
if with_rec_fn:
torch.autograd._set_empty_test_observer(True, 0.0001)
print("Running {} RecordFunction, num threads {} ...".format(
"with" if with_rec_fn else "without", num_threads), end=" ")
sys.stdout.flush()
timer = benchmark_utils.Timer(
Reported by Pylint.
Line: 1
Column: 1
import argparse
import sys
import torch
import torch.utils.benchmark as benchmark_utils
try:
from benchmarks.fastrnns.factory import lstm_creator
except ImportError:
Reported by Pylint.
benchmarks/framework_overhead_benchmark/SimpleAddModule.py
15 issues
Line: 1
Column: 1
import torch
from utils import NUM_LOOP_ITERS
def add_tensors_loop(x, y):
z = torch.add(x, y)
for i in range(NUM_LOOP_ITERS):
z = torch.add(z, x)
return z
Reported by Pylint.
Line: 6
Column: 9
def add_tensors_loop(x, y):
z = torch.add(x, y)
for i in range(NUM_LOOP_ITERS):
z = torch.add(z, x)
return z
class SimpleAddModule(torch.nn.Module):
def __init__(self, add_op):
Reported by Pylint.
Line: 1
Column: 1
import torch
from utils import NUM_LOOP_ITERS
def add_tensors_loop(x, y):
z = torch.add(x, y)
for i in range(NUM_LOOP_ITERS):
z = torch.add(z, x)
return z
Reported by Pylint.
Line: 1
Column: 1
import torch
from utils import NUM_LOOP_ITERS
def add_tensors_loop(x, y):
z = torch.add(x, y)
for i in range(NUM_LOOP_ITERS):
z = torch.add(z, x)
return z
Reported by Pylint.
Line: 4
Column: 1
import torch
from utils import NUM_LOOP_ITERS
def add_tensors_loop(x, y):
z = torch.add(x, y)
for i in range(NUM_LOOP_ITERS):
z = torch.add(z, x)
return z
Reported by Pylint.
Line: 4
Column: 1
import torch
from utils import NUM_LOOP_ITERS
def add_tensors_loop(x, y):
z = torch.add(x, y)
for i in range(NUM_LOOP_ITERS):
z = torch.add(z, x)
return z
Reported by Pylint.
Line: 4
Column: 1
import torch
from utils import NUM_LOOP_ITERS
def add_tensors_loop(x, y):
z = torch.add(x, y)
for i in range(NUM_LOOP_ITERS):
z = torch.add(z, x)
return z
Reported by Pylint.
Line: 5
Column: 5
from utils import NUM_LOOP_ITERS
def add_tensors_loop(x, y):
z = torch.add(x, y)
for i in range(NUM_LOOP_ITERS):
z = torch.add(z, x)
return z
class SimpleAddModule(torch.nn.Module):
Reported by Pylint.
Line: 7
Column: 9
def add_tensors_loop(x, y):
z = torch.add(x, y)
for i in range(NUM_LOOP_ITERS):
z = torch.add(z, x)
return z
class SimpleAddModule(torch.nn.Module):
def __init__(self, add_op):
super(SimpleAddModule, self).__init__()
Reported by Pylint.
Line: 10
Column: 1
z = torch.add(z, x)
return z
class SimpleAddModule(torch.nn.Module):
def __init__(self, add_op):
super(SimpleAddModule, self).__init__()
self.add_op = add_op
def forward(self, x, y):
Reported by Pylint.
benchmarks/functional_autograd_benchmark/audio_text_models.py
15 issues
Line: 1
Column: 1
import torch
from torch import nn, Tensor
import torchaudio_models as models
from utils import extract_weights, load_weights, GetterReturnType
def get_wav2letter(device: torch.device) -> GetterReturnType:
N = 10
Reported by Pylint.
Line: 2
Column: 1
import torch
from torch import nn, Tensor
import torchaudio_models as models
from utils import extract_weights, load_weights, GetterReturnType
def get_wav2letter(device: torch.device) -> GetterReturnType:
N = 10
Reported by Pylint.
Line: 1
Column: 1
import torch
from torch import nn, Tensor
import torchaudio_models as models
from utils import extract_weights, load_weights, GetterReturnType
def get_wav2letter(device: torch.device) -> GetterReturnType:
N = 10
Reported by Pylint.
Line: 8
Column: 1
from utils import extract_weights, load_weights, GetterReturnType
def get_wav2letter(device: torch.device) -> GetterReturnType:
N = 10
input_frames = 700
vocab_size = 28
model = models.Wav2Letter(num_classes=vocab_size)
criterion = torch.nn.NLLLoss()
Reported by Pylint.
Line: 9
Column: 5
from utils import extract_weights, load_weights, GetterReturnType
def get_wav2letter(device: torch.device) -> GetterReturnType:
N = 10
input_frames = 700
vocab_size = 28
model = models.Wav2Letter(num_classes=vocab_size)
criterion = torch.nn.NLLLoss()
model.to(device)
Reported by Pylint.
Line: 29
Column: 1
return forward, params
def get_deepspeech(device: torch.device) -> GetterReturnType:
sample_rate = 16000
window_size = 0.02
window = "hamming"
audio_conf = dict(sample_rate=sample_rate,
window_size=window_size,
Reported by Pylint.
Line: 29
Column: 1
return forward, params
def get_deepspeech(device: torch.device) -> GetterReturnType:
sample_rate = 16000
window_size = 0.02
window = "hamming"
audio_conf = dict(sample_rate=sample_rate,
window_size=window_size,
Reported by Pylint.
Line: 38
Column: 5
window=window,
noise_dir=None)
N = 10
num_classes = 10
spectrogram_size = 161
# Commented are the original sizes in the code
seq_length = 500 # 1343
target_length = 10 # 50
Reported by Pylint.
Line: 67
Column: 1
return forward, params
def get_transformer(device: torch.device) -> GetterReturnType:
# For most SOTA research, you would like to have embed to 720, nhead to 12, bsz to 64, tgt_len/src_len to 128.
N = 64
seq_length = 128
ntoken = 50
model = models.TransformerModel(ntoken=ntoken, ninp=720, nhead=12, nhid=2048, nlayers=2)
Reported by Pylint.
Line: 68
Column: 1
return forward, params
def get_transformer(device: torch.device) -> GetterReturnType:
# For most SOTA research, you would like to have embed to 720, nhead to 12, bsz to 64, tgt_len/src_len to 128.
N = 64
seq_length = 128
ntoken = 50
model = models.TransformerModel(ntoken=ntoken, ninp=720, nhead=12, nhid=2048, nlayers=2)
model.to(device)
Reported by Pylint.
benchmarks/operator_benchmark/c2/replace_nan_test.py
15 issues
Line: 1
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for element-wise ReplaceNaN operator."""
# Configs for C2 ReplaceNaN operator
Reported by Pylint.
Line: 3
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for element-wise ReplaceNaN operator."""
# Configs for C2 ReplaceNaN operator
Reported by Pylint.
Line: 4
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for element-wise ReplaceNaN operator."""
# Configs for C2 ReplaceNaN operator
Reported by Pylint.
Line: 10
Column: 28
"""Microbenchmarks for element-wise ReplaceNaN operator."""
# Configs for C2 ReplaceNaN operator
replace_nan_long_configs = op_bench.cross_product_configs(
M=[32, 64, 128], N=range(32, 128, 32), dtype=["float", "double"], tags=["long"]
)
replace_nan_short_configs = op_bench.config_list(
Reported by Pylint.
Line: 15
Column: 29
)
replace_nan_short_configs = op_bench.config_list(
attrs=[
[16, 16, "float"],
[16, 16, "double"],
[64, 64, "float"],
[64, 64, "double"],
Reported by Pylint.
Line: 3
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for element-wise ReplaceNaN operator."""
# Configs for C2 ReplaceNaN operator
Reported by Pylint.
Line: 7
Column: 1
from caffe2.python import core
"""Microbenchmarks for element-wise ReplaceNaN operator."""
# Configs for C2 ReplaceNaN operator
replace_nan_long_configs = op_bench.cross_product_configs(
M=[32, 64, 128], N=range(32, 128, 32), dtype=["float", "double"], tags=["long"]
)
Reported by Pylint.
Line: 29
Column: 9
class ReplaceNaNBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, M, N, dtype):
self.input = self.tensor([M, N], dtype)
self.set_module_name("replace_nan")
def forward(self):
op = core.CreateOperator("ReplaceNaN", self.input, self.input, value=1.0)
return op
Reported by Pylint.
Line: 1
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
"""Microbenchmarks for element-wise ReplaceNaN operator."""
# Configs for C2 ReplaceNaN operator
Reported by Pylint.
Line: 27
Column: 1
)
class ReplaceNaNBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, M, N, dtype):
self.input = self.tensor([M, N], dtype)
self.set_module_name("replace_nan")
def forward(self):
Reported by Pylint.
benchmarks/operator_benchmark/pt/split_test.py
15 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for Split operator"""
# Configs for PT Split operator
split_configs_short = op_bench.config_list(
Reported by Pylint.
Line: 9
Column: 23
# Configs for PT Split operator
split_configs_short = op_bench.config_list(
attr_names=["M", "N", "parts"],
attrs=[
[8, 8, 2],
[256, 512, 2],
[512, 512, 2],
Reported by Pylint.
Line: 22
Column: 22
tags=["short"],
)
split_configs_long = op_bench.cross_product_configs(
M=[128, 1024],
N=[128, 1024],
parts=[2, 4],
device=['cpu', 'cuda'],
tags=['long']
Reported by Pylint.
Line: 31
Column: 22
)
class SplitBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, parts, device):
self.inputs = {
"input": torch.rand(M, N, device=device),
"split_size": int(M * N / parts)
}
Reported by Pylint.
Line: 43
Column: 1
return torch.split(input, split_size)
op_bench.generate_pt_test(split_configs_short + split_configs_long,
SplitBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 5
Column: 1
import torch
"""Microbenchmarks for Split operator"""
# Configs for PT Split operator
split_configs_short = op_bench.config_list(
attr_names=["M", "N", "parts"],
Reported by Pylint.
Line: 33
Column: 9
class SplitBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, parts, device):
self.inputs = {
"input": torch.rand(M, N, device=device),
"split_size": int(M * N / parts)
}
self.set_module_name('split')
Reported by Pylint.
Line: 39
Column: 23
}
self.set_module_name('split')
def forward(self, input, split_size: int):
return torch.split(input, split_size)
op_bench.generate_pt_test(split_configs_short + split_configs_long,
SplitBenchmark)
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for Split operator"""
# Configs for PT Split operator
split_configs_short = op_bench.config_list(
Reported by Pylint.
Line: 31
Column: 1
)
class SplitBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, parts, device):
self.inputs = {
"input": torch.rand(M, N, device=device),
"split_size": int(M * N / parts)
}
Reported by Pylint.
caffe2/python/device_checker.py
15 issues
Line: 44
Column: 13
_input_device_options = input_device_options or \
InferOpBlobDevicesAsDict(op)[0]
print(_input_device_options)
for i, arr in enumerate(inputs):
workspace.FeedBlob(
op.input[i], np.array(arr),
_input_device_options.get(op.input[i], device_option)
)
workspace.RunOperatorOnce(op)
Reported by Pylint.
Line: 1
Column: 1
## @package device_checker
# Module caffe2.python.device_checker
import numpy as np
import copy
from caffe2.python import workspace
from caffe2.python.core import InferOpBlobDevicesAsDict
from future.utils import viewitems
Reported by Pylint.
Line: 4
Column: 1
## @package device_checker
# Module caffe2.python.device_checker
import numpy as np
import copy
from caffe2.python import workspace
from caffe2.python.core import InferOpBlobDevicesAsDict
from future.utils import viewitems
Reported by Pylint.
Line: 10
Column: 1
from future.utils import viewitems
class DeviceChecker(object):
"""A device checker in Python to check consistency across multiple devices.
This is not the most efficient way to check devices, as the Python interface
will involve a lot of copies back and forth operations. Use at your own risk.
"""
Reported by Pylint.
Line: 21
Column: 5
self._threshold = threshold
self._device_options = device_options
def CheckSimple(self, op, inputs, outputs_to_check,
input_device_options=None):
"""Checks the operator with different device implementations.
Inputs:
op: the operator to be checked.
Reported by Pylint.
Line: 21
Column: 5
self._threshold = threshold
self._device_options = device_options
def CheckSimple(self, op, inputs, outputs_to_check,
input_device_options=None):
"""Checks the operator with different device implementations.
Inputs:
op: the operator to be checked.
Reported by Pylint.
Line: 58
Column: 13
# After running on all devices, check correctness
success = True
for i in range(1, len(self._device_options)):
for j in range(len(outputs_to_check)):
x = results[i][j]
y = results[0][j]
if not np.allclose(x, y,
atol=self._threshold, rtol=self._threshold):
print('Failure in checking device option {}'
Reported by Pylint.
Line: 59
Column: 17
success = True
for i in range(1, len(self._device_options)):
for j in range(len(outputs_to_check)):
x = results[i][j]
y = results[0][j]
if not np.allclose(x, y,
atol=self._threshold, rtol=self._threshold):
print('Failure in checking device option {}'
' and output {}. The outputs are:'
Reported by Pylint.
Line: 60
Column: 17
for i in range(1, len(self._device_options)):
for j in range(len(outputs_to_check)):
x = results[i][j]
y = results[0][j]
if not np.allclose(x, y,
atol=self._threshold, rtol=self._threshold):
print('Failure in checking device option {}'
' and output {}. The outputs are:'
.format(i, op.output[outputs_to_check[j]]))
Reported by Pylint.
Line: 76
Column: 5
workspace.SwitchWorkspace(old_ws_name)
return success
def CheckNet(self, net, inputs=None, blobs_to_check=None, ignore=None):
"""Checks a network by inspecting all of its intermediate results, and
see if things match.
"""
if inputs is None:
inputs = {}
Reported by Pylint.
caffe2/experiments/python/tt_pad_op_test.py
15 issues
Line: 23
Column: 1
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 24
Column: 1
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
Reported by Pylint.
Line: 30
Column: 1
import caffe2.python.hypothesis_test_util as hu
class TestTTPad(hu.HypothesisTestCase):
@given(K=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=10, max_value=20),
N=st.integers(min_value=10, max_value=20),
**hu.gcs)
def test_tt_pad(self, K, M, N, gc, dc):
Reported by Pylint.
Line: 35
Column: 5
M=st.integers(min_value=10, max_value=20),
N=st.integers(min_value=10, max_value=20),
**hu.gcs)
def test_tt_pad(self, K, M, N, gc, dc):
op = core.CreateOperator(
'TTPad',
['A'],
['A', 'dim0'],
scale=(K))
Reported by Pylint.
Line: 35
Column: 5
M=st.integers(min_value=10, max_value=20),
N=st.integers(min_value=10, max_value=20),
**hu.gcs)
def test_tt_pad(self, K, M, N, gc, dc):
op = core.CreateOperator(
'TTPad',
['A'],
['A', 'dim0'],
scale=(K))
Reported by Pylint.
Line: 35
Column: 5
M=st.integers(min_value=10, max_value=20),
N=st.integers(min_value=10, max_value=20),
**hu.gcs)
def test_tt_pad(self, K, M, N, gc, dc):
op = core.CreateOperator(
'TTPad',
['A'],
['A', 'dim0'],
scale=(K))
Reported by Pylint.
Line: 35
Column: 5
M=st.integers(min_value=10, max_value=20),
N=st.integers(min_value=10, max_value=20),
**hu.gcs)
def test_tt_pad(self, K, M, N, gc, dc):
op = core.CreateOperator(
'TTPad',
['A'],
['A', 'dim0'],
scale=(K))
Reported by Pylint.
Line: 35
Column: 5
M=st.integers(min_value=10, max_value=20),
N=st.integers(min_value=10, max_value=20),
**hu.gcs)
def test_tt_pad(self, K, M, N, gc, dc):
op = core.CreateOperator(
'TTPad',
['A'],
['A', 'dim0'],
scale=(K))
Reported by Pylint.
Line: 35
Column: 5
M=st.integers(min_value=10, max_value=20),
N=st.integers(min_value=10, max_value=20),
**hu.gcs)
def test_tt_pad(self, K, M, N, gc, dc):
op = core.CreateOperator(
'TTPad',
['A'],
['A', 'dim0'],
scale=(K))
Reported by Pylint.
benchmarks/operator_benchmark/pt/linear_test.py
15 issues
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn as nn
from pt import configs
"""Microbenchmarks for Linear operator."""
Reported by Pylint.
Line: 4
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn as nn
from pt import configs
"""Microbenchmarks for Linear operator."""
Reported by Pylint.
Line: 6
Column: 1
import torch
import torch.nn as nn
from pt import configs
"""Microbenchmarks for Linear operator."""
Reported by Pylint.
Line: 12
Column: 23
"""Microbenchmarks for Linear operator."""
class LinearBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, IN, OUT, device):
self.inputs = {
"input_one": torch.rand(N, IN, device=device)
}
self.linear = nn.Linear(IN, OUT).to(device=device)
Reported by Pylint.
Line: 24
Column: 1
return self.linear(input_one)
op_bench.generate_pt_test(configs.linear_configs_short + configs.linear_configs_long,
LinearBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 9
Column: 1
from pt import configs
"""Microbenchmarks for Linear operator."""
class LinearBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, IN, OUT, device):
self.inputs = {
Reported by Pylint.
Line: 14
Column: 9
class LinearBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, IN, OUT, device):
self.inputs = {
"input_one": torch.rand(N, IN, device=device)
}
self.linear = nn.Linear(IN, OUT).to(device=device)
self.set_module_name("linear")
Reported by Pylint.
Line: 17
Column: 9
self.inputs = {
"input_one": torch.rand(N, IN, device=device)
}
self.linear = nn.Linear(IN, OUT).to(device=device)
self.set_module_name("linear")
def forward(self, input_one):
return self.linear(input_one)
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
import torch.nn as nn
from pt import configs
"""Microbenchmarks for Linear operator."""
Reported by Pylint.
Line: 12
Column: 1
"""Microbenchmarks for Linear operator."""
class LinearBenchmark(op_bench.TorchBenchmarkBase):
def init(self, N, IN, OUT, device):
self.inputs = {
"input_one": torch.rand(N, IN, device=device)
}
self.linear = nn.Linear(IN, OUT).to(device=device)
Reported by Pylint.