The following issues were found:
caffe2/python/layer_test_util.py
18 issues
Line: 51
Column: 9
('float_features', schema.Scalar((np.float32, (32,)))),
)
trainer_extra_schema = trainer_extra_schema or schema.Struct()
self.model = layer_model_helper.LayerModelHelper(
'test_model',
input_feature_schema=input_feature_schema,
trainer_extra_schema=trainer_extra_schema)
def new_record(self, schema_obj):
Reported by Pylint.
Line: 1
Column: 1
## @package layer_test_util
# Module caffe2.python.layer_test_util
from collections import namedtuple
Reported by Pylint.
Line: 25
Column: 1
# pyre-fixme[13]: Pyre can't detect attribute initialization through the
# super().__new__ call
class OpSpec(namedtuple("OpSpec", "type input output arg")):
def __new__(cls, op_type, op_input, op_output, op_arg=None):
return super(OpSpec, cls).__new__(cls, op_type, op_input,
op_output, op_arg)
Reported by Pylint.
Line: 32
Column: 1
op_output, op_arg)
class LayersTestCase(test_util.TestCase):
def setUp(self):
super(LayersTestCase, self).setUp()
self.setup_example()
Reported by Pylint.
Line: 35
Column: 9
class LayersTestCase(test_util.TestCase):
def setUp(self):
super(LayersTestCase, self).setUp()
self.setup_example()
def setup_example(self):
"""
This is undocumented feature in hypothesis,
Reported by Pylint.
Line: 46
Column: 5
workspace.ResetWorkspace()
self.reset_model()
def reset_model(self, input_feature_schema=None, trainer_extra_schema=None):
input_feature_schema = input_feature_schema or schema.Struct(
('float_features', schema.Scalar((np.float32, (32,)))),
)
trainer_extra_schema = trainer_extra_schema or schema.Struct()
self.model = layer_model_helper.LayerModelHelper(
Reported by Pylint.
Line: 56
Column: 5
input_feature_schema=input_feature_schema,
trainer_extra_schema=trainer_extra_schema)
def new_record(self, schema_obj):
return schema.NewRecord(self.model.net, schema_obj)
def get_training_nets(self, add_constants=False):
"""
We don't use
Reported by Pylint.
Line: 75
Column: 5
layer.add_operators(train_net, train_init_net)
return train_init_net, train_net
def get_eval_net(self):
return layer_model_instantiator.generate_eval_net(self.model)
def get_predict_net(self):
return layer_model_instantiator.generate_predict_net(self.model)
Reported by Pylint.
Line: 78
Column: 5
def get_eval_net(self):
return layer_model_instantiator.generate_eval_net(self.model)
def get_predict_net(self):
return layer_model_instantiator.generate_predict_net(self.model)
def run_train_net(self):
self.model.output_schema = schema.Struct()
train_init_net, train_net = \
Reported by Pylint.
Line: 81
Column: 5
def get_predict_net(self):
return layer_model_instantiator.generate_predict_net(self.model)
def run_train_net(self):
self.model.output_schema = schema.Struct()
train_init_net, train_net = \
layer_model_instantiator.generate_training_nets(self.model)
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
Reported by Pylint.
benchmarks/operator_benchmark/pt/qlayernorm_test.py
18 issues
Line: 3
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for quantized layernorm operator."""
layernorm_configs_short = op_bench.cross_product_configs(
dims=(
Reported by Pylint.
Line: 8
Column: 27
"""Microbenchmarks for quantized layernorm operator."""
layernorm_configs_short = op_bench.cross_product_configs(
dims=(
(1, 8, 16),
(8, 8, 16),
(32, 8, 16),
(64, 128, 56, 56),
Reported by Pylint.
Line: 20
Column: 27
)
class QLayerNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims, dtype):
X = (torch.rand(*dims) - 0.5) * 256
scale = 1.0
zero_point = 0
Reported by Pylint.
Line: 45
Column: 1
output_zero_point=Y_zero_point)
op_bench.generate_pt_test(layernorm_configs_short, QLayerNormBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
Reported by Pylint.
Line: 6
Column: 1
import torch
"""Microbenchmarks for quantized layernorm operator."""
layernorm_configs_short = op_bench.cross_product_configs(
dims=(
(1, 8, 16),
(8, 8, 16),
Reported by Pylint.
Line: 26
Column: 9
X = (torch.rand(*dims) - 0.5) * 256
scale = 1.0
zero_point = 0
self.qX = torch.quantize_per_tensor(
X, scale=scale, zero_point=zero_point, dtype=dtype)
self.inputs = {
"qX": self.qX,
"weight": torch.rand(*self.qX.size()[1:], dtype=torch.float),
Reported by Pylint.
Line: 29
Column: 9
self.qX = torch.quantize_per_tensor(
X, scale=scale, zero_point=zero_point, dtype=dtype)
self.inputs = {
"qX": self.qX,
"weight": torch.rand(*self.qX.size()[1:], dtype=torch.float),
"bias": torch.rand(*self.qX.size()[1:], dtype=torch.float),
"eps": 1e-5,
"Y_scale": 0.1,
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for quantized layernorm operator."""
layernorm_configs_short = op_bench.cross_product_configs(
dims=(
Reported by Pylint.
Line: 20
Column: 1
)
class QLayerNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims, dtype):
X = (torch.rand(*dims) - 0.5) * 256
scale = 1.0
zero_point = 0
Reported by Pylint.
Line: 22
Column: 5
class QLayerNormBenchmark(op_bench.TorchBenchmarkBase):
def init(self, dims, dtype):
X = (torch.rand(*dims) - 0.5) * 256
scale = 1.0
zero_point = 0
self.qX = torch.quantize_per_tensor(
X, scale=scale, zero_point=zero_point, dtype=dtype)
Reported by Pylint.
caffe2/python/mkl/mkl_pool_speed_test.py
18 issues
Line: 12
Column: 22
from caffe2.python import core, workspace, test_util
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testMaxPoolingSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
Reported by Pylint.
Line: 102
Column: 9
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
if __name__ == '__main__':
unittest.main()
Reported by Pylint.
Line: 1
Column: 1
import unittest
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, test_util
Reported by Pylint.
Line: 13
Column: 1
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testMaxPoolingSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 64, 224, 224).astype(np.float32)
Reported by Pylint.
Line: 14
Column: 5
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testMaxPoolingSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 64, 224, 224).astype(np.float32)
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
Reported by Pylint.
Line: 14
Column: 5
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testMaxPoolingSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 64, 224, 224).astype(np.float32)
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
Reported by Pylint.
Line: 14
Column: 5
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testMaxPoolingSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 64, 224, 224).astype(np.float32)
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
Reported by Pylint.
Line: 18
Column: 9
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 64, 224, 224).astype(np.float32)
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
net = core.Net("test")
Reported by Pylint.
Line: 40
Column: 5
print("Maxpooling CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))
def testAveragePoolingSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 64, 224, 224).astype(np.float32)
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
Reported by Pylint.
Line: 40
Column: 5
print("Maxpooling CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))
def testAveragePoolingSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 64, 224, 224).astype(np.float32)
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
Reported by Pylint.
benchmarks/tensorexpr/tensor_engine.py
18 issues
Line: 21
Column: 9
def set_engine_mode(mode):
global tensor_engine
if mode == "tf":
from . import tf_engine
tensor_engine = tf_engine.TensorFlowEngine()
elif mode == "pt":
from . import pt_engine
Reported by Pylint.
Line: 25
Column: 9
tensor_engine = tf_engine.TensorFlowEngine()
elif mode == "pt":
from . import pt_engine
tensor_engine = pt_engine.TorchTensorEngine()
elif mode == "topi":
from . import topi_engine
Reported by Pylint.
Line: 29
Column: 9
tensor_engine = pt_engine.TorchTensorEngine()
elif mode == "topi":
from . import topi_engine
tensor_engine = topi_engine.TopiEngine()
elif mode == "relay":
from . import relay_engine
Reported by Pylint.
Line: 33
Column: 9
tensor_engine = topi_engine.TopiEngine()
elif mode == "relay":
from . import relay_engine
tensor_engine = relay_engine.RelayEngine()
elif mode == "nnc":
from . import nnc_engine
Reported by Pylint.
Line: 37
Column: 9
tensor_engine = relay_engine.RelayEngine()
elif mode == "nnc":
from . import nnc_engine
tensor_engine = nnc_engine.NncEngine()
else:
raise ValueError("invalid tensor engine mode: %s" % (mode))
tensor_engine.mode = mode
Reported by Pylint.
Line: 19
Column: 5
def set_engine_mode(mode):
global tensor_engine
if mode == "tf":
from . import tf_engine
tensor_engine = tf_engine.TensorFlowEngine()
elif mode == "pt":
Reported by Pylint.
Line: 1
Column: 1
tensor_engine = None
def unsupported(func):
def wrapper(self):
return func(self)
wrapper.is_supported = False
return wrapper
Reported by Pylint.
Line: 1
Column: 1
tensor_engine = None
def unsupported(func):
def wrapper(self):
return func(self)
wrapper.is_supported = False
return wrapper
Reported by Pylint.
Line: 4
Column: 1
tensor_engine = None
def unsupported(func):
def wrapper(self):
return func(self)
wrapper.is_supported = False
return wrapper
Reported by Pylint.
Line: 12
Column: 1
return wrapper
def is_supported(method):
if hasattr(method, "is_supported"):
return method.is_supported
return True
Reported by Pylint.
benchmarks/tensorexpr/swish.py
18 issues
Line: 1
Column: 1
from . import benchmark
import torch
class SwishBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
Reported by Pylint.
Line: 2
Column: 1
from . import benchmark
import torch
class SwishBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
Reported by Pylint.
Line: 1
Column: 1
from . import benchmark
import torch
class SwishBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
Reported by Pylint.
Line: 2
Column: 1
from . import benchmark
import torch
class SwishBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
Reported by Pylint.
Line: 5
Column: 1
import torch
class SwishBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
self.data = self.rand([M, N], device=device, dtype=dtype, requires_grad=self.requires_grad)
Reported by Pylint.
Line: 5
Column: 1
import torch
class SwishBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
self.data = self.rand([M, N], device=device, dtype=dtype, requires_grad=self.requires_grad)
Reported by Pylint.
Line: 6
Column: 5
class SwishBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
self.data = self.rand([M, N], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.inputs = [self.data]
Reported by Pylint.
Line: 6
Column: 5
class SwishBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
self.data = self.rand([M, N], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.inputs = [self.data]
Reported by Pylint.
Line: 6
Column: 5
class SwishBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
self.data = self.rand([M, N], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.inputs = [self.data]
Reported by Pylint.
Line: 8
Column: 9
class SwishBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
self.data = self.rand([M, N], device=device, dtype=dtype, requires_grad=self.requires_grad)
self.inputs = [self.data]
self.zeros = torch.zeros(M, N, device=device)
self.six = self.zeros + 6.0
Reported by Pylint.
benchmarks/operator_benchmark/pt/clip_ranges_test.py
18 issues
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for ClipRanges operator."""
torch.ops.load_library("//caffe2/torch/fb/sparsenn:sparsenn_operators")
# Configs for C2 ClipRanges operator
clip_ranges_long_configs = op_bench.cross_product_configs(
Reported by Pylint.
Line: 9
Column: 28
torch.ops.load_library("//caffe2/torch/fb/sparsenn:sparsenn_operators")
# Configs for C2 ClipRanges operator
clip_ranges_long_configs = op_bench.cross_product_configs(
LENGTH=range(1, 100),
M=[1],
N=[2],
MAX_LENGTH=range(1, 100),
device=['cpu', 'cuda'],
Reported by Pylint.
Line: 20
Column: 29
)
clip_ranges_short_configs = op_bench.config_list(
attrs=[
[6, 1, 2, 1, torch.int32],
[7, 1, 2, 2, torch.int32],
[8, 1, 2, 3, torch.int32],
[9, 1, 2, 4, torch.int32],
Reported by Pylint.
Line: 36
Column: 27
)
class ClipRangesBenchmark(op_bench.TorchBenchmarkBase):
def init(self, LENGTH, M, N, MAX_LENGTH, device, dtype):
self.inputs = {
"input": torch.rand(LENGTH, M, N, device=device).type(dtype),
"max_length": MAX_LENGTH
}
Reported by Pylint.
Line: 48
Column: 1
return torch.ops.fb.clip_ranges(input, max_length)
op_bench.generate_pt_test(
clip_ranges_long_configs + clip_ranges_short_configs, ClipRangesBenchmark
)
if __name__ == "__main__":
Reported by Pylint.
Line: 5
Column: 1
import torch
"""Microbenchmarks for ClipRanges operator."""
torch.ops.load_library("//caffe2/torch/fb/sparsenn:sparsenn_operators")
# Configs for C2 ClipRanges operator
clip_ranges_long_configs = op_bench.cross_product_configs(
LENGTH=range(1, 100),
Reported by Pylint.
Line: 38
Column: 9
class ClipRangesBenchmark(op_bench.TorchBenchmarkBase):
def init(self, LENGTH, M, N, MAX_LENGTH, device, dtype):
self.inputs = {
"input": torch.rand(LENGTH, M, N, device=device).type(dtype),
"max_length": MAX_LENGTH
}
self.set_module_name("clip_ranges")
Reported by Pylint.
Line: 44
Column: 23
}
self.set_module_name("clip_ranges")
def forward(self, input, max_length: int):
return torch.ops.fb.clip_ranges(input, max_length)
op_bench.generate_pt_test(
clip_ranges_long_configs + clip_ranges_short_configs, ClipRangesBenchmark
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
"""Microbenchmarks for ClipRanges operator."""
torch.ops.load_library("//caffe2/torch/fb/sparsenn:sparsenn_operators")
# Configs for C2 ClipRanges operator
clip_ranges_long_configs = op_bench.cross_product_configs(
Reported by Pylint.
Line: 36
Column: 1
)
class ClipRangesBenchmark(op_bench.TorchBenchmarkBase):
def init(self, LENGTH, M, N, MAX_LENGTH, device, dtype):
self.inputs = {
"input": torch.rand(LENGTH, M, N, device=device).type(dtype),
"max_length": MAX_LENGTH
}
Reported by Pylint.
benchmarks/tensorexpr/softmax.py
18 issues
Line: 1
Column: 1
from . import benchmark
import scipy.special
class SoftmaxBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
Reported by Pylint.
Line: 2
Column: 1
from . import benchmark
import scipy.special
class SoftmaxBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
Reported by Pylint.
Line: 1
Column: 1
from . import benchmark
import scipy.special
class SoftmaxBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
Reported by Pylint.
Line: 2
Column: 1
from . import benchmark
import scipy.special
class SoftmaxBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
Reported by Pylint.
Line: 5
Column: 1
import scipy.special
class SoftmaxBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
self.dtype = dtype
Reported by Pylint.
Line: 6
Column: 5
class SoftmaxBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
self.dtype = dtype
self.inputs = [self.randn(
Reported by Pylint.
Line: 6
Column: 5
class SoftmaxBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
self.dtype = dtype
self.inputs = [self.randn(
Reported by Pylint.
Line: 6
Column: 5
class SoftmaxBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
self.dtype = dtype
self.inputs = [self.randn(
Reported by Pylint.
Line: 8
Column: 9
class SoftmaxBench(benchmark.Benchmark):
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
self.dtype = dtype
self.inputs = [self.randn(
[M, N], device=device, dtype=dtype, requires_grad=self.requires_grad
)]
Reported by Pylint.
Line: 9
Column: 9
def __init__(self, mode, device, dtype, M, N):
super().__init__(mode, device, dtype)
self.M = M
self.N = N
self.dtype = dtype
self.inputs = [self.randn(
[M, N], device=device, dtype=dtype, requires_grad=self.requires_grad
)]
Reported by Pylint.
caffe2/experiments/python/tt_contraction_op_test.py
18 issues
Line: 23
Column: 1
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 24
Column: 1
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
Reported by Pylint.
Line: 1
Column: 1
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
Reported by Pylint.
Line: 30
Column: 1
import caffe2.python.hypothesis_test_util as hu
class TestTTContraction(hu.HypothesisTestCase):
@given(D=st.integers(min_value=5, max_value=20),
K=st.integers(min_value=5, max_value=20),
M=st.integers(min_value=5, max_value=20),
N=st.integers(min_value=5, max_value=20),
**hu.gcs)
Reported by Pylint.
Line: 36
Column: 5
M=st.integers(min_value=5, max_value=20),
N=st.integers(min_value=5, max_value=20),
**hu.gcs)
def test_tt_contraction(self, D, K, M, N, gc, dc):
A = np.random.rand(K, M).astype(np.float32)
B = np.random.rand(D, K, N).astype(np.float32)
workspace.FeedBlob('A', A)
workspace.FeedBlob('B', B)
Reported by Pylint.
Line: 36
Column: 5
M=st.integers(min_value=5, max_value=20),
N=st.integers(min_value=5, max_value=20),
**hu.gcs)
def test_tt_contraction(self, D, K, M, N, gc, dc):
A = np.random.rand(K, M).astype(np.float32)
B = np.random.rand(D, K, N).astype(np.float32)
workspace.FeedBlob('A', A)
workspace.FeedBlob('B', B)
Reported by Pylint.
Line: 36
Column: 5
M=st.integers(min_value=5, max_value=20),
N=st.integers(min_value=5, max_value=20),
**hu.gcs)
def test_tt_contraction(self, D, K, M, N, gc, dc):
A = np.random.rand(K, M).astype(np.float32)
B = np.random.rand(D, K, N).astype(np.float32)
workspace.FeedBlob('A', A)
workspace.FeedBlob('B', B)
Reported by Pylint.
Line: 36
Column: 5
M=st.integers(min_value=5, max_value=20),
N=st.integers(min_value=5, max_value=20),
**hu.gcs)
def test_tt_contraction(self, D, K, M, N, gc, dc):
A = np.random.rand(K, M).astype(np.float32)
B = np.random.rand(D, K, N).astype(np.float32)
workspace.FeedBlob('A', A)
workspace.FeedBlob('B', B)
Reported by Pylint.
Line: 36
Column: 5
M=st.integers(min_value=5, max_value=20),
N=st.integers(min_value=5, max_value=20),
**hu.gcs)
def test_tt_contraction(self, D, K, M, N, gc, dc):
A = np.random.rand(K, M).astype(np.float32)
B = np.random.rand(D, K, N).astype(np.float32)
workspace.FeedBlob('A', A)
workspace.FeedBlob('B', B)
Reported by Pylint.
Line: 36
Column: 5
M=st.integers(min_value=5, max_value=20),
N=st.integers(min_value=5, max_value=20),
**hu.gcs)
def test_tt_contraction(self, D, K, M, N, gc, dc):
A = np.random.rand(K, M).astype(np.float32)
B = np.random.rand(D, K, N).astype(np.float32)
workspace.FeedBlob('A', A)
workspace.FeedBlob('B', B)
Reported by Pylint.
.github/scripts/generate_pytorch_version.py
18 issues
Line: 28
Column: 9
root = get_pytorch_root()
# We're on a tag
am_on_tag = (
subprocess.run(
['git', 'describe', '--tags', '--exact'],
cwd=root,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
).returncode == 0
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
import argparse
import os
import subprocess
import re
from datetime import datetime
from distutils.util import strtobool
Reported by Pylint.
Line: 5
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_imports.html#b404-import-subprocess
import argparse
import os
import subprocess
import re
from datetime import datetime
from distutils.util import strtobool
from pathlib import Path
Reported by Bandit.
Line: 16
Column: 1
TRAILING_RC_PATTERN = re.compile("-rc[0-9]*$")
LEGACY_BASE_VERSION_SUFFIX_PATTERN = re.compile("a0$")
class NoGitTagException(Exception):
pass
def get_pytorch_root() -> Path:
return Path(subprocess.check_output(
['git', 'rev-parse', '--show-toplevel']
Reported by Pylint.
Line: 19
Column: 1
class NoGitTagException(Exception):
pass
def get_pytorch_root() -> Path:
return Path(subprocess.check_output(
['git', 'rev-parse', '--show-toplevel']
).decode('ascii').strip())
def get_tag() -> str:
Reported by Pylint.
Line: 20
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b603_subprocess_without_shell_equals_true.html
pass
def get_pytorch_root() -> Path:
return Path(subprocess.check_output(
['git', 'rev-parse', '--show-toplevel']
).decode('ascii').strip())
def get_tag() -> str:
root = get_pytorch_root()
Reported by Bandit.
Line: 20
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b607_start_process_with_partial_path.html
pass
def get_pytorch_root() -> Path:
return Path(subprocess.check_output(
['git', 'rev-parse', '--show-toplevel']
).decode('ascii').strip())
def get_tag() -> str:
root = get_pytorch_root()
Reported by Bandit.
Line: 24
Column: 1
['git', 'rev-parse', '--show-toplevel']
).decode('ascii').strip())
def get_tag() -> str:
root = get_pytorch_root()
# We're on a tag
am_on_tag = (
subprocess.run(
['git', 'describe', '--tags', '--exact'],
Reported by Pylint.
Line: 28
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b607_start_process_with_partial_path.html
root = get_pytorch_root()
# We're on a tag
am_on_tag = (
subprocess.run(
['git', 'describe', '--tags', '--exact'],
cwd=root,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
).returncode == 0
Reported by Bandit.
Line: 28
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b603_subprocess_without_shell_equals_true.html
root = get_pytorch_root()
# We're on a tag
am_on_tag = (
subprocess.run(
['git', 'describe', '--tags', '--exact'],
cwd=root,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL
).returncode == 0
Reported by Bandit.
benchmarks/operator_benchmark/c2/batch_gather_test.py
18 issues
Line: 1
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
import numpy
"""Microbenchmarks for element-wise BatchGather operator."""
Reported by Pylint.
Line: 3
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
import numpy
"""Microbenchmarks for element-wise BatchGather operator."""
Reported by Pylint.
Line: 4
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
import numpy
"""Microbenchmarks for element-wise BatchGather operator."""
Reported by Pylint.
Line: 11
Column: 30
"""Microbenchmarks for element-wise BatchGather operator."""
# Configs for C2 BatchGather operator
batch_gather_configs_short = op_bench.config_list(
attr_names=["M", "N", "K"],
attrs=[
[8, 8, 1],
[256, 512, 1],
[512, 512, 1],
Reported by Pylint.
Line: 27
Column: 29
tags=["short"]
)
batch_gather_configs_long = op_bench.cross_product_configs(
M=[128, 1024],
N=[128, 1024],
K=[1, 2],
device=['cpu', 'cuda'],
tags=["long"]
Reported by Pylint.
Line: 3
Column: 1
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase # noqa: F401
from caffe2.python import core
import numpy
"""Microbenchmarks for element-wise BatchGather operator."""
Reported by Pylint.
Line: 8
Column: 1
import numpy
"""Microbenchmarks for element-wise BatchGather operator."""
# Configs for C2 BatchGather operator
batch_gather_configs_short = op_bench.config_list(
attr_names=["M", "N", "K"],
attrs=[
Reported by Pylint.
Line: 37
Column: 9
class BatchGatherBenchmark(op_bench_c2.Caffe2BenchmarkBase):
def init(self, M, N, K, device):
self.input_one = self.tensor([M, N, K], device=device)
max_val = N
numpy.random.seed((1 << 32) - 1)
index_dim = numpy.random.randint(0, N)
self.index = self.feed_tensor(numpy.random.randint(0, max_val, index_dim), device=device)
self.output = self.tensor([M, index_dim, K], device=device)
Reported by Pylint.
Line: 41
Column: 9
max_val = N
numpy.random.seed((1 << 32) - 1)
index_dim = numpy.random.randint(0, N)
self.index = self.feed_tensor(numpy.random.randint(0, max_val, index_dim), device=device)
self.output = self.tensor([M, index_dim, K], device=device)
self.set_module_name("batch_gather")
def forward(self):
op = core.CreateOperator("BatchGather", [self.input_one, self.index], self.output)
Reported by Pylint.
Line: 42
Column: 9
numpy.random.seed((1 << 32) - 1)
index_dim = numpy.random.randint(0, N)
self.index = self.feed_tensor(numpy.random.randint(0, max_val, index_dim), device=device)
self.output = self.tensor([M, index_dim, K], device=device)
self.set_module_name("batch_gather")
def forward(self):
op = core.CreateOperator("BatchGather", [self.input_one, self.index], self.output)
return op
Reported by Pylint.