The following issues were found:
caffe2/python/examples/lmdb_create_example.py
11 issues
Line: 11
Column: 1
import argparse
import numpy as np
import lmdb
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace, model_helper
'''
Simple example to create an lmdb database of random image data and labels.
Reported by Pylint.
Line: 15
Column: 1
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace, model_helper
'''
Simple example to create an lmdb database of random image data and labels.
This can be used a skeleton to write your own data import.
It also runs a dummy-model with Caffe2 that reads the data and
validates the checksum is same.
Reported by Pylint.
Line: 69
Column: 11
print(">>> Read database...")
model = model_helper.ModelHelper(name="lmdbtest")
batch_size = 32
data, label = model.TensorProtosDBInput(
[], ["data", "label"], batch_size=batch_size,
db=db_file, db_type="lmdb")
checksum = 0
Reported by Pylint.
Line: 69
Column: 5
print(">>> Read database...")
model = model_helper.ModelHelper(name="lmdbtest")
batch_size = 32
data, label = model.TensorProtosDBInput(
[], ["data", "label"], batch_size=batch_size,
db=db_file, db_type="lmdb")
checksum = 0
Reported by Pylint.
Line: 1
Column: 1
## @package lmdb_create_example
# Module caffe2.python.examples.lmdb_create_example
import argparse
import numpy as np
Reported by Pylint.
Line: 24
Column: 1
'''
def create_db(output_file):
print(">>> Write database...")
LMDB_MAP_SIZE = 1 << 40 # MODIFY
env = lmdb.open(output_file, map_size=LMDB_MAP_SIZE)
checksum = 0
Reported by Pylint.
Line: 26
Column: 5
def create_db(output_file):
print(">>> Write database...")
LMDB_MAP_SIZE = 1 << 40 # MODIFY
env = lmdb.open(output_file, map_size=LMDB_MAP_SIZE)
checksum = 0
with env.begin(write=True) as txn:
for j in range(0, 128):
Reported by Pylint.
Line: 58
Column: 1
)
checksum += np.sum(img_data) * label
if (j % 16 == 0):
print("Inserted {} rows".format(j))
print("Checksum/write: {}".format(int(checksum)))
return checksum
Reported by Pylint.
Line: 65
Column: 1
return checksum
def read_db_with_caffe2(db_file, expected_checksum):
print(">>> Read database...")
model = model_helper.ModelHelper(name="lmdbtest")
batch_size = 32
data, label = model.TensorProtosDBInput(
[], ["data", "label"], batch_size=batch_size,
Reported by Pylint.
Line: 87
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
checksum += np.sum(img_datas[j, :]) * labels[j]
print("Checksum/read: {}".format(int(checksum)))
assert np.abs(expected_checksum - checksum < 0.1), \
"Read/write checksums dont match"
def main():
parser = argparse.ArgumentParser(
Reported by Bandit.
benchmarks/operator_benchmark/common/tests/pt_backward_test.py
11 issues
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
add_configs = op_bench.cross_product_configs(
M=[8, 1],
N=[8, 2],
K=[8, 4],
tags=["short"]
Reported by Pylint.
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
add_configs = op_bench.cross_product_configs(
M=[8, 1],
N=[8, 2],
K=[8, 4],
tags=["short"]
Reported by Pylint.
Line: 16
Column: 9
# for both inputs. The test name can also be used for filtering.
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K):
self.input_one = torch.rand(M, N, K, requires_grad=self.auto_set())
self.input_two = torch.rand(M, N, K, requires_grad=self.auto_set())
self.set_module_name("add")
def forward(self):
return torch.add(self.input_one, self.input_two)
Reported by Pylint.
Line: 17
Column: 9
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K):
self.input_one = torch.rand(M, N, K, requires_grad=self.auto_set())
self.input_two = torch.rand(M, N, K, requires_grad=self.auto_set())
self.set_module_name("add")
def forward(self):
return torch.add(self.input_one, self.input_two)
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
add_configs = op_bench.cross_product_configs(
M=[8, 1],
N=[8, 2],
K=[8, 4],
tags=["short"]
Reported by Pylint.
Line: 14
Column: 1
# This benchmark uses the auto_set to automatically set requires_grad
# for both inputs. The test name can also be used for filtering.
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K):
self.input_one = torch.rand(M, N, K, requires_grad=self.auto_set())
self.input_two = torch.rand(M, N, K, requires_grad=self.auto_set())
self.set_module_name("add")
Reported by Pylint.
Line: 15
Column: 5
# This benchmark uses the auto_set to automatically set requires_grad
# for both inputs. The test name can also be used for filtering.
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K):
self.input_one = torch.rand(M, N, K, requires_grad=self.auto_set())
self.input_two = torch.rand(M, N, K, requires_grad=self.auto_set())
self.set_module_name("add")
def forward(self):
Reported by Pylint.
Line: 15
Column: 5
# This benchmark uses the auto_set to automatically set requires_grad
# for both inputs. The test name can also be used for filtering.
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K):
self.input_one = torch.rand(M, N, K, requires_grad=self.auto_set())
self.input_two = torch.rand(M, N, K, requires_grad=self.auto_set())
self.set_module_name("add")
def forward(self):
Reported by Pylint.
Line: 15
Column: 5
# This benchmark uses the auto_set to automatically set requires_grad
# for both inputs. The test name can also be used for filtering.
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K):
self.input_one = torch.rand(M, N, K, requires_grad=self.auto_set())
self.input_two = torch.rand(M, N, K, requires_grad=self.auto_set())
self.set_module_name("add")
def forward(self):
Reported by Pylint.
Line: 15
Column: 5
# This benchmark uses the auto_set to automatically set requires_grad
# for both inputs. The test name can also be used for filtering.
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K):
self.input_one = torch.rand(M, N, K, requires_grad=self.auto_set())
self.input_two = torch.rand(M, N, K, requires_grad=self.auto_set())
self.set_module_name("add")
def forward(self):
Reported by Pylint.
benchmarks/operator_benchmark/common/tests/pt_cpu_gpu_forward_backward_test.py
11 issues
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
add_configs = op_bench.cross_product_configs(
M=[8],
N=[8],
K=[8],
device=["cuda", "cpu"],
Reported by Pylint.
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
add_configs = op_bench.cross_product_configs(
M=[8],
N=[8],
K=[8],
device=["cuda", "cpu"],
Reported by Pylint.
Line: 16
Column: 9
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
self.input_one = torch.rand(M, N, K, device=device, requires_grad=True)
self.input_two = torch.rand(M, N, K, device=device, requires_grad=True)
self.set_module_name("add")
def forward(self):
return torch.add(self.input_one, self.input_two)
Reported by Pylint.
Line: 17
Column: 9
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
self.input_one = torch.rand(M, N, K, device=device, requires_grad=True)
self.input_two = torch.rand(M, N, K, device=device, requires_grad=True)
self.set_module_name("add")
def forward(self):
return torch.add(self.input_one, self.input_two)
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
add_configs = op_bench.cross_product_configs(
M=[8],
N=[8],
K=[8],
device=["cuda", "cpu"],
Reported by Pylint.
Line: 14
Column: 1
)
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
self.input_one = torch.rand(M, N, K, device=device, requires_grad=True)
self.input_two = torch.rand(M, N, K, device=device, requires_grad=True)
self.set_module_name("add")
Reported by Pylint.
Line: 15
Column: 5
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
self.input_one = torch.rand(M, N, K, device=device, requires_grad=True)
self.input_two = torch.rand(M, N, K, device=device, requires_grad=True)
self.set_module_name("add")
def forward(self):
Reported by Pylint.
Line: 15
Column: 5
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
self.input_one = torch.rand(M, N, K, device=device, requires_grad=True)
self.input_two = torch.rand(M, N, K, device=device, requires_grad=True)
self.set_module_name("add")
def forward(self):
Reported by Pylint.
Line: 15
Column: 5
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
self.input_one = torch.rand(M, N, K, device=device, requires_grad=True)
self.input_two = torch.rand(M, N, K, device=device, requires_grad=True)
self.set_module_name("add")
def forward(self):
Reported by Pylint.
Line: 15
Column: 5
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K, device):
self.input_one = torch.rand(M, N, K, device=device, requires_grad=True)
self.input_two = torch.rand(M, N, K, device=device, requires_grad=True)
self.set_module_name("add")
def forward(self):
Reported by Pylint.
benchmarks/operator_benchmark/common/tests/random_sample_test.py
11 issues
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
configs = op_bench.random_sample_configs(
M=[1, 2, 3, 4, 5, 6],
N=[7, 8, 9, 10, 11, 12],
K=[13, 14, 15, 16, 17, 18],
# probs saves the weights of each value
Reported by Pylint.
Line: 2
Column: 1
import operator_benchmark as op_bench
import torch
configs = op_bench.random_sample_configs(
M=[1, 2, 3, 4, 5, 6],
N=[7, 8, 9, 10, 11, 12],
K=[13, 14, 15, 16, 17, 18],
# probs saves the weights of each value
Reported by Pylint.
Line: 23
Column: 9
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K):
self.input_one = torch.rand(M, N, K)
self.input_two = torch.rand(M, N, K)
self.set_module_name("add")
def forward(self):
return torch.add(self.input_one, self.input_two)
Reported by Pylint.
Line: 24
Column: 9
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K):
self.input_one = torch.rand(M, N, K)
self.input_two = torch.rand(M, N, K)
self.set_module_name("add")
def forward(self):
return torch.add(self.input_one, self.input_two)
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
import torch
configs = op_bench.random_sample_configs(
M=[1, 2, 3, 4, 5, 6],
N=[7, 8, 9, 10, 11, 12],
K=[13, 14, 15, 16, 17, 18],
# probs saves the weights of each value
Reported by Pylint.
Line: 21
Column: 1
)
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K):
self.input_one = torch.rand(M, N, K)
self.input_two = torch.rand(M, N, K)
self.set_module_name("add")
Reported by Pylint.
Line: 22
Column: 5
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K):
self.input_one = torch.rand(M, N, K)
self.input_two = torch.rand(M, N, K)
self.set_module_name("add")
def forward(self):
Reported by Pylint.
Line: 22
Column: 5
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K):
self.input_one = torch.rand(M, N, K)
self.input_two = torch.rand(M, N, K)
self.set_module_name("add")
def forward(self):
Reported by Pylint.
Line: 22
Column: 5
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K):
self.input_one = torch.rand(M, N, K)
self.input_two = torch.rand(M, N, K)
self.set_module_name("add")
def forward(self):
Reported by Pylint.
Line: 22
Column: 5
class AddBenchmark(op_bench.TorchBenchmarkBase):
def init(self, M, N, K):
self.input_one = torch.rand(M, N, K)
self.input_two = torch.rand(M, N, K)
self.set_module_name("add")
def forward(self):
Reported by Pylint.
caffe2/python/modeling/get_entry_from_blobs_test.py
11 issues
Line: 1
Column: 1
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
Reported by Pylint.
Line: 28
Column: 1
import numpy as np
class GetEntryFromBlobsTest(unittest.TestCase):
def test_get_entry_from_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=10, dim_out=8)
Reported by Pylint.
Line: 29
Column: 5
class GetEntryFromBlobsTest(unittest.TestCase):
def test_get_entry_from_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=10, dim_out=8)
# no operator name set, will use default
Reported by Pylint.
Line: 36
Column: 9
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=8, dim_out=4)
i1, i2 = np.random.randint(4, size=2)
net_modifier = GetEntryFromBlobs(
blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
i1=i1,
i2=i2,
Reported by Pylint.
Line: 36
Column: 13
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=8, dim_out=4)
i1, i2 = np.random.randint(4, size=2)
net_modifier = GetEntryFromBlobs(
blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
i1=i1,
i2=i2,
Reported by Pylint.
Line: 54
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
self.assertEqual(fc1_w_entry.size, 1)
self.assertEqual(fc1_w_entry[0], fc1_w[i1][i2])
assert model.net.output_record() is None
def test_get_entry_from_blobs_modify_output_record(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=4)
Reported by Bandit.
Line: 56
Column: 5
self.assertEqual(fc1_w_entry[0], fc1_w[i1][i2])
assert model.net.output_record() is None
def test_get_entry_from_blobs_modify_output_record(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=4)
# no operator name set, will use default
Reported by Pylint.
Line: 63
Column: 9
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=4, dim_out=4)
i1, i2 = np.random.randint(4), np.random.randint(5) - 1
net_modifier = GetEntryFromBlobs(
blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
i1=i1,
i2=i2,
Reported by Pylint.
Line: 63
Column: 13
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=4, dim_out=4)
i1, i2 = np.random.randint(4), np.random.randint(5) - 1
net_modifier = GetEntryFromBlobs(
blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
i1=i1,
i2=i2,
Reported by Pylint.
Line: 90
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
self.assertEqual(fc1_w_entry.size, 1)
self.assertEqual(fc1_w_entry[0], fc1_w[i1][i2])
assert 'fc1_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
assert 'fc2_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
Reported by Bandit.
.jenkins/pytorch/perf_test/compare_with_baseline.py
11 issues
Line: 1
Column: 1
import sys
import json
import math
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--test-name', dest='test_name', action='store',
required=True, help='test name')
parser.add_argument('--sample-stats', dest='sample_stats', action='store',
Reported by Pylint.
Line: 18
Column: 5
test_name = args.test_name
if 'cpu' in test_name:
backend = 'cpu'
elif 'gpu' in test_name:
backend = 'gpu'
data_file_path = '../{}_runtime.json'.format(backend)
Reported by Pylint.
Line: 20
Column: 5
if 'cpu' in test_name:
backend = 'cpu'
elif 'gpu' in test_name:
backend = 'gpu'
data_file_path = '../{}_runtime.json'.format(backend)
with open(data_file_path) as data_file:
data = json.load(data_file)
Reported by Pylint.
Line: 22
Column: 1
elif 'gpu' in test_name:
backend = 'gpu'
data_file_path = '../{}_runtime.json'.format(backend)
with open(data_file_path) as data_file:
data = json.load(data_file)
if test_name in data:
Reported by Pylint.
Line: 32
Column: 5
sigma = float(data[test_name]['sigma'])
else:
# Let the test pass if baseline number doesn't exist
mean = sys.maxsize
sigma = 0.001
print("population mean: ", mean)
print("population sigma: ", sigma)
Reported by Pylint.
Line: 33
Column: 5
else:
# Let the test pass if baseline number doesn't exist
mean = sys.maxsize
sigma = 0.001
print("population mean: ", mean)
print("population sigma: ", sigma)
# Let the test pass if baseline number is NaN (which happened in
Reported by Pylint.
Line: 41
Column: 5
# Let the test pass if baseline number is NaN (which happened in
# the past when we didn't have logic for catching NaN numbers)
if math.isnan(mean) or math.isnan(sigma):
mean = sys.maxsize
sigma = 0.001
sample_stats_data = json.loads(args.sample_stats)
sample_mean = float(sample_stats_data['mean'])
Reported by Pylint.
Line: 42
Column: 5
# the past when we didn't have logic for catching NaN numbers)
if math.isnan(mean) or math.isnan(sigma):
mean = sys.maxsize
sigma = 0.001
sample_stats_data = json.loads(args.sample_stats)
sample_mean = float(sample_stats_data['mean'])
sample_sigma = float(sample_stats_data['sigma'])
Reported by Pylint.
Line: 52
Column: 1
print("sample mean: ", sample_mean)
print("sample sigma: ", sample_sigma)
if math.isnan(sample_mean):
raise Exception('''Error: sample mean is NaN''')
elif math.isnan(sample_sigma):
raise Exception('''Error: sample sigma is NaN''')
z_value = (sample_mean - mean) / sigma
Reported by Pylint.
Line: 61
Column: 1
print("z-value: ", z_value)
if z_value >= 3:
raise Exception('''\n
z-value >= 3, there is high chance of perf regression.\n
To reproduce this regression, run
`cd .jenkins/pytorch/perf_test/ && bash {}.sh` on your local machine
and compare the runtime before/after your code change.
Reported by Pylint.
benchmarks/operator_benchmark/pt/configs.py
11 issues
Line: 12
Column: 25
return [config for config in config_list if cuda_config not in config]
# Configs for conv-1d ops
conv_1d_configs_short = op_bench.config_list(
attr_names=[
'IC', 'OC', 'kernel', 'stride', 'N', 'L'
],
attrs=[
[128, 256, 3, 1, 1, 64],
Reported by Pylint.
Line: 26
Column: 24
tags=['short']
)
conv_1d_configs_long = op_bench.cross_product_configs(
IC=[128, 512],
OC=[128, 512],
kernel=[3],
stride=[1, 2],
N=[8],
Reported by Pylint.
Line: 38
Column: 25
)
# Configs for Conv2d and ConvTranspose1d
conv_2d_configs_short = op_bench.config_list(
attr_names=[
'IC', 'OC', 'kernel', 'stride', 'N', 'H', 'W', 'G', 'pad',
],
attrs=[
[256, 256, 3, 1, 1, 16, 16, 1, 0],
Reported by Pylint.
Line: 51
Column: 24
tags=['short']
)
conv_2d_configs_long = op_bench.cross_product_configs(
IC=[128, 256],
OC=[128, 256],
kernel=[3],
stride=[1, 2],
N=[4],
Reported by Pylint.
Line: 66
Column: 25
)
# Configs for Conv3d and ConvTranspose3d
conv_3d_configs_short = op_bench.config_list(
attr_names=[
'IC', 'OC', 'kernel', 'stride', 'N', 'D', 'H', 'W'
],
attrs=[
[64, 64, 3, 1, 8, 4, 16, 16],
Reported by Pylint.
Line: 79
Column: 24
tags=['short']
)
linear_configs_short = op_bench.config_list(
attr_names=["N", "IN", "OUT"],
attrs=[
[1, 1, 1],
[4, 256, 128],
[16, 512, 256],
Reported by Pylint.
Line: 93
Column: 23
)
linear_configs_long = op_bench.cross_product_configs(
N=[32, 64],
IN=[128, 512],
OUT=[64, 128],
device=['cpu', 'cuda'],
tags=["long"]
Reported by Pylint.
Line: 101
Column: 30
tags=["long"]
)
embeddingbag_short_configs = op_bench.cross_product_configs(
embeddingbags=[10, 120, 1000, 2300],
dim=[64],
mode=['sum'],
input_size=[8, 16, 64],
offset=[0],
Reported by Pylint.
Line: 3
Column: 1
import operator_benchmark as op_bench
"""
Configs shared by multiple benchmarks
"""
def remove_cuda(config_list):
cuda_config = {'device': 'cuda'}
return [config for config in config_list if cuda_config not in config]
Reported by Pylint.
Line: 1
Column: 1
import operator_benchmark as op_bench
"""
Configs shared by multiple benchmarks
"""
def remove_cuda(config_list):
cuda_config = {'device': 'cuda'}
return [config for config in config_list if cuda_config not in config]
Reported by Pylint.
binaries/bench_gen/bench_gen.py
11 issues
Line: 6
Column: 1
import argparse
import ast
from caffe2.python.model_helper import ModelHelper
from caffe2.python.predictor import mobile_exporter
from caffe2.python import workspace, brew
def parse_kwarg(kwarg_str):
Reported by Pylint.
Line: 7
Column: 1
import ast
from caffe2.python.model_helper import ModelHelper
from caffe2.python.predictor import mobile_exporter
from caffe2.python import workspace, brew
def parse_kwarg(kwarg_str):
key, value = kwarg_str.split('=')
Reported by Pylint.
Line: 8
Column: 1
from caffe2.python.model_helper import ModelHelper
from caffe2.python.predictor import mobile_exporter
from caffe2.python import workspace, brew
def parse_kwarg(kwarg_str):
key, value = kwarg_str.split('=')
try:
Reported by Pylint.
Line: 20
Column: 10
return key, value
def main(args):
# User defined keyword arguments
kwargs = {"order": "NCHW", "use_cudnn": False}
kwargs.update(dict(args.kwargs))
model = ModelHelper(name=args.benchmark_name)
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python3
import argparse
import ast
from caffe2.python.model_helper import ModelHelper
from caffe2.python.predictor import mobile_exporter
from caffe2.python import workspace, brew
Reported by Pylint.
Line: 11
Column: 1
from caffe2.python import workspace, brew
def parse_kwarg(kwarg_str):
key, value = kwarg_str.split('=')
try:
value = ast.literal_eval(value)
except ValueError:
pass
Reported by Pylint.
Line: 20
Column: 1
return key, value
def main(args):
# User defined keyword arguments
kwargs = {"order": "NCHW", "use_cudnn": False}
kwargs.update(dict(args.kwargs))
model = ModelHelper(name=args.benchmark_name)
Reported by Pylint.
Line: 48
Column: 13
if args.debug:
print("init_net:")
for op in init_net.op:
print(" ", op.type, op.input, "-->", op.output)
print("predict_net:")
for op in predict_net.op:
print(" ", op.type, op.input, "-->", op.output)
Reported by Pylint.
Line: 51
Column: 13
for op in init_net.op:
print(" ", op.type, op.input, "-->", op.output)
print("predict_net:")
for op in predict_net.op:
print(" ", op.type, op.input, "-->", op.output)
with open(args.predict_net, 'wb') as f:
f.write(predict_net.SerializeToString())
with open(args.init_net, 'wb') as f:
Reported by Pylint.
Line: 54
Column: 42
for op in predict_net.op:
print(" ", op.type, op.input, "-->", op.output)
with open(args.predict_net, 'wb') as f:
f.write(predict_net.SerializeToString())
with open(args.init_net, 'wb') as f:
f.write(init_net.SerializeToString())
Reported by Pylint.
caffe2/python/mkl/mkl_squeeze_op_test.py
11 issues
Line: 7
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 8
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
Reported by Pylint.
Line: 16
Column: 9
@unittest.skipIf(
not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn."
)
class MKLSqueezeTest(hu.HypothesisTestCase):
@given(
squeeze_dims=st.lists(st.integers(0, 3), min_size=1, max_size=3),
inplace=st.booleans(),
Reported by Pylint.
Line: 24
Column: 55
inplace=st.booleans(),
**mu.gcs
)
def test_mkl_squeeze(self, squeeze_dims, inplace, gc, dc):
shape = [
1 if dim in squeeze_dims else np.random.randint(1, 5)
for dim in range(4)
]
X = np.random.rand(*shape).astype(np.float32)
Reported by Pylint.
Line: 1
Column: 1
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
Reported by Pylint.
Line: 18
Column: 1
@unittest.skipIf(
not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn."
)
class MKLSqueezeTest(hu.HypothesisTestCase):
@given(
squeeze_dims=st.lists(st.integers(0, 3), min_size=1, max_size=3),
inplace=st.booleans(),
**mu.gcs
)
Reported by Pylint.
Line: 23
Column: 5
squeeze_dims=st.lists(st.integers(0, 3), min_size=1, max_size=3),
inplace=st.booleans(),
**mu.gcs
)
def test_mkl_squeeze(self, squeeze_dims, inplace, gc, dc):
shape = [
1 if dim in squeeze_dims else np.random.randint(1, 5)
for dim in range(4)
]
Reported by Pylint.
Line: 23
Column: 5
squeeze_dims=st.lists(st.integers(0, 3), min_size=1, max_size=3),
inplace=st.booleans(),
**mu.gcs
)
def test_mkl_squeeze(self, squeeze_dims, inplace, gc, dc):
shape = [
1 if dim in squeeze_dims else np.random.randint(1, 5)
for dim in range(4)
]
Reported by Pylint.
Line: 23
Column: 5
squeeze_dims=st.lists(st.integers(0, 3), min_size=1, max_size=3),
inplace=st.booleans(),
**mu.gcs
)
def test_mkl_squeeze(self, squeeze_dims, inplace, gc, dc):
shape = [
1 if dim in squeeze_dims else np.random.randint(1, 5)
for dim in range(4)
]
Reported by Pylint.
Line: 29
Column: 9
1 if dim in squeeze_dims else np.random.randint(1, 5)
for dim in range(4)
]
X = np.random.rand(*shape).astype(np.float32)
op = core.CreateOperator(
"Squeeze", "X", "X" if inplace else "Y", dims=squeeze_dims
)
self.assertDeviceChecks(dc, op, [X], [0])
Reported by Pylint.
.circleci/cimodel/data/simple/android_definitions.py
11 issues
Line: 1
Column: 1
import cimodel.data.simple.util.branch_filters as branch_filters
from cimodel.data.simple.util.docker_constants import (
DOCKER_IMAGE_NDK, DOCKER_REQUIREMENT_NDK
)
import cimodel.lib.miniutils as miniutils
class AndroidJob:
def __init__(self,
Reported by Pylint.
Line: 8
Column: 1
import cimodel.lib.miniutils as miniutils
class AndroidJob:
def __init__(self,
variant,
template_name,
is_master_only=True):
Reported by Pylint.
Line: 8
Column: 1
import cimodel.lib.miniutils as miniutils
class AndroidJob:
def __init__(self,
variant,
template_name,
is_master_only=True):
Reported by Pylint.
Line: 18
Column: 5
self.template_name = template_name
self.is_master_only = is_master_only
def gen_tree(self):
base_name_parts = [
"pytorch",
"linux",
"xenial",
Reported by Pylint.
Line: 44
Column: 1
}
if self.is_master_only:
props_dict["filters"] = branch_filters.gen_filter_dict(branch_filters.NON_PR_BRANCH_LIST)
return [{self.template_name: props_dict}]
class AndroidGradleJob:
Reported by Pylint.
Line: 49
Column: 1
return [{self.template_name: props_dict}]
class AndroidGradleJob:
def __init__(self,
job_name,
template_name,
dependencies,
is_master_only=True,
Reported by Pylint.
Line: 49
Column: 1
return [{self.template_name: props_dict}]
class AndroidGradleJob:
def __init__(self,
job_name,
template_name,
dependencies,
is_master_only=True,
Reported by Pylint.
Line: 50
Column: 5
class AndroidGradleJob:
def __init__(self,
job_name,
template_name,
dependencies,
is_master_only=True,
is_pr_only=False,
Reported by Pylint.
Line: 65
Column: 5
self.is_pr_only = is_pr_only
self.extra_props = dict(extra_props)
def gen_tree(self):
props_dict = {
"name": self.job_name,
"requires": self.dependencies,
}
Reported by Pylint.
Line: 73
Column: 1
}
if self.is_master_only:
props_dict["filters"] = branch_filters.gen_filter_dict(branch_filters.NON_PR_BRANCH_LIST)
elif self.is_pr_only:
props_dict["filters"] = branch_filters.gen_filter_dict(branch_filters.PR_BRANCH_LIST)
if self.extra_props:
props_dict.update(self.extra_props)
Reported by Pylint.