The following issues were found:
samples/java/tutorial_code/TrackingMotion/corner_subpixels/CornerSubPixDemo.java
24 issues
Line: 41
String filename = args.length > 0 ? args[0] : "../data/pic3.png";
src = Imgcodecs.imread(filename);
if (src.empty()) {
System.err.println("Cannot read image: " + filename);
System.exit(0);
}
Imgproc.cvtColor(src, srcGray, Imgproc.COLOR_BGR2GRAY);
Reported by PMD.
Line: 109
blockSize, gradientSize, useHarrisDetector, k);
/// Draw corners detected
System.out.println("** Number of corners detected: " + corners.rows());
int[] cornersData = new int[(int) (corners.total() * corners.channels())];
corners.get(0, 0, cornersData);
int radius = 4;
Mat matCorners = new Mat(corners.rows(), 2, CvType.CV_32F);
float[] matCornersData = new float[(int) (matCorners.total() * matCorners.channels())];
Reported by PMD.
Line: 138
/// Write them down
matCorners.get(0, 0, matCornersData);
for (int i = 0; i < corners.rows(); i++) {
System.out.println(
" -- Refined Corner [" + i + "] (" + matCornersData[i * 2] + "," + matCornersData[i * 2 + 1] + ")");
}
}
}
Reported by PMD.
Line: 28
import org.opencv.imgproc.Imgproc;
class CornerSubPix {
private Mat src = new Mat();
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgLabel;
private static final int MAX_CORNERS = 25;
private int maxCorners = 10;
Reported by PMD.
Line: 28
import org.opencv.imgproc.Imgproc;
class CornerSubPix {
private Mat src = new Mat();
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgLabel;
private static final int MAX_CORNERS = 25;
private int maxCorners = 10;
Reported by PMD.
Line: 28
import org.opencv.imgproc.Imgproc;
class CornerSubPix {
private Mat src = new Mat();
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgLabel;
private static final int MAX_CORNERS = 25;
private int maxCorners = 10;
Reported by PMD.
Line: 29
class CornerSubPix {
private Mat src = new Mat();
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgLabel;
private static final int MAX_CORNERS = 25;
private int maxCorners = 10;
private Random rng = new Random(12345);
Reported by PMD.
Line: 29
class CornerSubPix {
private Mat src = new Mat();
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgLabel;
private static final int MAX_CORNERS = 25;
private int maxCorners = 10;
private Random rng = new Random(12345);
Reported by PMD.
Line: 30
class CornerSubPix {
private Mat src = new Mat();
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgLabel;
private static final int MAX_CORNERS = 25;
private int maxCorners = 10;
private Random rng = new Random(12345);
Reported by PMD.
Line: 30
class CornerSubPix {
private Mat src = new Mat();
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgLabel;
private static final int MAX_CORNERS = 25;
private int maxCorners = 10;
private Random rng = new Random(12345);
Reported by PMD.
samples/python/tutorial_code/TrackingMotion/corner_subpixels/cornerSubPix_Demo.py
24 issues
Line: 2
Column: 1
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng
source_window = 'Image'
maxTrackbar = 25
rng.seed(12345)
Reported by Pylint.
Line: 12
Column: 5
rng.seed(12345)
def goodFeaturesToTrack_Demo(val):
maxCorners = max(val, 1)
# Parameters for Shi-Tomasi algorithm
qualityLevel = 0.01
minDistance = 10
blockSize = 3
Reported by Pylint.
Line: 1
Column: 1
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng
source_window = 'Image'
maxTrackbar = 25
rng.seed(12345)
Reported by Pylint.
Line: 1
Column: 1
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng
source_window = 'Image'
maxTrackbar = 25
rng.seed(12345)
Reported by Pylint.
Line: 4
Column: 1
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng
source_window = 'Image'
maxTrackbar = 25
rng.seed(12345)
Reported by Pylint.
Line: 5
Column: 1
import cv2 as cv
import numpy as np
import argparse
import random as rng
source_window = 'Image'
maxTrackbar = 25
rng.seed(12345)
Reported by Pylint.
Line: 7
Column: 1
import argparse
import random as rng
source_window = 'Image'
maxTrackbar = 25
rng.seed(12345)
def goodFeaturesToTrack_Demo(val):
maxCorners = max(val, 1)
Reported by Pylint.
Line: 8
Column: 1
import random as rng
source_window = 'Image'
maxTrackbar = 25
rng.seed(12345)
def goodFeaturesToTrack_Demo(val):
maxCorners = max(val, 1)
Reported by Pylint.
Line: 11
Column: 1
maxTrackbar = 25
rng.seed(12345)
def goodFeaturesToTrack_Demo(val):
maxCorners = max(val, 1)
# Parameters for Shi-Tomasi algorithm
qualityLevel = 0.01
minDistance = 10
Reported by Pylint.
Line: 11
Column: 1
maxTrackbar = 25
rng.seed(12345)
def goodFeaturesToTrack_Demo(val):
maxCorners = max(val, 1)
# Parameters for Shi-Tomasi algorithm
qualityLevel = 0.01
minDistance = 10
Reported by Pylint.
modules/core/src/opencl/runtime/generator/parser_clfft.py
24 issues
Line: 5
Column: 1
# usage:
# cat clFFT.h | $0
from __future__ import print_function
import sys, re;
from common import remove_comments, getTokens, getParameters, postProcessParameters
try:
Reported by Pylint.
Line: 15
Column: 1
f = open(sys.argv[1], "r")
else:
f = sys.stdin
except:
sys.exit("ERROR. Can't open input file")
fns = []
while True:
Reported by Pylint.
Line: 33
Column: 1
nl = nl.strip()
nl = re.sub(r'\n', r'', nl)
if len(nl) == 0:
break;
line += ' ' + nl
line = remove_comments(line)
parts = getTokens(line)
Reported by Pylint.
Line: 65
Column: 1
fn['ret'] = ret
fn['calling'] = calling
name = parts[i]; i += 1;
fn['name'] = name
print('name=' + name)
params = getParameters(i, parts)
Reported by Pylint.
Line: 88
Column: 1
from pprint import pprint
pprint(fns)
from common import *
filterFileName='./filter/opencl_clfft_functions.list'
numEnabled = readFunctionFilter(fns, filterFileName)
functionsFilter = generateFilterNames(fns)
Reported by Pylint.
Line: 88
Column: 1
from pprint import pprint
pprint(fns)
from common import *
filterFileName='./filter/opencl_clfft_functions.list'
numEnabled = readFunctionFilter(fns, filterFileName)
functionsFilter = generateFilterNames(fns)
Reported by Pylint.
Line: 88
Column: 1
from pprint import pprint
pprint(fns)
from common import *
filterFileName='./filter/opencl_clfft_functions.list'
numEnabled = readFunctionFilter(fns, filterFileName)
functionsFilter = generateFilterNames(fns)
Reported by Pylint.
Line: 88
Column: 1
from pprint import pprint
pprint(fns)
from common import *
filterFileName='./filter/opencl_clfft_functions.list'
numEnabled = readFunctionFilter(fns, filterFileName)
functionsFilter = generateFilterNames(fns)
Reported by Pylint.
Line: 88
Column: 1
from pprint import pprint
pprint(fns)
from common import *
filterFileName='./filter/opencl_clfft_functions.list'
numEnabled = readFunctionFilter(fns, filterFileName)
functionsFilter = generateFilterNames(fns)
Reported by Pylint.
Line: 88
Column: 1
from pprint import pprint
pprint(fns)
from common import *
filterFileName='./filter/opencl_clfft_functions.list'
numEnabled = readFunctionFilter(fns, filterFileName)
functionsFilter = generateFilterNames(fns)
Reported by Pylint.
samples/dnn/human_parsing.py
24 issues
Line: 45
Column: 1
import argparse
import os.path
import numpy as np
import cv2 as cv
backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV,
cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA)
targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD,
Reported by Pylint.
Line: 54
Column: 16
cv.dnn.DNN_TARGET_HDDL, cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16)
def preprocess(image):
"""
Create 4-dimensional blob from image and flip image
:param image: input image
"""
image_rev = np.flip(image, axis=1)
Reported by Pylint.
Line: 60
Column: 5
:param image: input image
"""
image_rev = np.flip(image, axis=1)
input = cv.dnn.blobFromImages([image, image_rev], mean=(104.00698793, 116.66876762, 122.67891434))
return input
def run_net(input, model_path, backend, target):
"""
Reported by Pylint.
Line: 64
Column: 13
return input
def run_net(input, model_path, backend, target):
"""
Read network and infer model
:param model_path: path to JPPNet model
:param backend: computation backend
:param target: computation device
Reported by Pylint.
Line: 106
Column: 5
# 17 RightLeg
# 18 LeftShoe
# 19 RightShoe
head_output, tail_output = np.split(out, indices_or_sections=[1], axis=0)
head_output = head_output.squeeze(0)
tail_output = tail_output.squeeze(0)
head_output = np.stack([cv.resize(img, dsize=input_shape) for img in head_output[:, ...]])
tail_output = np.stack([cv.resize(img, dsize=input_shape) for img in tail_output[:, ...]])
Reported by Pylint.
Line: 142
Column: 17
return segm
def parse_human(image, model_path, backend=cv.dnn.DNN_BACKEND_OPENCV, target=cv.dnn.DNN_TARGET_CPU):
"""
Prepare input for execution, run net and postprocess output to parse human.
:param image: input image
:param model_path: path to JPPNet model
:param backend: name of computation backend
Reported by Pylint.
Line: 150
Column: 5
:param backend: name of computation backend
:param target: name of computation target
"""
input = preprocess(image)
input_h, input_w = input.shape[2:]
output = run_net(input, model_path, backend, target)
grayscale_out = postprocess(output, (input_w, input_h))
segmentation = decode_labels(grayscale_out)
return segmentation
Reported by Pylint.
Line: 152
Column: 5
"""
input = preprocess(image)
input_h, input_w = input.shape[2:]
output = run_net(input, model_path, backend, target)
grayscale_out = postprocess(output, (input_w, input_h))
segmentation = decode_labels(grayscale_out)
return segmentation
Reported by Pylint.
Line: 3
Column: 1
#!/usr/bin/env python
'''
You can download the converted pb model from https://www.dropbox.com/s/qag9vzambhhkvxr/lip_jppnet_384.pb?dl=0
or convert the model yourself.
Follow these steps if you want to convert the original model yourself:
To get original .meta pre-trained model download https://drive.google.com/file/d/1BFVXgeln-bek8TCbRjN6utPAgRE0LJZg/view
For correct convert .meta to .pb model download original repository https://github.com/Engineering-Course/LIP_JPPNet
Change script evaluate_parsing_JPPNet-s2.py for human parsing
Reported by Pylint.
Line: 7
Column: 1
or convert the model yourself.
Follow these steps if you want to convert the original model yourself:
To get original .meta pre-trained model download https://drive.google.com/file/d/1BFVXgeln-bek8TCbRjN6utPAgRE0LJZg/view
For correct convert .meta to .pb model download original repository https://github.com/Engineering-Course/LIP_JPPNet
Change script evaluate_parsing_JPPNet-s2.py for human parsing
1. Remove preprocessing to create image_batch_origin:
with tf.name_scope("create_inputs"):
...
Reported by Pylint.
samples/java/tutorial_code/Histograms_Matching/back_projection/CalcBackProjectDemo1.java
24 issues
Line: 40
public CalcBackProject1(String[] args) {
//! [Read the image]
if (args.length != 1) {
System.err.println("You must supply one argument that corresponds to the path to the image.");
System.exit(0);
}
Mat src = Imgcodecs.imread(args[0]);
if (src.empty()) {
Reported by PMD.
Line: 46
Mat src = Imgcodecs.imread(args[0]);
if (src.empty()) {
System.err.println("Empty image: " + args[0]);
System.exit(0);
}
//! [Read the image]
//! [Transform it to HSV]
Reported by PMD.
Line: 28
import org.opencv.imgproc.Imgproc;
class CalcBackProject1 {
private Mat hue;
private Mat histImg = new Mat();
private JFrame frame;
private JLabel imgLabel;
private JLabel backprojLabel;
private JLabel histImgLabel;
Reported by PMD.
Line: 28
import org.opencv.imgproc.Imgproc;
class CalcBackProject1 {
private Mat hue;
private Mat histImg = new Mat();
private JFrame frame;
private JLabel imgLabel;
private JLabel backprojLabel;
private JLabel histImgLabel;
Reported by PMD.
Line: 29
class CalcBackProject1 {
private Mat hue;
private Mat histImg = new Mat();
private JFrame frame;
private JLabel imgLabel;
private JLabel backprojLabel;
private JLabel histImgLabel;
private static final int MAX_SLIDER = 180;
Reported by PMD.
Line: 29
class CalcBackProject1 {
private Mat hue;
private Mat histImg = new Mat();
private JFrame frame;
private JLabel imgLabel;
private JLabel backprojLabel;
private JLabel histImgLabel;
private static final int MAX_SLIDER = 180;
Reported by PMD.
Line: 30
class CalcBackProject1 {
private Mat hue;
private Mat histImg = new Mat();
private JFrame frame;
private JLabel imgLabel;
private JLabel backprojLabel;
private JLabel histImgLabel;
private static final int MAX_SLIDER = 180;
private int bins = 25;
Reported by PMD.
Line: 30
class CalcBackProject1 {
private Mat hue;
private Mat histImg = new Mat();
private JFrame frame;
private JLabel imgLabel;
private JLabel backprojLabel;
private JLabel histImgLabel;
private static final int MAX_SLIDER = 180;
private int bins = 25;
Reported by PMD.
Line: 31
private Mat hue;
private Mat histImg = new Mat();
private JFrame frame;
private JLabel imgLabel;
private JLabel backprojLabel;
private JLabel histImgLabel;
private static final int MAX_SLIDER = 180;
private int bins = 25;
Reported by PMD.
Line: 31
private Mat hue;
private Mat histImg = new Mat();
private JFrame frame;
private JLabel imgLabel;
private JLabel backprojLabel;
private JLabel histImgLabel;
private static final int MAX_SLIDER = 180;
private int bins = 25;
Reported by PMD.
samples/python/tutorial_code/features2D/Homography/panorama_stitching_rotating_camera.py
24 issues
Line: 8
Column: 1
from __future__ import print_function
import numpy as np
import cv2 as cv
def basicPanoramaStitching(img1Path, img2Path):
img1 = cv.imread(cv.samples.findFile(img1Path))
img2 = cv.imread(cv.samples.findFile(img2Path))
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
Reported by Pylint.
Line: 10
Column: 1
import numpy as np
import cv2 as cv
def basicPanoramaStitching(img1Path, img2Path):
img1 = cv.imread(cv.samples.findFile(img1Path))
img2 = cv.imread(cv.samples.findFile(img2Path))
# [camera-pose-from-Blender-at-location-1]
c1Mo = np.array([[0.9659258723258972, 0.2588190734386444, 0.0, 1.5529145002365112],
Reported by Pylint.
Line: 10
Column: 1
import numpy as np
import cv2 as cv
def basicPanoramaStitching(img1Path, img2Path):
img1 = cv.imread(cv.samples.findFile(img1Path))
img2 = cv.imread(cv.samples.findFile(img2Path))
# [camera-pose-from-Blender-at-location-1]
c1Mo = np.array([[0.9659258723258972, 0.2588190734386444, 0.0, 1.5529145002365112],
Reported by Pylint.
Line: 10
Column: 1
import numpy as np
import cv2 as cv
def basicPanoramaStitching(img1Path, img2Path):
img1 = cv.imread(cv.samples.findFile(img1Path))
img2 = cv.imread(cv.samples.findFile(img2Path))
# [camera-pose-from-Blender-at-location-1]
c1Mo = np.array([[0.9659258723258972, 0.2588190734386444, 0.0, 1.5529145002365112],
Reported by Pylint.
Line: 10
Column: 1
import numpy as np
import cv2 as cv
def basicPanoramaStitching(img1Path, img2Path):
img1 = cv.imread(cv.samples.findFile(img1Path))
img2 = cv.imread(cv.samples.findFile(img2Path))
# [camera-pose-from-Blender-at-location-1]
c1Mo = np.array([[0.9659258723258972, 0.2588190734386444, 0.0, 1.5529145002365112],
Reported by Pylint.
Line: 15
Column: 5
img2 = cv.imread(cv.samples.findFile(img2Path))
# [camera-pose-from-Blender-at-location-1]
c1Mo = np.array([[0.9659258723258972, 0.2588190734386444, 0.0, 1.5529145002365112],
[ 0.08852133899927139, -0.3303661346435547, -0.9396926164627075, -0.10281121730804443],
[-0.24321036040782928, 0.9076734185218811, -0.342020183801651, 6.130080699920654],
[0, 0, 0, 1]],dtype=np.float64)
# [camera-pose-from-Blender-at-location-1]
Reported by Pylint.
Line: 16
Column: 1
# [camera-pose-from-Blender-at-location-1]
c1Mo = np.array([[0.9659258723258972, 0.2588190734386444, 0.0, 1.5529145002365112],
[ 0.08852133899927139, -0.3303661346435547, -0.9396926164627075, -0.10281121730804443],
[-0.24321036040782928, 0.9076734185218811, -0.342020183801651, 6.130080699920654],
[0, 0, 0, 1]],dtype=np.float64)
# [camera-pose-from-Blender-at-location-1]
# [camera-pose-from-Blender-at-location-2]
Reported by Pylint.
Line: 17
Column: 1
# [camera-pose-from-Blender-at-location-1]
c1Mo = np.array([[0.9659258723258972, 0.2588190734386444, 0.0, 1.5529145002365112],
[ 0.08852133899927139, -0.3303661346435547, -0.9396926164627075, -0.10281121730804443],
[-0.24321036040782928, 0.9076734185218811, -0.342020183801651, 6.130080699920654],
[0, 0, 0, 1]],dtype=np.float64)
# [camera-pose-from-Blender-at-location-1]
# [camera-pose-from-Blender-at-location-2]
c2Mo = np.array([[0.9659258723258972, -0.2588190734386444, 0.0, -1.5529145002365112],
Reported by Pylint.
Line: 22
Column: 5
# [camera-pose-from-Blender-at-location-1]
# [camera-pose-from-Blender-at-location-2]
c2Mo = np.array([[0.9659258723258972, -0.2588190734386444, 0.0, -1.5529145002365112],
[-0.08852133899927139, -0.3303661346435547, -0.9396926164627075, -0.10281121730804443],
[0.24321036040782928, 0.9076734185218811, -0.342020183801651, 6.130080699920654],
[0, 0, 0, 1]],dtype=np.float64)
# [camera-pose-from-Blender-at-location-2]
Reported by Pylint.
modules/python/test/test_umat.py
23 issues
Line: 5
Column: 1
from __future__ import print_function
import numpy as np
import cv2 as cv
import os
from tests_common import NewOpenCVTests
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import cv2 as cv
import os
from tests_common import NewOpenCVTests
Reported by Pylint.
Line: 7
Column: 1
import numpy as np
import cv2 as cv
import os
from tests_common import NewOpenCVTests
def load_exposure_seq(path):
Reported by Pylint.
Line: 12
Column: 1
from tests_common import NewOpenCVTests
def load_exposure_seq(path):
images = []
times = []
with open(os.path.join(path, 'list.txt'), 'r') as list_file:
for line in list_file.readlines():
name, time = line.split()
Reported by Pylint.
Line: 23
Column: 1
return images, times
class UMat(NewOpenCVTests):
def test_umat_construct(self):
data = np.random.random([512, 512])
# UMat constructors
data_um = cv.UMat(data) # from ndarray
Reported by Pylint.
Line: 25
Column: 5
class UMat(NewOpenCVTests):
def test_umat_construct(self):
data = np.random.random([512, 512])
# UMat constructors
data_um = cv.UMat(data) # from ndarray
data_sub_um = cv.UMat(data_um, (128, 256), (128, 256)) # from UMat
data_dst_um = cv.UMat(128, 128, cv.CV_64F) # from size/type
Reported by Pylint.
Line: 25
Column: 5
class UMat(NewOpenCVTests):
def test_umat_construct(self):
data = np.random.random([512, 512])
# UMat constructors
data_um = cv.UMat(data) # from ndarray
data_sub_um = cv.UMat(data_um, (128, 256), (128, 256)) # from UMat
data_dst_um = cv.UMat(128, 128, cv.CV_64F) # from size/type
Reported by Pylint.
Line: 32
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
data_sub_um = cv.UMat(data_um, (128, 256), (128, 256)) # from UMat
data_dst_um = cv.UMat(128, 128, cv.CV_64F) # from size/type
# test continuous and submatrix flags
assert data_um.isContinuous() and not data_um.isSubmatrix()
assert not data_sub_um.isContinuous() and data_sub_um.isSubmatrix()
# test operation on submatrix
cv.multiply(data_sub_um, 2., dst=data_dst_um)
assert np.allclose(2. * data[128:256, 128:256], data_dst_um.get())
Reported by Bandit.
Line: 33
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
data_dst_um = cv.UMat(128, 128, cv.CV_64F) # from size/type
# test continuous and submatrix flags
assert data_um.isContinuous() and not data_um.isSubmatrix()
assert not data_sub_um.isContinuous() and data_sub_um.isSubmatrix()
# test operation on submatrix
cv.multiply(data_sub_um, 2., dst=data_dst_um)
assert np.allclose(2. * data[128:256, 128:256], data_dst_um.get())
def test_umat_handle(self):
Reported by Bandit.
Line: 36
Suggestion:
https://bandit.readthedocs.io/en/latest/plugins/b101_assert_used.html
assert not data_sub_um.isContinuous() and data_sub_um.isSubmatrix()
# test operation on submatrix
cv.multiply(data_sub_um, 2., dst=data_dst_um)
assert np.allclose(2. * data[128:256, 128:256], data_dst_um.get())
def test_umat_handle(self):
a_um = cv.UMat(256, 256, cv.CV_32F)
_ctx_handle = cv.UMat.context() # obtain context handle
_queue_handle = cv.UMat.queue() # obtain queue handle
Reported by Bandit.
samples/python/gaussian_mix.py
23 issues
Line: 12
Column: 1
xrange = range
import numpy as np
import cv2 as cv
from numpy import random
def make_gaussians(cluster_n, img_size):
points = []
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
Reported by Pylint.
Line: 9
Column: 5
PY3 = sys.version_info[0] == 3
if PY3:
xrange = range
import numpy as np
import cv2 as cv
from numpy import random
Reported by Pylint.
Line: 11
Column: 1
if PY3:
xrange = range
import numpy as np
import cv2 as cv
from numpy import random
def make_gaussians(cluster_n, img_size):
Reported by Pylint.
Line: 12
Column: 1
xrange = range
import numpy as np
import cv2 as cv
from numpy import random
def make_gaussians(cluster_n, img_size):
points = []
Reported by Pylint.
Line: 14
Column: 1
import numpy as np
import cv2 as cv
from numpy import random
def make_gaussians(cluster_n, img_size):
points = []
ref_distrs = []
for _i in xrange(cluster_n):
Reported by Pylint.
Line: 16
Column: 1
from numpy import random
def make_gaussians(cluster_n, img_size):
points = []
ref_distrs = []
for _i in xrange(cluster_n):
mean = (0.1 + 0.8*random.rand(2)) * img_size
a = (random.rand(2, 2)-0.5)*img_size*0.1
Reported by Pylint.
Line: 21
Column: 9
ref_distrs = []
for _i in xrange(cluster_n):
mean = (0.1 + 0.8*random.rand(2)) * img_size
a = (random.rand(2, 2)-0.5)*img_size*0.1
cov = np.dot(a.T, a) + img_size*0.05*np.eye(2)
n = 100 + random.randint(900)
pts = random.multivariate_normal(mean, cov, n)
points.append( pts )
ref_distrs.append( (mean, cov) )
Reported by Pylint.
Line: 23
Column: 9
mean = (0.1 + 0.8*random.rand(2)) * img_size
a = (random.rand(2, 2)-0.5)*img_size*0.1
cov = np.dot(a.T, a) + img_size*0.05*np.eye(2)
n = 100 + random.randint(900)
pts = random.multivariate_normal(mean, cov, n)
points.append( pts )
ref_distrs.append( (mean, cov) )
points = np.float32( np.vstack(points) )
return points, ref_distrs
Reported by Pylint.
Line: 30
Column: 1
points = np.float32( np.vstack(points) )
return points, ref_distrs
def draw_gaussain(img, mean, cov, color):
x, y = mean
w, u, _vt = cv.SVDecomp(cov)
ang = np.arctan2(u[1, 0], u[0, 0])*(180/np.pi)
s1, s2 = np.sqrt(w)*3.0
cv.ellipse(img, (int(x), int(y)), (int(s1), int(s2)), ang, 0, 360, color, 1, cv.LINE_AA)
Reported by Pylint.
samples/dnn/mobilenet_ssd_accuracy.py
23 issues
Line: 13
Column: 1
#
# Tested on COCO 2017 object detection dataset, http://cocodataset.org/#download
import os
import cv2 as cv
import json
import argparse
parser = argparse.ArgumentParser(
description='Evaluate MobileNet-SSD model using both TensorFlow and OpenCV. '
Reported by Pylint.
Line: 62
Column: 1
json.dump(detections, f)
### Get TensorFlow predictions #################################################
import tensorflow as tf
with tf.gfile.FastGFile(args.weights) as f:
# Load the model
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
Reported by Pylint.
Line: 110
Column: 1
# %matplotlib inline
import matplotlib.pyplot as plt
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import numpy as np
import skimage.io as io
import pylab
pylab.rcParams['figure.figsize'] = (10.0, 8.0)
Reported by Pylint.
Line: 111
Column: 1
# %matplotlib inline
import matplotlib.pyplot as plt
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import numpy as np
import skimage.io as io
import pylab
pylab.rcParams['figure.figsize'] = (10.0, 8.0)
Reported by Pylint.
Line: 113
Column: 1
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import numpy as np
import skimage.io as io
import pylab
pylab.rcParams['figure.figsize'] = (10.0, 8.0)
annType = ['segm','bbox','keypoints']
annType = annType[1] #specify type here
Reported by Pylint.
Line: 109
Column: 1
### Evaluation part ############################################################
# %matplotlib inline
import matplotlib.pyplot as plt
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import numpy as np
import skimage.io as io
import pylab
Reported by Pylint.
Line: 112
Column: 1
import matplotlib.pyplot as plt
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import numpy as np
import skimage.io as io
import pylab
pylab.rcParams['figure.figsize'] = (10.0, 8.0)
annType = ['segm','bbox','keypoints']
Reported by Pylint.
Line: 113
Column: 1
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import numpy as np
import skimage.io as io
import pylab
pylab.rcParams['figure.figsize'] = (10.0, 8.0)
annType = ['segm','bbox','keypoints']
annType = annType[1] #specify type here
Reported by Pylint.
Line: 1
Column: 1
from __future__ import print_function
# Script to evaluate MobileNet-SSD object detection model trained in TensorFlow
# using both TensorFlow and OpenCV. Example:
#
# python mobilenet_ssd_accuracy.py \
# --weights=frozen_inference_graph.pb \
# --prototxt=ssd_mobilenet_v1_coco.pbtxt \
# --images=val2017 \
# --annotations=annotations/instances_val2017.json
Reported by Pylint.
Line: 14
Column: 1
# Tested on COCO 2017 object detection dataset, http://cocodataset.org/#download
import os
import cv2 as cv
import json
import argparse
parser = argparse.ArgumentParser(
description='Evaluate MobileNet-SSD model using both TensorFlow and OpenCV. '
'COCO evaluation framework is required: http://cocodataset.org')
Reported by Pylint.
modules/core/misc/java/test/ScalarTest.java
23 issues
Line: 6
import org.opencv.core.Scalar;
import org.opencv.test.OpenCVTestCase;
public class ScalarTest extends OpenCVTestCase {
private Scalar dstScalar;
private Scalar s1;
private Scalar s2;
Reported by PMD.
Line: 8
public class ScalarTest extends OpenCVTestCase {
private Scalar dstScalar;
private Scalar s1;
private Scalar s2;
@Override
protected void setUp() throws Exception {
Reported by PMD.
Line: 9
public class ScalarTest extends OpenCVTestCase {
private Scalar dstScalar;
private Scalar s1;
private Scalar s2;
@Override
protected void setUp() throws Exception {
super.setUp();
Reported by PMD.
Line: 10
private Scalar dstScalar;
private Scalar s1;
private Scalar s2;
@Override
protected void setUp() throws Exception {
super.setUp();
Reported by PMD.
Line: 12
private Scalar s1;
private Scalar s2;
@Override
protected void setUp() throws Exception {
super.setUp();
s1 = new Scalar(1.0);
s2 = Scalar.all(1.0);
Reported by PMD.
Line: 18
s1 = new Scalar(1.0);
s2 = Scalar.all(1.0);
dstScalar = null;
}
public void testAll() {
dstScalar = Scalar.all(2.0);
Scalar truth = new Scalar(2.0, 2.0, 2.0, 2.0);
Reported by PMD.
Line: 21
dstScalar = null;
}
public void testAll() {
dstScalar = Scalar.all(2.0);
Scalar truth = new Scalar(2.0, 2.0, 2.0, 2.0);
assertEquals(truth, dstScalar);
}
Reported by PMD.
Line: 27
assertEquals(truth, dstScalar);
}
public void testClone() {
dstScalar = s2.clone();
assertEquals(s2, dstScalar);
}
public void testConj() {
Reported by PMD.
Line: 32
assertEquals(s2, dstScalar);
}
public void testConj() {
dstScalar = s2.conj();
Scalar truth = new Scalar(1, -1, -1, -1);
assertEquals(truth, dstScalar);
}
Reported by PMD.
Line: 38
assertEquals(truth, dstScalar);
}
public void testEqualsObject() {
dstScalar = s2.clone();
assertTrue(s2.equals(dstScalar));
assertFalse(s2.equals(s1));
}
Reported by PMD.