The following issues were found:
samples/python/morphology.py
28 issues
Line: 21
Column: 1
PY3 = sys.version_info[0] == 3
import numpy as np
import cv2 as cv
def main():
import sys
from itertools import cycle
Reported by Pylint.
Line: 49
Column: 20
cur_mode = next(modes)
cur_str_mode = next(str_modes)
else:
cur_mode = modes.next()
cur_str_mode = str_modes.next()
def update(dummy=None):
try: # do not get trackbar position while trackbar is not created
sz = cv.getTrackbarPos('op/size', 'morphology')
Reported by Pylint.
Line: 50
Column: 24
cur_str_mode = next(str_modes)
else:
cur_mode = modes.next()
cur_str_mode = str_modes.next()
def update(dummy=None):
try: # do not get trackbar position while trackbar is not created
sz = cv.getTrackbarPos('op/size', 'morphology')
iters = cv.getTrackbarPos('iters', 'morphology')
Reported by Pylint.
Line: 90
Column: 28
if PY3:
cur_mode = next(modes)
else:
cur_mode = modes.next()
if ch == ord('2'):
if PY3:
cur_str_mode = next(str_modes)
else:
cur_str_mode = str_modes.next()
Reported by Pylint.
Line: 95
Column: 32
if PY3:
cur_str_mode = next(str_modes)
else:
cur_str_mode = str_modes.next()
update()
print('Done')
Reported by Pylint.
Line: 20
Column: 1
import sys
PY3 = sys.version_info[0] == 3
import numpy as np
import cv2 as cv
def main():
import sys
Reported by Pylint.
Line: 25
Column: 5
def main():
import sys
from itertools import cycle
from common import draw_str
try:
fn = sys.argv[1]
Reported by Pylint.
Line: 25
Column: 5
def main():
import sys
from itertools import cycle
from common import draw_str
try:
fn = sys.argv[1]
Reported by Pylint.
Line: 31
Column: 5
try:
fn = sys.argv[1]
except:
fn = 'baboon.jpg'
img = cv.imread(cv.samples.findFile(fn))
if img is None:
Reported by Pylint.
Line: 52
Column: 16
cur_mode = modes.next()
cur_str_mode = str_modes.next()
def update(dummy=None):
try: # do not get trackbar position while trackbar is not created
sz = cv.getTrackbarPos('op/size', 'morphology')
iters = cv.getTrackbarPos('iters', 'morphology')
except:
return
Reported by Pylint.
samples/java/tutorial_code/ShapeDescriptors/bounding_rects_circles/GeneralContoursDemo1.java
28 issues
Line: 45
String filename = args.length > 0 ? args[0] : "../data/stuff.jpg";
Mat src = Imgcodecs.imread(filename);
if (src.empty()) {
System.err.println("Cannot read image: " + filename);
System.exit(0);
}
/// Convert image to gray and blur it
Imgproc.cvtColor(src, srcGray, Imgproc.COLOR_BGR2GRAY);
Reported by PMD.
Line: 31
import org.opencv.imgproc.Imgproc;
class GeneralContours1 {
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgSrcLabel;
private JLabel imgContoursLabel;
private static final int MAX_THRESHOLD = 255;
private int threshold = 100;
Reported by PMD.
Line: 31
import org.opencv.imgproc.Imgproc;
class GeneralContours1 {
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgSrcLabel;
private JLabel imgContoursLabel;
private static final int MAX_THRESHOLD = 255;
private int threshold = 100;
Reported by PMD.
Line: 32
class GeneralContours1 {
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgSrcLabel;
private JLabel imgContoursLabel;
private static final int MAX_THRESHOLD = 255;
private int threshold = 100;
private Random rng = new Random(12345);
Reported by PMD.
Line: 32
class GeneralContours1 {
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgSrcLabel;
private JLabel imgContoursLabel;
private static final int MAX_THRESHOLD = 255;
private int threshold = 100;
private Random rng = new Random(12345);
Reported by PMD.
Line: 33
class GeneralContours1 {
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgSrcLabel;
private JLabel imgContoursLabel;
private static final int MAX_THRESHOLD = 255;
private int threshold = 100;
private Random rng = new Random(12345);
Reported by PMD.
Line: 33
class GeneralContours1 {
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgSrcLabel;
private JLabel imgContoursLabel;
private static final int MAX_THRESHOLD = 255;
private int threshold = 100;
private Random rng = new Random(12345);
Reported by PMD.
Line: 34
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgSrcLabel;
private JLabel imgContoursLabel;
private static final int MAX_THRESHOLD = 255;
private int threshold = 100;
private Random rng = new Random(12345);
public GeneralContours1(String[] args) {
Reported by PMD.
Line: 36
private JLabel imgSrcLabel;
private JLabel imgContoursLabel;
private static final int MAX_THRESHOLD = 255;
private int threshold = 100;
private Random rng = new Random(12345);
public GeneralContours1(String[] args) {
//! [setup]
/// Load source image
Reported by PMD.
Line: 37
private JLabel imgContoursLabel;
private static final int MAX_THRESHOLD = 255;
private int threshold = 100;
private Random rng = new Random(12345);
public GeneralContours1(String[] args) {
//! [setup]
/// Load source image
String filename = args.length > 0 ? args[0] : "../data/stuff.jpg";
Reported by PMD.
modules/highgui/misc/java/src/java/highgui+HighGui.java
28 issues
Line: 46
public static void imshow(String winname, Mat img) {
if (img.empty()) {
System.err.println("Error: Empty image in imshow");
System.exit(-1);
} else {
ImageWindow tmpWindow = windows.get(winname);
if (tmpWindow == null) {
ImageWindow newWin = new ImageWindow(winname, img);
Reported by PMD.
Line: 121
// If there are no windows to be shown return
if (windows.isEmpty()) {
System.err.println("Error: waitKey must be used after an imshow");
System.exit(-1);
}
// Remove the unused windows
Iterator<Map.Entry<String,
Reported by PMD.
Line: 153
win.lbl.setIcon(icon);
}
} else {
System.err.println("Error: no imshow associated with" + " namedWindow: \"" + win.name + "\"");
System.exit(-1);
}
}
try {
Reported by PMD.
Line: 21
* This class was designed for use in Java applications
* to recreate the OpenCV HighGui functionalities.
*/
public final class HighGui {
// Constants for namedWindow
public final static int WINDOW_NORMAL = ImageWindow.WINDOW_NORMAL;
public final static int WINDOW_AUTOSIZE = ImageWindow.WINDOW_AUTOSIZE;
Reported by PMD.
Line: 21
* This class was designed for use in Java applications
* to recreate the OpenCV HighGui functionalities.
*/
public final class HighGui {
// Constants for namedWindow
public final static int WINDOW_NORMAL = ImageWindow.WINDOW_NORMAL;
public final static int WINDOW_AUTOSIZE = ImageWindow.WINDOW_AUTOSIZE;
Reported by PMD.
Line: 21
* This class was designed for use in Java applications
* to recreate the OpenCV HighGui functionalities.
*/
public final class HighGui {
// Constants for namedWindow
public final static int WINDOW_NORMAL = ImageWindow.WINDOW_NORMAL;
public final static int WINDOW_AUTOSIZE = ImageWindow.WINDOW_AUTOSIZE;
Reported by PMD.
Line: 21
* This class was designed for use in Java applications
* to recreate the OpenCV HighGui functionalities.
*/
public final class HighGui {
// Constants for namedWindow
public final static int WINDOW_NORMAL = ImageWindow.WINDOW_NORMAL;
public final static int WINDOW_AUTOSIZE = ImageWindow.WINDOW_AUTOSIZE;
Reported by PMD.
Line: 28
public final static int WINDOW_AUTOSIZE = ImageWindow.WINDOW_AUTOSIZE;
// Control Variables
public static int n_closed_windows = 0;
public static int pressedKey = -1;
public static CountDownLatch latch = new CountDownLatch(1);
// Windows Map
public static Map<String, ImageWindow> windows = new HashMap<String, ImageWindow>();
Reported by PMD.
Line: 47
public static void imshow(String winname, Mat img) {
if (img.empty()) {
System.err.println("Error: Empty image in imshow");
System.exit(-1);
} else {
ImageWindow tmpWindow = windows.get(winname);
if (tmpWindow == null) {
ImageWindow newWin = new ImageWindow(winname, img);
windows.put(winname, newWin);
Reported by PMD.
Line: 54
ImageWindow newWin = new ImageWindow(winname, img);
windows.put(winname, newWin);
} else {
tmpWindow.setMat(img);
}
}
}
public static Image toBufferedImage(Mat m) {
Reported by PMD.
samples/dnn/openpose.py
28 issues
Line: 3
Column: 1
# To use Inference Engine backend, specify location of plugins:
# source /opt/intel/computer_vision_sdk/bin/setupvars.sh
import cv2 as cv
import numpy as np
import argparse
parser = argparse.ArgumentParser(
description='This script is used to demonstrate OpenPose human pose estimation network '
'from https://github.com/CMU-Perceptual-Computing-Lab/openpose project using OpenCV. '
Reported by Pylint.
Line: 4
Column: 1
# To use Inference Engine backend, specify location of plugins:
# source /opt/intel/computer_vision_sdk/bin/setupvars.sh
import cv2 as cv
import numpy as np
import argparse
parser = argparse.ArgumentParser(
description='This script is used to demonstrate OpenPose human pose estimation network '
'from https://github.com/CMU-Perceptual-Computing-Lab/openpose project using OpenCV. '
Reported by Pylint.
Line: 1
Column: 1
# To use Inference Engine backend, specify location of plugins:
# source /opt/intel/computer_vision_sdk/bin/setupvars.sh
import cv2 as cv
import numpy as np
import argparse
parser = argparse.ArgumentParser(
description='This script is used to demonstrate OpenPose human pose estimation network '
'from https://github.com/CMU-Perceptual-Computing-Lab/openpose project using OpenCV. '
Reported by Pylint.
Line: 5
Column: 1
# source /opt/intel/computer_vision_sdk/bin/setupvars.sh
import cv2 as cv
import numpy as np
import argparse
parser = argparse.ArgumentParser(
description='This script is used to demonstrate OpenPose human pose estimation network '
'from https://github.com/CMU-Perceptual-Computing-Lab/openpose project using OpenCV. '
'The sample and model are simplified and could be used for a single person on the frame.')
Reported by Pylint.
Line: 9
Column: 1
parser = argparse.ArgumentParser(
description='This script is used to demonstrate OpenPose human pose estimation network '
'from https://github.com/CMU-Perceptual-Computing-Lab/openpose project using OpenCV. '
'The sample and model are simplified and could be used for a single person on the frame.')
parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera')
parser.add_argument('--proto', help='Path to .prototxt')
parser.add_argument('--model', help='Path to .caffemodel')
parser.add_argument('--dataset', help='Specify what kind of model was trained. '
Reported by Pylint.
Line: 10
Column: 1
parser = argparse.ArgumentParser(
description='This script is used to demonstrate OpenPose human pose estimation network '
'from https://github.com/CMU-Perceptual-Computing-Lab/openpose project using OpenCV. '
'The sample and model are simplified and could be used for a single person on the frame.')
parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera')
parser.add_argument('--proto', help='Path to .prototxt')
parser.add_argument('--model', help='Path to .caffemodel')
parser.add_argument('--dataset', help='Specify what kind of model was trained. '
'It could be (COCO, MPI, HAND) depends on dataset.')
Reported by Pylint.
Line: 16
Column: 1
parser.add_argument('--model', help='Path to .caffemodel')
parser.add_argument('--dataset', help='Specify what kind of model was trained. '
'It could be (COCO, MPI, HAND) depends on dataset.')
parser.add_argument('--thr', default=0.1, type=float, help='Threshold value for pose parts heat map')
parser.add_argument('--width', default=368, type=int, help='Resize input to specific width.')
parser.add_argument('--height', default=368, type=int, help='Resize input to specific height.')
parser.add_argument('--scale', default=0.003922, type=float, help='Scale for blob.')
args = parser.parse_args()
Reported by Pylint.
Line: 47
Column: 1
elif args.dataset == 'HAND':
BODY_PARTS = { "Wrist": 0,
"ThumbMetacarpal": 1, "ThumbProximal": 2, "ThumbMiddle": 3, "ThumbDistal": 4,
"IndexFingerMetacarpal": 5, "IndexFingerProximal": 6, "IndexFingerMiddle": 7, "IndexFingerDistal": 8,
"MiddleFingerMetacarpal": 9, "MiddleFingerProximal": 10, "MiddleFingerMiddle": 11, "MiddleFingerDistal": 12,
"RingFingerMetacarpal": 13, "RingFingerProximal": 14, "RingFingerMiddle": 15, "RingFingerDistal": 16,
"LittleFingerMetacarpal": 17, "LittleFingerProximal": 18, "LittleFingerMiddle": 19, "LittleFingerDistal": 20,
}
Reported by Pylint.
Line: 48
Column: 1
BODY_PARTS = { "Wrist": 0,
"ThumbMetacarpal": 1, "ThumbProximal": 2, "ThumbMiddle": 3, "ThumbDistal": 4,
"IndexFingerMetacarpal": 5, "IndexFingerProximal": 6, "IndexFingerMiddle": 7, "IndexFingerDistal": 8,
"MiddleFingerMetacarpal": 9, "MiddleFingerProximal": 10, "MiddleFingerMiddle": 11, "MiddleFingerDistal": 12,
"RingFingerMetacarpal": 13, "RingFingerProximal": 14, "RingFingerMiddle": 15, "RingFingerDistal": 16,
"LittleFingerMetacarpal": 17, "LittleFingerProximal": 18, "LittleFingerMiddle": 19, "LittleFingerDistal": 20,
}
POSE_PAIRS = [ ["Wrist", "ThumbMetacarpal"], ["ThumbMetacarpal", "ThumbProximal"],
Reported by Pylint.
Line: 49
Column: 1
"ThumbMetacarpal": 1, "ThumbProximal": 2, "ThumbMiddle": 3, "ThumbDistal": 4,
"IndexFingerMetacarpal": 5, "IndexFingerProximal": 6, "IndexFingerMiddle": 7, "IndexFingerDistal": 8,
"MiddleFingerMetacarpal": 9, "MiddleFingerProximal": 10, "MiddleFingerMiddle": 11, "MiddleFingerDistal": 12,
"RingFingerMetacarpal": 13, "RingFingerProximal": 14, "RingFingerMiddle": 15, "RingFingerDistal": 16,
"LittleFingerMetacarpal": 17, "LittleFingerProximal": 18, "LittleFingerMiddle": 19, "LittleFingerDistal": 20,
}
POSE_PAIRS = [ ["Wrist", "ThumbMetacarpal"], ["ThumbMetacarpal", "ThumbProximal"],
["ThumbProximal", "ThumbMiddle"], ["ThumbMiddle", "ThumbDistal"],
Reported by Pylint.
samples/python/tutorial_code/imgProc/erosion_dilatation/morphology_1.py
28 issues
Line: 2
Column: 1
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
src = None
erosion_size = 0
max_elem = 2
max_kernel_size = 21
Reported by Pylint.
Line: 3
Column: 1
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
src = None
erosion_size = 0
max_elem = 2
max_kernel_size = 21
Reported by Pylint.
Line: 18
Column: 5
## [main]
def main(image):
global src
src = cv.imread(cv.samples.findFile(image))
if src is None:
print('Could not open or find the image: ', image)
exit(0)
Reported by Pylint.
Line: 48
Column: 13
## [erosion]
def erosion(val):
erosion_size = cv.getTrackbarPos(title_trackbar_kernel_size, title_erosion_window)
erosion_shape = morph_shape(cv.getTrackbarPos(title_trackbar_element_shape, title_erosion_window))
## [kernel]
element = cv.getStructuringElement(erosion_shape, (2 * erosion_size + 1, 2 * erosion_size + 1),
Reported by Pylint.
Line: 49
Column: 5
## [erosion]
def erosion(val):
erosion_size = cv.getTrackbarPos(title_trackbar_kernel_size, title_erosion_window)
erosion_shape = morph_shape(cv.getTrackbarPos(title_trackbar_element_shape, title_erosion_window))
## [kernel]
element = cv.getStructuringElement(erosion_shape, (2 * erosion_size + 1, 2 * erosion_size + 1),
(erosion_size, erosion_size))
Reported by Pylint.
Line: 62
Column: 16
## [dilation]
def dilatation(val):
dilatation_size = cv.getTrackbarPos(title_trackbar_kernel_size, title_dilation_window)
dilation_shape = morph_shape(cv.getTrackbarPos(title_trackbar_element_shape, title_dilation_window))
element = cv.getStructuringElement(dilation_shape, (2 * dilatation_size + 1, 2 * dilatation_size + 1),
(dilatation_size, dilatation_size))
Reported by Pylint.
Line: 1
Column: 1
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
src = None
erosion_size = 0
max_elem = 2
max_kernel_size = 21
Reported by Pylint.
Line: 4
Column: 1
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
src = None
erosion_size = 0
max_elem = 2
max_kernel_size = 21
Reported by Pylint.
Line: 6
Column: 1
import numpy as np
import argparse
src = None
erosion_size = 0
max_elem = 2
max_kernel_size = 21
title_trackbar_element_shape = 'Element:\n 0: Rect \n 1: Cross \n 2: Ellipse'
title_trackbar_kernel_size = 'Kernel size:\n 2n +1'
Reported by Pylint.
Line: 7
Column: 1
import argparse
src = None
erosion_size = 0
max_elem = 2
max_kernel_size = 21
title_trackbar_element_shape = 'Element:\n 0: Rect \n 1: Cross \n 2: Ellipse'
title_trackbar_kernel_size = 'Kernel size:\n 2n +1'
title_erosion_window = 'Erosion Demo'
Reported by Pylint.
samples/dnn/optical_flow.py
28 issues
Line: 16
Column: 1
import argparse
import os.path
import numpy as np
import cv2 as cv
class OpticalFlow(object):
def __init__(self, proto, model, height, width):
self.net = cv.dnn.readNetFromCaffe(proto, model)
Reported by Pylint.
Line: 20
Column: 24
class OpticalFlow(object):
def __init__(self, proto, model, height, width):
self.net = cv.dnn.readNetFromCaffe(proto, model)
self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
self.height = height
self.width = width
Reported by Pylint.
Line: 31
Column: 9
inp1 = cv.dnn.blobFromImage(second_img, size=(self.width, self.height))
self.net.setInput(inp0, "img0")
self.net.setInput(inp1, "img1")
flow = self.net.forward()
output = self.motion_to_color(flow)
return output
def motion_to_color(self, flow):
arr = np.arange(0, 255, dtype=np.uint8)
Reported by Pylint.
Line: 35
Column: 31
output = self.motion_to_color(flow)
return output
def motion_to_color(self, flow):
arr = np.arange(0, 255, dtype=np.uint8)
colormap = cv.applyColorMap(arr, cv.COLORMAP_HSV)
colormap = colormap.squeeze(1)
flow = flow.squeeze(0)
Reported by Pylint.
Line: 7
Column: 1
Original paper: https://arxiv.org/abs/1612.01925.
Original repo: https://github.com/lmb-freiburg/flownet2.
Download the converted .caffemodel model from https://drive.google.com/open?id=16qvE9VNmU39NttpZwZs81Ga8VYQJDaWZ
and .prototxt from https://drive.google.com/file/d/1RyNIUsan1ZOh2hpYIH36A-jofAvJlT6a/view?usp=sharing.
Otherwise download original model from https://lmb.informatik.uni-freiburg.de/resources/binaries/flownet2/flownet2-models.tar.gz,
convert .h5 model to .caffemodel and modify original .prototxt using .prototxt from link above.
'''
Reported by Pylint.
Line: 8
Column: 1
Original repo: https://github.com/lmb-freiburg/flownet2.
Download the converted .caffemodel model from https://drive.google.com/open?id=16qvE9VNmU39NttpZwZs81Ga8VYQJDaWZ
and .prototxt from https://drive.google.com/file/d/1RyNIUsan1ZOh2hpYIH36A-jofAvJlT6a/view?usp=sharing.
Otherwise download original model from https://lmb.informatik.uni-freiburg.de/resources/binaries/flownet2/flownet2-models.tar.gz,
convert .h5 model to .caffemodel and modify original .prototxt using .prototxt from link above.
'''
import argparse
Reported by Pylint.
Line: 9
Column: 1
Download the converted .caffemodel model from https://drive.google.com/open?id=16qvE9VNmU39NttpZwZs81Ga8VYQJDaWZ
and .prototxt from https://drive.google.com/file/d/1RyNIUsan1ZOh2hpYIH36A-jofAvJlT6a/view?usp=sharing.
Otherwise download original model from https://lmb.informatik.uni-freiburg.de/resources/binaries/flownet2/flownet2-models.tar.gz,
convert .h5 model to .caffemodel and modify original .prototxt using .prototxt from link above.
'''
import argparse
import os.path
Reported by Pylint.
Line: 19
Column: 1
import cv2 as cv
class OpticalFlow(object):
def __init__(self, proto, model, height, width):
self.net = cv.dnn.readNetFromCaffe(proto, model)
self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
self.height = height
self.width = width
Reported by Pylint.
Line: 19
Column: 1
import cv2 as cv
class OpticalFlow(object):
def __init__(self, proto, model, height, width):
self.net = cv.dnn.readNetFromCaffe(proto, model)
self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
self.height = height
self.width = width
Reported by Pylint.
Line: 26
Column: 5
self.height = height
self.width = width
def compute_flow(self, first_img, second_img):
inp0 = cv.dnn.blobFromImage(first_img, size=(self.width, self.height))
inp1 = cv.dnn.blobFromImage(second_img, size=(self.width, self.height))
self.net.setInput(inp0, "img0")
self.net.setInput(inp1, "img1")
flow = self.net.forward()
Reported by Pylint.
samples/python/floodfill.py
28 issues
Line: 21
Column: 1
from __future__ import print_function
import numpy as np
import cv2 as cv
import sys
class App():
Reported by Pylint.
Line: 27
Column: 22
class App():
def update(self, dummy=None):
if self.seed_pt is None:
cv.imshow('floodfill', self.img)
return
flooded = self.img.copy()
self.mask[:] = 0
Reported by Pylint.
Line: 42
Column: 43
cv.circle(flooded, self.seed_pt, 2, (0, 0, 255), -1)
cv.imshow('floodfill', flooded)
def onmouse(self, event, x, y, flags, param):
if flags & cv.EVENT_FLAG_LBUTTON:
self.seed_pt = x, y
self.update()
def run(self):
Reported by Pylint.
Line: 42
Column: 23
cv.circle(flooded, self.seed_pt, 2, (0, 0, 255), -1)
cv.imshow('floodfill', flooded)
def onmouse(self, event, x, y, flags, param):
if flags & cv.EVENT_FLAG_LBUTTON:
self.seed_pt = x, y
self.update()
def run(self):
Reported by Pylint.
Line: 44
Column: 13
def onmouse(self, event, x, y, flags, param):
if flags & cv.EVENT_FLAG_LBUTTON:
self.seed_pt = x, y
self.update()
def run(self):
try:
fn = sys.argv[1]
Reported by Pylint.
Line: 50
Column: 9
def run(self):
try:
fn = sys.argv[1]
except:
fn = 'fruits.jpg'
self.img = cv.imread(cv.samples.findFile(fn))
if self.img is None:
print('Failed to load image file:', fn)
Reported by Pylint.
Line: 53
Column: 9
except:
fn = 'fruits.jpg'
self.img = cv.imread(cv.samples.findFile(fn))
if self.img is None:
print('Failed to load image file:', fn)
sys.exit(1)
h, w = self.img.shape[:2]
Reported by Pylint.
Line: 59
Column: 9
sys.exit(1)
h, w = self.img.shape[:2]
self.mask = np.zeros((h+2, w+2), np.uint8)
self.seed_pt = None
self.fixed_range = True
self.connectivity = 4
self.update()
Reported by Pylint.
Line: 60
Column: 9
h, w = self.img.shape[:2]
self.mask = np.zeros((h+2, w+2), np.uint8)
self.seed_pt = None
self.fixed_range = True
self.connectivity = 4
self.update()
cv.setMouseCallback('floodfill', self.onmouse)
Reported by Pylint.
Line: 61
Column: 9
h, w = self.img.shape[:2]
self.mask = np.zeros((h+2, w+2), np.uint8)
self.seed_pt = None
self.fixed_range = True
self.connectivity = 4
self.update()
cv.setMouseCallback('floodfill', self.onmouse)
cv.createTrackbar('lo', 'floodfill', 20, 255, self.update)
Reported by Pylint.
platforms/android/service/engine/src/org/opencv/engine/HardwareDetector.java
28 issues
Line: 15
import android.text.TextUtils;
import android.util.Log;
public class HardwareDetector {
private static String TAG = "OpenCVEngine/HardwareDetector";
public static final int ARCH_UNKNOWN = -1;
public static final int ARCH_X86 = 0x01000000;
Reported by PMD.
Line: 31
// Return CPU flags list
public static List<String> getFlags() {
Map<String, String> raw = getRawCpuInfo();
String f = raw.get("flags");
if (f == null)
f = raw.get("Features");
if (f == null)
return Arrays.asList();
return Arrays.asList(TextUtils.split(f, " "));
Reported by PMD.
Line: 33
Map<String, String> raw = getRawCpuInfo();
String f = raw.get("flags");
if (f == null)
f = raw.get("Features");
if (f == null)
return Arrays.asList();
return Arrays.asList(TextUtils.split(f, " "));
}
Reported by PMD.
Line: 42
// Return CPU arch
public static int getAbi() {
List<String> abis = Arrays.asList(Build.CPU_ABI, Build.CPU_ABI2);
Log.i(TAG, "ABIs: " + abis.toString());
if (abis.contains("x86_64")) {
return ARCH_X86_64;
} else if (abis.contains("x86")) {
return ARCH_X86;
} else if (abis.contains("arm64-v8a")) {
Reported by PMD.
Line: 43
public static int getAbi() {
List<String> abis = Arrays.asList(Build.CPU_ABI, Build.CPU_ABI2);
Log.i(TAG, "ABIs: " + abis.toString());
if (abis.contains("x86_64")) {
return ARCH_X86_64;
} else if (abis.contains("x86")) {
return ARCH_X86;
} else if (abis.contains("arm64-v8a")) {
return ARCH_ARMv8;
Reported by PMD.
Line: 45
Log.i(TAG, "ABIs: " + abis.toString());
if (abis.contains("x86_64")) {
return ARCH_X86_64;
} else if (abis.contains("x86")) {
return ARCH_X86;
} else if (abis.contains("arm64-v8a")) {
return ARCH_ARMv8;
} else if (abis.contains("armeabi-v7a")
|| abis.contains("armeabi-v7a-hard")) {
Reported by PMD.
Line: 47
return ARCH_X86_64;
} else if (abis.contains("x86")) {
return ARCH_X86;
} else if (abis.contains("arm64-v8a")) {
return ARCH_ARMv8;
} else if (abis.contains("armeabi-v7a")
|| abis.contains("armeabi-v7a-hard")) {
return ARCH_ARMv7;
} else if (abis.contains("armeabi")) {
Reported by PMD.
Line: 49
return ARCH_X86;
} else if (abis.contains("arm64-v8a")) {
return ARCH_ARMv8;
} else if (abis.contains("armeabi-v7a")
|| abis.contains("armeabi-v7a-hard")) {
return ARCH_ARMv7;
} else if (abis.contains("armeabi")) {
return ARCH_ARM;
} else if (abis.contains("mips64")) {
Reported by PMD.
Line: 50
} else if (abis.contains("arm64-v8a")) {
return ARCH_ARMv8;
} else if (abis.contains("armeabi-v7a")
|| abis.contains("armeabi-v7a-hard")) {
return ARCH_ARMv7;
} else if (abis.contains("armeabi")) {
return ARCH_ARM;
} else if (abis.contains("mips64")) {
return ARCH_MIPS_64;
Reported by PMD.
Line: 52
} else if (abis.contains("armeabi-v7a")
|| abis.contains("armeabi-v7a-hard")) {
return ARCH_ARMv7;
} else if (abis.contains("armeabi")) {
return ARCH_ARM;
} else if (abis.contains("mips64")) {
return ARCH_MIPS_64;
} else if (abis.contains("mips")) {
return ARCH_MIPS;
Reported by PMD.
samples/dnn/dnn_model_runner/dnn_conversion/common/utils.py
28 issues
Line: 8
Column: 1
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import torch
from .test.configs.test_config import CommonConfig
SEED_VAL = 42
Reported by Pylint.
Line: 9
Column: 1
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import torch
from .test.configs.test_config import CommonConfig
SEED_VAL = 42
DNN_LIB = "DNN"
Reported by Pylint.
Line: 11
Column: 1
import tensorflow as tf
import torch
from .test.configs.test_config import CommonConfig
SEED_VAL = 42
DNN_LIB = "DNN"
# common path for model savings
MODEL_PATH_ROOT = os.path.join(CommonConfig().output_data_root_dir, "{}/models")
Reported by Pylint.
Line: 46
Column: 5
"\t* mean {} for the original model: {}\t"
"\t* mean time (min) for the original model inferences: {}\n"
"\t* mean {} for the DNN model: {}\t"
"\t* mean time (min) for the DNN model inferences: {}\n".format(
metric_name, np.mean(general_quality_metric[:, 0]),
np.mean(general_inference_time[:, 0]) / 60000,
metric_name, np.mean(general_quality_metric[:, 1]),
np.mean(general_inference_time[:, 1]) / 60000,
)
Reported by Pylint.
Line: 43
Column: 5
general_quality_metric = np.array(general_quality_metric)
general_inference_time = np.array(general_inference_time)
summary_line = "===== End of processing. General results:\n"
"\t* mean {} for the original model: {}\t"
"\t* mean time (min) for the original model inferences: {}\n"
"\t* mean {} for the DNN model: {}\t"
"\t* mean time (min) for the DNN model inferences: {}\n".format(
metric_name, np.mean(general_quality_metric[:, 0]),
np.mean(general_inference_time[:, 0]) / 60000,
Reported by Pylint.
Line: 44
Column: 5
general_inference_time = np.array(general_inference_time)
summary_line = "===== End of processing. General results:\n"
"\t* mean {} for the original model: {}\t"
"\t* mean time (min) for the original model inferences: {}\n"
"\t* mean {} for the DNN model: {}\t"
"\t* mean time (min) for the DNN model inferences: {}\n".format(
metric_name, np.mean(general_quality_metric[:, 0]),
np.mean(general_inference_time[:, 0]) / 60000,
metric_name, np.mean(general_quality_metric[:, 1]),
Reported by Pylint.
Line: 45
Column: 5
summary_line = "===== End of processing. General results:\n"
"\t* mean {} for the original model: {}\t"
"\t* mean time (min) for the original model inferences: {}\n"
"\t* mean {} for the DNN model: {}\t"
"\t* mean time (min) for the DNN model inferences: {}\n".format(
metric_name, np.mean(general_quality_metric[:, 0]),
np.mean(general_inference_time[:, 0]) / 60000,
metric_name, np.mean(general_quality_metric[:, 1]),
np.mean(general_inference_time[:, 1]) / 60000,
Reported by Pylint.
Line: 46
Column: 5
"\t* mean {} for the original model: {}\t"
"\t* mean time (min) for the original model inferences: {}\n"
"\t* mean {} for the DNN model: {}\t"
"\t* mean time (min) for the DNN model inferences: {}\n".format(
metric_name, np.mean(general_quality_metric[:, 0]),
np.mean(general_inference_time[:, 0]) / 60000,
metric_name, np.mean(general_quality_metric[:, 1]),
np.mean(general_inference_time[:, 1]) / 60000,
)
Reported by Pylint.
Line: 122
Column: 5
def create_parser():
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
"--test",
type=str_bool,
help="Define whether you'd like to run the model with OpenCV for testing.",
default=False
),
Reported by Pylint.
Line: 128
Column: 5
help="Define whether you'd like to run the model with OpenCV for testing.",
default=False
),
parser.add_argument(
"--default_img_preprocess",
type=str_bool,
help="Define whether you'd like to preprocess the input image with defined"
" PyTorch or TF functions for model test with OpenCV.",
default=False
Reported by Pylint.
samples/java/tutorial_code/ShapeDescriptors/moments/MomentsDemo.java
27 issues
Line: 45
String filename = args.length > 0 ? args[0] : "../data/stuff.jpg";
Mat src = Imgcodecs.imread(filename);
if (src.empty()) {
System.err.println("Cannot read image: " + filename);
System.exit(0);
}
/// Convert image to gray and blur it
Imgproc.cvtColor(src, srcGray, Imgproc.COLOR_BGR2GRAY);
Reported by PMD.
Line: 155
//! [showDrawings]
/// Calculate the area with the moments 00 and compare with the result of the OpenCV function
System.out.println("\t Info: Area and Contour Length \n");
for (int i = 0; i < contours.size(); i++) {
System.out.format(" * Contour[%d] - Area (M_00) = %.2f - Area OpenCV: %.2f - Length: %.2f\n", i,
mu.get(i).m00, Imgproc.contourArea(contours.get(i)),
Imgproc.arcLength(new MatOfPoint2f(contours.get(i).toArray()), true));
}
Reported by PMD.
Line: 31
import org.opencv.imgproc.Moments;
class MomentsClass {
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgSrcLabel;
private JLabel imgContoursLabel;
private static final int MAX_THRESHOLD = 255;
private int threshold = 100;
Reported by PMD.
Line: 31
import org.opencv.imgproc.Moments;
class MomentsClass {
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgSrcLabel;
private JLabel imgContoursLabel;
private static final int MAX_THRESHOLD = 255;
private int threshold = 100;
Reported by PMD.
Line: 32
class MomentsClass {
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgSrcLabel;
private JLabel imgContoursLabel;
private static final int MAX_THRESHOLD = 255;
private int threshold = 100;
private Random rng = new Random(12345);
Reported by PMD.
Line: 32
class MomentsClass {
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgSrcLabel;
private JLabel imgContoursLabel;
private static final int MAX_THRESHOLD = 255;
private int threshold = 100;
private Random rng = new Random(12345);
Reported by PMD.
Line: 33
class MomentsClass {
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgSrcLabel;
private JLabel imgContoursLabel;
private static final int MAX_THRESHOLD = 255;
private int threshold = 100;
private Random rng = new Random(12345);
Reported by PMD.
Line: 33
class MomentsClass {
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgSrcLabel;
private JLabel imgContoursLabel;
private static final int MAX_THRESHOLD = 255;
private int threshold = 100;
private Random rng = new Random(12345);
Reported by PMD.
Line: 34
private Mat srcGray = new Mat();
private JFrame frame;
private JLabel imgSrcLabel;
private JLabel imgContoursLabel;
private static final int MAX_THRESHOLD = 255;
private int threshold = 100;
private Random rng = new Random(12345);
public MomentsClass(String[] args) {
Reported by PMD.
Line: 36
private JLabel imgSrcLabel;
private JLabel imgContoursLabel;
private static final int MAX_THRESHOLD = 255;
private int threshold = 100;
private Random rng = new Random(12345);
public MomentsClass(String[] args) {
//! [setup]
/// Load source image
Reported by PMD.