The following issues were found:
samples/java/sbt/src/main/java/DetectFaceDemo.java
12 issues
Line: 17
*/
public class DetectFaceDemo {
public void run() {
System.out.println("\nRunning DetectFaceDemo");
// Create a face detector from the cascade file in the resources
// directory.
CascadeClassifier faceDetector = new CascadeClassifier(getClass()
.getResource("/lbpcascade_frontalface.xml").getPath());
Reported by PMD.
Line: 31
MatOfRect faceDetections = new MatOfRect();
faceDetector.detectMultiScale(image, faceDetections);
System.out.println(String.format("Detected %s faces",
faceDetections.toArray().length));
// Draw a bounding box around each face.
for (Rect rect : faceDetections.toArray()) {
Imgproc.rectangle(image, new Point(rect.x, rect.y), new Point(rect.x
Reported by PMD.
Line: 42
// Save the visualized detection.
String filename = "faceDetection.png";
System.out.println(String.format("Writing %s", filename));
Imgcodecs.imwrite(filename, image);
}
}
Reported by PMD.
Line: 21
// Create a face detector from the cascade file in the resources
// directory.
CascadeClassifier faceDetector = new CascadeClassifier(getClass()
.getResource("/lbpcascade_frontalface.xml").getPath());
Mat image = Imgcodecs.imread(getClass().getResource(
"/AverageMaleFace.jpg").getPath());
// Detect faces in the image.
Reported by PMD.
Line: 21
// Create a face detector from the cascade file in the resources
// directory.
CascadeClassifier faceDetector = new CascadeClassifier(getClass()
.getResource("/lbpcascade_frontalface.xml").getPath());
Mat image = Imgcodecs.imread(getClass().getResource(
"/AverageMaleFace.jpg").getPath());
// Detect faces in the image.
Reported by PMD.
Line: 23
// directory.
CascadeClassifier faceDetector = new CascadeClassifier(getClass()
.getResource("/lbpcascade_frontalface.xml").getPath());
Mat image = Imgcodecs.imread(getClass().getResource(
"/AverageMaleFace.jpg").getPath());
// Detect faces in the image.
// MatOfRect is a special container class for Rect.
MatOfRect faceDetections = new MatOfRect();
Reported by PMD.
Line: 23
// directory.
CascadeClassifier faceDetector = new CascadeClassifier(getClass()
.getResource("/lbpcascade_frontalface.xml").getPath());
Mat image = Imgcodecs.imread(getClass().getResource(
"/AverageMaleFace.jpg").getPath());
// Detect faces in the image.
// MatOfRect is a special container class for Rect.
MatOfRect faceDetections = new MatOfRect();
Reported by PMD.
Line: 32
faceDetector.detectMultiScale(image, faceDetections);
System.out.println(String.format("Detected %s faces",
faceDetections.toArray().length));
// Draw a bounding box around each face.
for (Rect rect : faceDetections.toArray()) {
Imgproc.rectangle(image, new Point(rect.x, rect.y), new Point(rect.x
+ rect.width, rect.y + rect.height), new Scalar(0, 255, 0));
Reported by PMD.
Line: 36
// Draw a bounding box around each face.
for (Rect rect : faceDetections.toArray()) {
Imgproc.rectangle(image, new Point(rect.x, rect.y), new Point(rect.x
+ rect.width, rect.y + rect.height), new Scalar(0, 255, 0));
}
// Save the visualized detection.
String filename = "faceDetection.png";
Reported by PMD.
Line: 36
// Draw a bounding box around each face.
for (Rect rect : faceDetections.toArray()) {
Imgproc.rectangle(image, new Point(rect.x, rect.y), new Point(rect.x
+ rect.width, rect.y + rect.height), new Scalar(0, 255, 0));
}
// Save the visualized detection.
String filename = "faceDetection.png";
Reported by PMD.
samples/python/kalman.py
12 issues
Line: 22
Column: 1
long = int
import numpy as np
import cv2 as cv
from math import cos, sin, sqrt
import numpy as np
def main():
Reported by Pylint.
Line: 25
Column: 1
import cv2 as cv
from math import cos, sin, sqrt
import numpy as np
def main():
img_height = 500
img_width = 500
kalman = cv.KalmanFilter(2, 1, 0)
Reported by Pylint.
Line: 19
Column: 5
PY3 = sys.version_info[0] == 3
if PY3:
long = int
import numpy as np
import cv2 as cv
from math import cos, sin, sqrt
Reported by Pylint.
Line: 21
Column: 1
if PY3:
long = int
import numpy as np
import cv2 as cv
from math import cos, sin, sqrt
import numpy as np
Reported by Pylint.
Line: 22
Column: 1
long = int
import numpy as np
import cv2 as cv
from math import cos, sin, sqrt
import numpy as np
def main():
Reported by Pylint.
Line: 24
Column: 1
import numpy as np
import cv2 as cv
from math import cos, sin, sqrt
import numpy as np
def main():
img_height = 500
img_width = 500
Reported by Pylint.
Line: 24
Column: 1
import numpy as np
import cv2 as cv
from math import cos, sin, sqrt
import numpy as np
def main():
img_height = 500
img_width = 500
Reported by Pylint.
Line: 25
Column: 1
import cv2 as cv
from math import cos, sin, sqrt
import numpy as np
def main():
img_height = 500
img_width = 500
kalman = cv.KalmanFilter(2, 1, 0)
Reported by Pylint.
Line: 25
Column: 1
import cv2 as cv
from math import cos, sin, sqrt
import numpy as np
def main():
img_height = 500
img_width = 500
kalman = cv.KalmanFilter(2, 1, 0)
Reported by Pylint.
Line: 27
Column: 1
from math import cos, sin, sqrt
import numpy as np
def main():
img_height = 500
img_width = 500
kalman = cv.KalmanFilter(2, 1, 0)
code = long(-1)
Reported by Pylint.
modules/features2d/misc/java/test/SIMPLEBLOBFeatureDetectorTest.java
12 issues
Line: 19
public class SIMPLEBLOBFeatureDetectorTest extends OpenCVTestCase {
Feature2D detector;
int matSize;
KeyPoint[] truth;
private Mat getMaskImg() {
Mat mask = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Reported by PMD.
Line: 20
public class SIMPLEBLOBFeatureDetectorTest extends OpenCVTestCase {
Feature2D detector;
int matSize;
KeyPoint[] truth;
private Mat getMaskImg() {
Mat mask = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Mat right = mask.submat(0, matSize, matSize / 2, matSize);
Reported by PMD.
Line: 21
Feature2D detector;
int matSize;
KeyPoint[] truth;
private Mat getMaskImg() {
Mat mask = new Mat(matSize, matSize, CvType.CV_8U, new Scalar(255));
Mat right = mask.submat(0, matSize, matSize / 2, matSize);
right.setTo(new Scalar(0));
Reported by PMD.
Line: 44
return img;
}
@Override
protected void setUp() throws Exception {
super.setUp();
detector = SimpleBlobDetector.create();
matSize = 200;
truth = new KeyPoint[] {
Reported by PMD.
Line: 58
};
}
public void testCreate() {
assertNotNull(detector);
}
public void testDetectListOfMatListOfListOfKeyPoint() {
fail("Not yet implemented");
Reported by PMD.
Line: 62
assertNotNull(detector);
}
public void testDetectListOfMatListOfListOfKeyPoint() {
fail("Not yet implemented");
}
public void testDetectListOfMatListOfListOfKeyPointListOfMat() {
fail("Not yet implemented");
Reported by PMD.
Line: 66
fail("Not yet implemented");
}
public void testDetectListOfMatListOfListOfKeyPointListOfMat() {
fail("Not yet implemented");
}
public void testDetectMatListOfKeyPoint() {
Mat img = getTestImg();
Reported by PMD.
Line: 70
fail("Not yet implemented");
}
public void testDetectMatListOfKeyPoint() {
Mat img = getTestImg();
MatOfKeyPoint keypoints = new MatOfKeyPoint();
detector.detect(img, keypoints);
Reported by PMD.
Line: 79
assertListKeyPointEquals(Arrays.asList(truth), keypoints.toList(), EPS);
}
public void testDetectMatListOfKeyPointMat() {
Mat img = getTestImg();
Mat mask = getMaskImg();
MatOfKeyPoint keypoints = new MatOfKeyPoint();
detector.detect(img, keypoints, mask);
Reported by PMD.
Line: 89
assertListKeyPointEquals(Arrays.asList(truth[1]), keypoints.toList(), EPS);
}
public void testEmpty() {
// assertFalse(detector.empty());
fail("Not yet implemented");
}
public void testRead() {
Reported by PMD.
samples/python/tutorial_code/ImgTrans/canny_detector/CannyDetector_Demo.py
12 issues
Line: 2
Column: 1
from __future__ import print_function
import cv2 as cv
import argparse
max_lowThreshold = 100
window_name = 'Edge Map'
title_trackbar = 'Min Threshold:'
ratio = 3
kernel_size = 3
Reported by Pylint.
Line: 1
Column: 1
from __future__ import print_function
import cv2 as cv
import argparse
max_lowThreshold = 100
window_name = 'Edge Map'
title_trackbar = 'Min Threshold:'
ratio = 3
kernel_size = 3
Reported by Pylint.
Line: 1
Column: 1
from __future__ import print_function
import cv2 as cv
import argparse
max_lowThreshold = 100
window_name = 'Edge Map'
title_trackbar = 'Min Threshold:'
ratio = 3
kernel_size = 3
Reported by Pylint.
Line: 3
Column: 1
from __future__ import print_function
import cv2 as cv
import argparse
max_lowThreshold = 100
window_name = 'Edge Map'
title_trackbar = 'Min Threshold:'
ratio = 3
kernel_size = 3
Reported by Pylint.
Line: 5
Column: 1
import cv2 as cv
import argparse
max_lowThreshold = 100
window_name = 'Edge Map'
title_trackbar = 'Min Threshold:'
ratio = 3
kernel_size = 3
Reported by Pylint.
Line: 6
Column: 1
import argparse
max_lowThreshold = 100
window_name = 'Edge Map'
title_trackbar = 'Min Threshold:'
ratio = 3
kernel_size = 3
def CannyThreshold(val):
Reported by Pylint.
Line: 7
Column: 1
max_lowThreshold = 100
window_name = 'Edge Map'
title_trackbar = 'Min Threshold:'
ratio = 3
kernel_size = 3
def CannyThreshold(val):
low_threshold = val
Reported by Pylint.
Line: 8
Column: 1
max_lowThreshold = 100
window_name = 'Edge Map'
title_trackbar = 'Min Threshold:'
ratio = 3
kernel_size = 3
def CannyThreshold(val):
low_threshold = val
img_blur = cv.blur(src_gray, (3,3))
Reported by Pylint.
Line: 9
Column: 1
window_name = 'Edge Map'
title_trackbar = 'Min Threshold:'
ratio = 3
kernel_size = 3
def CannyThreshold(val):
low_threshold = val
img_blur = cv.blur(src_gray, (3,3))
detected_edges = cv.Canny(img_blur, low_threshold, low_threshold*ratio, kernel_size)
Reported by Pylint.
Line: 11
Column: 1
ratio = 3
kernel_size = 3
def CannyThreshold(val):
low_threshold = val
img_blur = cv.blur(src_gray, (3,3))
detected_edges = cv.Canny(img_blur, low_threshold, low_threshold*ratio, kernel_size)
mask = detected_edges != 0
dst = src * (mask[:,:,None].astype(src.dtype))
Reported by Pylint.
samples/dnn/colorization.py
12 issues
Line: 6
Column: 1
# To download pts_in_hull.npy, see: https://github.com/richzhang/colorization/tree/caffe/colorization/resources/pts_in_hull.npy
import numpy as np
import argparse
import cv2 as cv
def parse_args():
parser = argparse.ArgumentParser(description='iColor: deep interactive colorization')
parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera')
parser.add_argument('--prototxt', help='Path to colorization_deploy_v2.prototxt', required=True)
Reported by Pylint.
Line: 15
Column: 5
parser.add_argument('--caffemodel', help='Path to colorization_release_v2.caffemodel', required=True)
parser.add_argument('--kernel', help='Path to pts_in_hull.npy', required=True)
args = parser.parse_args()
return args
if __name__ == '__main__':
W_in = 224
H_in = 224
Reported by Pylint.
Line: 1
Column: 1
# Script is based on https://github.com/richzhang/colorization/blob/master/colorization/colorize.py
# To download the caffemodel and the prototxt, see: https://github.com/richzhang/colorization/tree/caffe/colorization/models
# To download pts_in_hull.npy, see: https://github.com/richzhang/colorization/tree/caffe/colorization/resources/pts_in_hull.npy
import numpy as np
import argparse
import cv2 as cv
def parse_args():
parser = argparse.ArgumentParser(description='iColor: deep interactive colorization')
Reported by Pylint.
Line: 2
Column: 1
# Script is based on https://github.com/richzhang/colorization/blob/master/colorization/colorize.py
# To download the caffemodel and the prototxt, see: https://github.com/richzhang/colorization/tree/caffe/colorization/models
# To download pts_in_hull.npy, see: https://github.com/richzhang/colorization/tree/caffe/colorization/resources/pts_in_hull.npy
import numpy as np
import argparse
import cv2 as cv
def parse_args():
parser = argparse.ArgumentParser(description='iColor: deep interactive colorization')
Reported by Pylint.
Line: 3
Column: 1
# Script is based on https://github.com/richzhang/colorization/blob/master/colorization/colorize.py
# To download the caffemodel and the prototxt, see: https://github.com/richzhang/colorization/tree/caffe/colorization/models
# To download pts_in_hull.npy, see: https://github.com/richzhang/colorization/tree/caffe/colorization/resources/pts_in_hull.npy
import numpy as np
import argparse
import cv2 as cv
def parse_args():
parser = argparse.ArgumentParser(description='iColor: deep interactive colorization')
Reported by Pylint.
Line: 5
Column: 1
# To download the caffemodel and the prototxt, see: https://github.com/richzhang/colorization/tree/caffe/colorization/models
# To download pts_in_hull.npy, see: https://github.com/richzhang/colorization/tree/caffe/colorization/resources/pts_in_hull.npy
import numpy as np
import argparse
import cv2 as cv
def parse_args():
parser = argparse.ArgumentParser(description='iColor: deep interactive colorization')
parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera')
Reported by Pylint.
Line: 8
Column: 1
import argparse
import cv2 as cv
def parse_args():
parser = argparse.ArgumentParser(description='iColor: deep interactive colorization')
parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera')
parser.add_argument('--prototxt', help='Path to colorization_deploy_v2.prototxt', required=True)
parser.add_argument('--caffemodel', help='Path to colorization_release_v2.caffemodel', required=True)
parser.add_argument('--kernel', help='Path to pts_in_hull.npy', required=True)
Reported by Pylint.
Line: 10
Column: 1
def parse_args():
parser = argparse.ArgumentParser(description='iColor: deep interactive colorization')
parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera')
parser.add_argument('--prototxt', help='Path to colorization_deploy_v2.prototxt', required=True)
parser.add_argument('--caffemodel', help='Path to colorization_release_v2.caffemodel', required=True)
parser.add_argument('--kernel', help='Path to pts_in_hull.npy', required=True)
args = parser.parse_args()
Reported by Pylint.
Line: 12
Column: 1
parser = argparse.ArgumentParser(description='iColor: deep interactive colorization')
parser.add_argument('--input', help='Path to image or video. Skip to capture frames from camera')
parser.add_argument('--prototxt', help='Path to colorization_deploy_v2.prototxt', required=True)
parser.add_argument('--caffemodel', help='Path to colorization_release_v2.caffemodel', required=True)
parser.add_argument('--kernel', help='Path to pts_in_hull.npy', required=True)
args = parser.parse_args()
return args
Reported by Pylint.
Line: 19
Column: 5
return args
if __name__ == '__main__':
W_in = 224
H_in = 224
imshowSize = (640, 480)
args = parse_args()
Reported by Pylint.
modules/python/test/test_fs_cache_dir.py
12 issues
Line: 5
Column: 1
from __future__ import print_function
import numpy as np
import cv2 as cv
import os
import datetime
from tests_common import NewOpenCVTests
Reported by Pylint.
Line: 7
Column: 1
import numpy as np
import cv2 as cv
import os
import datetime
from tests_common import NewOpenCVTests
class get_cache_dir_test(NewOpenCVTests):
def test_get_cache_dir(self):
Reported by Pylint.
Line: 1
Column: 1
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import os
import datetime
from tests_common import NewOpenCVTests
Reported by Pylint.
Line: 6
Column: 1
import numpy as np
import cv2 as cv
import os
import datetime
from tests_common import NewOpenCVTests
class get_cache_dir_test(NewOpenCVTests):
Reported by Pylint.
Line: 7
Column: 1
import numpy as np
import cv2 as cv
import os
import datetime
from tests_common import NewOpenCVTests
class get_cache_dir_test(NewOpenCVTests):
def test_get_cache_dir(self):
Reported by Pylint.
Line: 11
Column: 1
from tests_common import NewOpenCVTests
class get_cache_dir_test(NewOpenCVTests):
def test_get_cache_dir(self):
#New binding
path = cv.utils.fs.getCacheDirectoryForDownloads()
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.isdir(path))
Reported by Pylint.
Line: 11
Column: 1
from tests_common import NewOpenCVTests
class get_cache_dir_test(NewOpenCVTests):
def test_get_cache_dir(self):
#New binding
path = cv.utils.fs.getCacheDirectoryForDownloads()
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.isdir(path))
Reported by Pylint.
Line: 12
Column: 5
from tests_common import NewOpenCVTests
class get_cache_dir_test(NewOpenCVTests):
def test_get_cache_dir(self):
#New binding
path = cv.utils.fs.getCacheDirectoryForDownloads()
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.isdir(path))
Reported by Pylint.
Line: 18
Column: 5
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.isdir(path))
def get_cache_dir_imread_interop(self, ext):
path = cv.utils.fs.getCacheDirectoryForDownloads()
gold_image = np.ones((16, 16, 3), np.uint8)
read_from_file = np.zeros((16, 16, 3), np.uint8)
test_file_name = os.path.join(path, "test." + ext)
try:
Reported by Pylint.
Line: 31
Column: 5
self.assertEqual(cv.norm(gold_image, read_from_file), 0)
def test_get_cache_dir_imread_interop_png(self):
self.get_cache_dir_imread_interop("png")
def test_get_cache_dir_imread_interop_jpeg(self):
self.get_cache_dir_imread_interop("jpg")
Reported by Pylint.
modules/core/misc/java/src/java/core+MatOfKeyPoint.java
12 issues
Line: 37
public MatOfKeyPoint(KeyPoint...a) {
super();
fromArray(a);
}
public void alloc(int elemNumber) {
if(elemNumber>0)
super.create(elemNumber, 1, CvType.makeType(_depth, _channels));
Reported by PMD.
Line: 6
import java.util.Arrays;
import java.util.List;
import org.opencv.core.KeyPoint;
public class MatOfKeyPoint extends Mat {
// 32FC7
private static final int _depth = CvType.CV_32F;
private static final int _channels = 7;
Reported by PMD.
Line: 50
return;
int num = a.length;
alloc(num);
float buff[] = new float[num * _channels];
for(int i=0; i<num; i++) {
KeyPoint kp = a[i];
buff[_channels*i+0] = (float) kp.pt.x;
buff[_channels*i+1] = (float) kp.pt.y;
buff[_channels*i+2] = kp.size;
Reported by PMD.
Line: 53
float buff[] = new float[num * _channels];
for(int i=0; i<num; i++) {
KeyPoint kp = a[i];
buff[_channels*i+0] = (float) kp.pt.x;
buff[_channels*i+1] = (float) kp.pt.y;
buff[_channels*i+2] = kp.size;
buff[_channels*i+3] = kp.angle;
buff[_channels*i+4] = kp.response;
buff[_channels*i+5] = kp.octave;
Reported by PMD.
Line: 54
for(int i=0; i<num; i++) {
KeyPoint kp = a[i];
buff[_channels*i+0] = (float) kp.pt.x;
buff[_channels*i+1] = (float) kp.pt.y;
buff[_channels*i+2] = kp.size;
buff[_channels*i+3] = kp.angle;
buff[_channels*i+4] = kp.response;
buff[_channels*i+5] = kp.octave;
buff[_channels*i+6] = kp.class_id;
Reported by PMD.
Line: 55
KeyPoint kp = a[i];
buff[_channels*i+0] = (float) kp.pt.x;
buff[_channels*i+1] = (float) kp.pt.y;
buff[_channels*i+2] = kp.size;
buff[_channels*i+3] = kp.angle;
buff[_channels*i+4] = kp.response;
buff[_channels*i+5] = kp.octave;
buff[_channels*i+6] = kp.class_id;
}
Reported by PMD.
Line: 56
buff[_channels*i+0] = (float) kp.pt.x;
buff[_channels*i+1] = (float) kp.pt.y;
buff[_channels*i+2] = kp.size;
buff[_channels*i+3] = kp.angle;
buff[_channels*i+4] = kp.response;
buff[_channels*i+5] = kp.octave;
buff[_channels*i+6] = kp.class_id;
}
put(0, 0, buff); //TODO: check ret val!
Reported by PMD.
Line: 57
buff[_channels*i+1] = (float) kp.pt.y;
buff[_channels*i+2] = kp.size;
buff[_channels*i+3] = kp.angle;
buff[_channels*i+4] = kp.response;
buff[_channels*i+5] = kp.octave;
buff[_channels*i+6] = kp.class_id;
}
put(0, 0, buff); //TODO: check ret val!
}
Reported by PMD.
Line: 58
buff[_channels*i+2] = kp.size;
buff[_channels*i+3] = kp.angle;
buff[_channels*i+4] = kp.response;
buff[_channels*i+5] = kp.octave;
buff[_channels*i+6] = kp.class_id;
}
put(0, 0, buff); //TODO: check ret val!
}
Reported by PMD.
Line: 59
buff[_channels*i+3] = kp.angle;
buff[_channels*i+4] = kp.response;
buff[_channels*i+5] = kp.octave;
buff[_channels*i+6] = kp.class_id;
}
put(0, 0, buff); //TODO: check ret val!
}
public KeyPoint[] toArray() {
Reported by PMD.
samples/java/tutorial_code/features2D/feature_description/SURFMatchingDemo.java
12 issues
Line: 18
Mat img1 = Imgcodecs.imread(filename1, Imgcodecs.IMREAD_GRAYSCALE);
Mat img2 = Imgcodecs.imread(filename2, Imgcodecs.IMREAD_GRAYSCALE);
if (img1.empty() || img2.empty()) {
System.err.println("Cannot read images!");
System.exit(0);
}
//-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
double hessianThreshold = 400;
Reported by PMD.
Line: 17
String filename2 = args.length > 1 ? args[1] : "../data/box_in_scene.png";
Mat img1 = Imgcodecs.imread(filename1, Imgcodecs.IMREAD_GRAYSCALE);
Mat img2 = Imgcodecs.imread(filename2, Imgcodecs.IMREAD_GRAYSCALE);
if (img1.empty() || img2.empty()) {
System.err.println("Cannot read images!");
System.exit(0);
}
//-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
Reported by PMD.
Line: 17
String filename2 = args.length > 1 ? args[1] : "../data/box_in_scene.png";
Mat img1 = Imgcodecs.imread(filename1, Imgcodecs.IMREAD_GRAYSCALE);
Mat img2 = Imgcodecs.imread(filename2, Imgcodecs.IMREAD_GRAYSCALE);
if (img1.empty() || img2.empty()) {
System.err.println("Cannot read images!");
System.exit(0);
}
//-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
Reported by PMD.
Line: 19
Mat img2 = Imgcodecs.imread(filename2, Imgcodecs.IMREAD_GRAYSCALE);
if (img1.empty() || img2.empty()) {
System.err.println("Cannot read images!");
System.exit(0);
}
//-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
double hessianThreshold = 400;
int nOctaves = 4, nOctaveLayers = 3;
Reported by PMD.
Line: 36
// Since SURF is a floating-point descriptor NORM_L2 is used
DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE);
MatOfDMatch matches = new MatOfDMatch();
matcher.match(descriptors1, descriptors2, matches);
//-- Draw matches
Mat imgMatches = new Mat();
Features2d.drawMatches(img1, keypoints1, img2, keypoints2, matches, imgMatches);
Reported by PMD.
Line: 45
HighGui.imshow("Matches", imgMatches);
HighGui.waitKey(0);
System.exit(0);
}
}
public class SURFMatchingDemo {
public static void main(String[] args) {
Reported by PMD.
Line: 49
}
}
public class SURFMatchingDemo {
public static void main(String[] args) {
// Load the native OpenCV library
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
new SURFMatching().run(args);
Reported by PMD.
Line: 12
import org.opencv.xfeatures2d.SURF;
class SURFMatching {
public void run(String[] args) {
String filename1 = args.length > 1 ? args[0] : "../data/box.png";
String filename2 = args.length > 1 ? args[1] : "../data/box_in_scene.png";
Mat img1 = Imgcodecs.imread(filename1, Imgcodecs.IMREAD_GRAYSCALE);
Mat img2 = Imgcodecs.imread(filename2, Imgcodecs.IMREAD_GRAYSCALE);
if (img1.empty() || img2.empty()) {
Reported by PMD.
Line: 24
//-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
double hessianThreshold = 400;
int nOctaves = 4, nOctaveLayers = 3;
boolean extended = false, upright = false;
SURF detector = SURF.create(hessianThreshold, nOctaves, nOctaveLayers, extended, upright);
MatOfKeyPoint keypoints1 = new MatOfKeyPoint(), keypoints2 = new MatOfKeyPoint();
Mat descriptors1 = new Mat(), descriptors2 = new Mat();
detector.detectAndCompute(img1, new Mat(), keypoints1, descriptors1);
Reported by PMD.
Line: 25
//-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
double hessianThreshold = 400;
int nOctaves = 4, nOctaveLayers = 3;
boolean extended = false, upright = false;
SURF detector = SURF.create(hessianThreshold, nOctaves, nOctaveLayers, extended, upright);
MatOfKeyPoint keypoints1 = new MatOfKeyPoint(), keypoints2 = new MatOfKeyPoint();
Mat descriptors1 = new Mat(), descriptors2 = new Mat();
detector.detectAndCompute(img1, new Mat(), keypoints1, descriptors1);
detector.detectAndCompute(img2, new Mat(), keypoints2, descriptors2);
Reported by PMD.
samples/python/tutorial_code/ImgTrans/HoughLine/hough_lines.py
12 issues
Line: 7
Column: 1
"""
import sys
import math
import cv2 as cv
import numpy as np
def main(argv):
## [load]
Reported by Pylint.
Line: 11
Column: 1
import numpy as np
def main(argv):
## [load]
default_file = 'sudoku.png'
filename = argv[0] if len(argv) > 0 else default_file
# Loads an image
Reported by Pylint.
Line: 11
Column: 1
import numpy as np
def main(argv):
## [load]
default_file = 'sudoku.png'
filename = argv[0] if len(argv) > 0 else default_file
# Loads an image
Reported by Pylint.
Line: 33
Column: 5
# Copy edges to the images that will display the results in BGR
cdst = cv.cvtColor(dst, cv.COLOR_GRAY2BGR)
cdstP = np.copy(cdst)
## [hough_lines]
# Standard Hough Line Transform
lines = cv.HoughLines(dst, 1, np.pi / 180, 150, None, 0, 0)
## [hough_lines]
Reported by Pylint.
Line: 42
Column: 9
## [draw_lines]
# Draw the lines
if lines is not None:
for i in range(0, len(lines)):
rho = lines[i][0][0]
theta = lines[i][0][1]
a = math.cos(theta)
b = math.sin(theta)
x0 = a * rho
Reported by Pylint.
Line: 45
Column: 13
for i in range(0, len(lines)):
rho = lines[i][0][0]
theta = lines[i][0][1]
a = math.cos(theta)
b = math.sin(theta)
x0 = a * rho
y0 = b * rho
pt1 = (int(x0 + 1000*(-b)), int(y0 + 1000*(a)))
pt2 = (int(x0 - 1000*(-b)), int(y0 - 1000*(a)))
Reported by Pylint.
Line: 46
Column: 13
rho = lines[i][0][0]
theta = lines[i][0][1]
a = math.cos(theta)
b = math.sin(theta)
x0 = a * rho
y0 = b * rho
pt1 = (int(x0 + 1000*(-b)), int(y0 + 1000*(a)))
pt2 = (int(x0 - 1000*(-b)), int(y0 - 1000*(a)))
Reported by Pylint.
Line: 47
Column: 13
theta = lines[i][0][1]
a = math.cos(theta)
b = math.sin(theta)
x0 = a * rho
y0 = b * rho
pt1 = (int(x0 + 1000*(-b)), int(y0 + 1000*(a)))
pt2 = (int(x0 - 1000*(-b)), int(y0 - 1000*(a)))
cv.line(cdst, pt1, pt2, (0,0,255), 3, cv.LINE_AA)
Reported by Pylint.
Line: 48
Column: 13
a = math.cos(theta)
b = math.sin(theta)
x0 = a * rho
y0 = b * rho
pt1 = (int(x0 + 1000*(-b)), int(y0 + 1000*(a)))
pt2 = (int(x0 - 1000*(-b)), int(y0 - 1000*(a)))
cv.line(cdst, pt1, pt2, (0,0,255), 3, cv.LINE_AA)
## [draw_lines]
Reported by Pylint.
Line: 57
Column: 5
## [hough_lines_p]
# Probabilistic Line Transform
linesP = cv.HoughLinesP(dst, 1, np.pi / 180, 50, None, 50, 10)
## [hough_lines_p]
## [draw_lines_p]
# Draw the lines
if linesP is not None:
for i in range(0, len(linesP)):
Reported by Pylint.
samples/python/tutorial_code/ImgTrans/distance_transformation/imageSegmentation.py
11 issues
Line: 2
Column: 1
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng
rng.seed(12345)
## [load_image]
Reported by Pylint.
Line: 1
Column: 1
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng
rng.seed(12345)
## [load_image]
Reported by Pylint.
Line: 1
Column: 1
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng
rng.seed(12345)
## [load_image]
Reported by Pylint.
Line: 4
Column: 1
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng
rng.seed(12345)
## [load_image]
Reported by Pylint.
Line: 5
Column: 1
import cv2 as cv
import numpy as np
import argparse
import random as rng
rng.seed(12345)
## [load_image]
# Load the image
Reported by Pylint.
Line: 11
Column: 1
## [load_image]
# Load the image
parser = argparse.ArgumentParser(description='Code for Image Segmentation with Distance Transform and Watershed Algorithm.\
Sample code showing how to segment overlapping objects using Laplacian filtering, \
in addition to Watershed and Distance Transformation')
parser.add_argument('--input', help='Path to input image.', default='cards.png')
args = parser.parse_args()
Reported by Pylint.
Line: 20
Column: 5
src = cv.imread(cv.samples.findFile(args.input))
if src is None:
print('Could not open or find the image:', args.input)
exit(0)
# Show source image
cv.imshow('Source Image', src)
## [load_image]
Reported by Pylint.
Line: 123
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b311-random
# Generate random colors
colors = []
for contour in contours:
colors.append((rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)))
# Create the result image
dst = np.zeros((markers.shape[0], markers.shape[1], 3), dtype=np.uint8)
# Fill labeled objects with random colors
Reported by Bandit.
Line: 123
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b311-random
# Generate random colors
colors = []
for contour in contours:
colors.append((rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)))
# Create the result image
dst = np.zeros((markers.shape[0], markers.shape[1], 3), dtype=np.uint8)
# Fill labeled objects with random colors
Reported by Bandit.
Line: 123
Suggestion:
https://bandit.readthedocs.io/en/latest/blacklists/blacklist_calls.html#b311-random
# Generate random colors
colors = []
for contour in contours:
colors.append((rng.randint(0,256), rng.randint(0,256), rng.randint(0,256)))
# Create the result image
dst = np.zeros((markers.shape[0], markers.shape[1], 3), dtype=np.uint8)
# Fill labeled objects with random colors
Reported by Bandit.