The following issues were found:
samples/python/tutorial_code/ShapeDescriptors/bounding_rotated_ellipses/generalContours_demo2.py
19 issues
Line: 2
Column: 1
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng
rng.seed(12345)
def thresh_callback(val):
Reported by Pylint.
Line: 1
Column: 1
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng
rng.seed(12345)
def thresh_callback(val):
Reported by Pylint.
Line: 1
Column: 1
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng
rng.seed(12345)
def thresh_callback(val):
Reported by Pylint.
Line: 4
Column: 1
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng
rng.seed(12345)
def thresh_callback(val):
Reported by Pylint.
Line: 5
Column: 1
import cv2 as cv
import numpy as np
import argparse
import random as rng
rng.seed(12345)
def thresh_callback(val):
    threshold = val
Reported by Pylint.
Line: 9
Column: 1
rng.seed(12345)
def thresh_callback(val):
    threshold = val
    ## [Canny]
    # Detect edges using Canny
    canny_output = cv.Canny(src_gray, threshold, threshold * 2)
Reported by Pylint.
Line: 23
Column: 5
## [findContours]
# Find the rotated rectangles and ellipses for each contour
minRect = [None]*len(contours)
minEllipse = [None]*len(contours)
for i, c in enumerate(contours):
    minRect[i] = cv.minAreaRect(c)
    if c.shape[0] > 5:
        minEllipse[i] = cv.fitEllipse(c)
Reported by Pylint.
Line: 24
Column: 5
# Find the rotated rectangles and ellipses for each contour
minRect = [None]*len(contours)
minEllipse = [None]*len(contours)
for i, c in enumerate(contours):
    minRect[i] = cv.minAreaRect(c)
    if c.shape[0] > 5:
        minEllipse[i] = cv.fitEllipse(c)
Reported by Pylint.
Line: 25
Column: 12
# Find the rotated rectangles and ellipses for each contour
minRect = [None]*len(contours)
minEllipse = [None]*len(contours)
for i, c in enumerate(contours):
    minRect[i] = cv.minAreaRect(c)
    if c.shape[0] > 5:
        minEllipse[i] = cv.fitEllipse(c)
# Draw contours + rotated rects + ellipses
Reported by Pylint.
Line: 35
Column: 12
drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
## [zeroMat]
## [forContour]
for i, c in enumerate(contours):
    color = (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256))
    # contour
    cv.drawContours(drawing, contours, i, color)
    # ellipse
    if c.shape[0] > 5:
Reported by Pylint.
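The report lists only positions for generalContours_demo2.py, not the Pylint message text. The line 1-5, column 1 findings sit on the module header and the line 23-24 findings on the minRect/minEllipse assignments, which is the usual territory of missing-docstring, import-order and naming checks; that reading is an assumption. A minimal sketch of a header that would satisfy those assumed checks, not the tutorial's actual fix:
"""Fits rotated rectangles and ellipses to contours on a Canny edge map (illustrative docstring only)."""
from __future__ import print_function

# Standard-library imports grouped before third-party ones, as import-order checks expect.
import argparse
import random as rng

import cv2 as cv
import numpy as np

rng.seed(12345)
# Snake_case names such as min_rect and min_ellipse would satisfy an assumed invalid-name
# finding on the minRect/minEllipse assignments inside thresh_callback.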
modules/python/test/test_morphology.py
19 issues
Line: 13
Column: 1
PY3 = sys.version_info[0] == 3
import numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests
class morphology_test(NewOpenCVTests):
Reported by Pylint.
Line: 12
Column: 1
import sys
PY3 = sys.version_info[0] == 3
import numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests
class morphology_test(NewOpenCVTests):
Reported by Pylint.
Line: 12
Column: 1
import sys
PY3 = sys.version_info[0] == 3
import numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests
class morphology_test(NewOpenCVTests):
Reported by Pylint.
Line: 13
Column: 1
PY3 = sys.version_info[0] == 3
import numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests
class morphology_test(NewOpenCVTests):
Reported by Pylint.
Line: 15
Column: 1
import numpy as np
import cv2 as cv
from tests_common import NewOpenCVTests
class morphology_test(NewOpenCVTests):
    def test_morphology(self):
Reported by Pylint.
Line: 17
Column: 1
from tests_common import NewOpenCVTests
class morphology_test(NewOpenCVTests):
    def test_morphology(self):
        fn = 'samples/data/rubberwhale1.png'
        img = self.get_sample(fn)
Reported by Pylint.
Line: 17
Column: 1
from tests_common import NewOpenCVTests
class morphology_test(NewOpenCVTests):
    def test_morphology(self):
        fn = 'samples/data/rubberwhale1.png'
        img = self.get_sample(fn)
Reported by Pylint.
Line: 19
Column: 5
class morphology_test(NewOpenCVTests):
    def test_morphology(self):
        fn = 'samples/data/rubberwhale1.png'
        img = self.get_sample(fn)
        modes = ['erode/dilate', 'open/close', 'blackhat/tophat', 'gradient']
Reported by Pylint.
Line: 21
Column: 9
def test_morphology(self):
    fn = 'samples/data/rubberwhale1.png'
    img = self.get_sample(fn)
    modes = ['erode/dilate', 'open/close', 'blackhat/tophat', 'gradient']
    str_modes = ['ellipse', 'rect', 'cross']
Reported by Pylint.
Line: 27
Column: 1
modes = ['erode/dilate', 'open/close', 'blackhat/tophat', 'gradient']
str_modes = ['ellipse', 'rect', 'cross']
referenceHashes = { modes[0]: '071a526425b79e45b4d0d71ef51b0562', modes[1] : '071a526425b79e45b4d0d71ef51b0562',
                    modes[2] : '427e89f581b7df1b60a831b1ed4c8618', modes[3] : '0dd8ad251088a63d0dd022bcdc57361c'}
def update(cur_mode):
    cur_str_mode = str_modes[0]
    sz = 10
Reported by Pylint.
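test_morphology.py's findings cluster on the imports, the class statement and the first lines of test_morphology, again without message text. If they are the common naming, docstring and import-grouping checks (an assumption), a compliant skeleton could look like the sketch below; MorphologyTest is an illustrative rename, not a change the report itself asks for:
import sys

import numpy as np
import cv2 as cv

from tests_common import NewOpenCVTests

PY3 = sys.version_info[0] == 3


class MorphologyTest(NewOpenCVTests):
    """Compares cv.morphologyEx output against stored reference hashes."""

    def test_morphology(self):
        """Runs erode/dilate, open/close, blackhat/tophat and gradient on rubberwhale1.png."""
        img = self.get_sample('samples/data/rubberwhale1.png')
        self.assertIsNotNone(img)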
samples/java/tutorial_code/core/mat_mask_operations/MatMaskOperations.java
19 issues
Line: 25
Mat src = Imgcodecs.imread(filename, img_codec);
if (src.empty()) {
System.out.println("Can't open image [" + filename + "]");
System.out.println("Program Arguments: [image_path -- default ../data/lena.jpg] [G -- grayscale]");
System.exit(-1);
}
HighGui.namedWindow("Input", HighGui.WINDOW_AUTOSIZE);
Reported by PMD.
Line: 26
if (src.empty()) {
System.out.println("Can't open image [" + filename + "]");
System.out.println("Program Arguments: [image_path -- default ../data/lena.jpg] [G -- grayscale]");
System.exit(-1);
}
HighGui.namedWindow("Input", HighGui.WINDOW_AUTOSIZE);
HighGui.namedWindow("Output", HighGui.WINDOW_AUTOSIZE);
Reported by PMD.
Line: 39
Mat dst0 = sharpen(src, new Mat());
t = ((double) System.currentTimeMillis() - t) / 1000;
System.out.println("Hand written function time passed in seconds: " + t);
HighGui.imshow( "Output", dst0 );
HighGui.moveWindow("Output", 400, 400);
HighGui.waitKey();
Reported by PMD.
Line: 58
Imgproc.filter2D(src, dst1, src.depth(), kern);
//![filter2D]
t = ((double) System.currentTimeMillis() - t) / 1000;
System.out.println("Built-in filter2D time passed in seconds: " + t);
HighGui.imshow( "Output", dst1 );
HighGui.waitKey();
System.exit(0);
Reported by PMD.
Line: 18
int img_codec = Imgcodecs.IMREAD_COLOR;
if (args.length != 0) {
    filename = args[0];
    if (args.length >= 2 && args[1].equals("G"))
        img_codec = Imgcodecs.IMREAD_GRAYSCALE;
}
Mat src = Imgcodecs.imread(filename, img_codec);
Reported by PMD.
Line: 24
Mat src = Imgcodecs.imread(filename, img_codec);
if (src.empty()) {
System.out.println("Can't open image [" + filename + "]");
System.out.println("Program Arguments: [image_path -- default ../data/lena.jpg] [G -- grayscale]");
System.exit(-1);
}
Reported by PMD.
Line: 27
if (src.empty()) {
System.out.println("Can't open image [" + filename + "]");
System.out.println("Program Arguments: [image_path -- default ../data/lena.jpg] [G -- grayscale]");
System.exit(-1);
}
HighGui.namedWindow("Input", HighGui.WINDOW_AUTOSIZE);
HighGui.namedWindow("Output", HighGui.WINDOW_AUTOSIZE);
Reported by PMD.
Line: 31
}
HighGui.namedWindow("Input", HighGui.WINDOW_AUTOSIZE);
HighGui.namedWindow("Output", HighGui.WINDOW_AUTOSIZE);
HighGui.imshow( "Input", src );
double t = System.currentTimeMillis();
Mat dst0 = sharpen(src, new Mat());
Reported by PMD.
Line: 55
Mat dst1 = new Mat();
//![filter2D]
Imgproc.filter2D(src, dst1, src.depth(), kern);
//![filter2D]
t = ((double) System.currentTimeMillis() - t) / 1000;
System.out.println("Built-in filter2D time passed in seconds: " + t);
HighGui.imshow( "Output", dst1 );
Reported by PMD.
Line: 63
HighGui.imshow( "Output", dst1 );
HighGui.waitKey();
System.exit(0);
}
//! [basic_method]
public static double saturate(double x) {
return x > 255.0 ? 255.0 : (x < 0.0 ? 0.0 : x);
Reported by PMD.
modules/dnn/test/imagenet_cls_test_googlenet.py
19 issues
Line: 1
Column: 1
import numpy as np
import sys
import os
import argparse
from imagenet_cls_test_alexnet import MeanChannelsFetch, CaffeModel, DnnCaffeModel, ClsAccEvaluation
try:
    import caffe
except ImportError:
    raise ImportError('Can\'t find Caffe Python module. If you\'ve built it from sources without installation, '
Reported by Pylint.
Line: 2
Column: 1
import numpy as np
import sys
import os
import argparse
from imagenet_cls_test_alexnet import MeanChannelsFetch, CaffeModel, DnnCaffeModel, ClsAccEvaluation
try:
    import caffe
except ImportError:
    raise ImportError('Can\'t find Caffe Python module. If you\'ve built it from sources without installation, '
Reported by Pylint.
Line: 3
Column: 1
import numpy as np
import sys
import os
import argparse
from imagenet_cls_test_alexnet import MeanChannelsFetch, CaffeModel, DnnCaffeModel, ClsAccEvaluation
try:
    import caffe
except ImportError:
    raise ImportError('Can\'t find Caffe Python module. If you\'ve built it from sources without installation, '
Reported by Pylint.
Line: 7
Column: 5
import argparse
from imagenet_cls_test_alexnet import MeanChannelsFetch, CaffeModel, DnnCaffeModel, ClsAccEvaluation
try:
    import caffe
except ImportError:
    raise ImportError('Can\'t find Caffe Python module. If you\'ve built it from sources without installation, '
                      'configure environment variable PYTHONPATH to "git/caffe/python" directory')
try:
    import cv2 as cv
Reported by Pylint.
Line: 9
Column: 5
try:
    import caffe
except ImportError:
    raise ImportError('Can\'t find Caffe Python module. If you\'ve built it from sources without installation, '
                      'configure environment variable PYTHONPATH to "git/caffe/python" directory')
try:
    import cv2 as cv
except ImportError:
    raise ImportError('Can\'t find OpenCV Python module. If you\'ve built it from sources without installation, '
Reported by Pylint.
Line: 12
Column: 5
    raise ImportError('Can\'t find Caffe Python module. If you\'ve built it from sources without installation, '
                      'configure environment variable PYTHONPATH to "git/caffe/python" directory')
try:
    import cv2 as cv
except ImportError:
    raise ImportError('Can\'t find OpenCV Python module. If you\'ve built it from sources without installation, '
                      'configure environment variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
if __name__ == "__main__":
Reported by Pylint.
Line: 14
Column: 5
try:
    import cv2 as cv
except ImportError:
    raise ImportError('Can\'t find OpenCV Python module. If you\'ve built it from sources without installation, '
                      'configure environment variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--imgs_dir", help="path to ImageNet validation subset images dir, ILSVRC2012_img_val dir")
Reported by Pylint.
Line: 1
Column: 1
import numpy as np
import sys
import os
import argparse
from imagenet_cls_test_alexnet import MeanChannelsFetch, CaffeModel, DnnCaffeModel, ClsAccEvaluation
try:
    import caffe
except ImportError:
    raise ImportError('Can\'t find Caffe Python module. If you\'ve built it from sources without installation, '
Reported by Pylint.
Line: 2
Column: 1
import numpy as np
import sys
import os
import argparse
from imagenet_cls_test_alexnet import MeanChannelsFetch, CaffeModel, DnnCaffeModel, ClsAccEvaluation
try:
    import caffe
except ImportError:
    raise ImportError('Can\'t find Caffe Python module. If you\'ve built it from sources without installation, '
Reported by Pylint.
Line: 3
Column: 1
import numpy as np
import sys
import os
import argparse
from imagenet_cls_test_alexnet import MeanChannelsFetch, CaffeModel, DnnCaffeModel, ClsAccEvaluation
try:
    import caffe
except ImportError:
    raise ImportError('Can\'t find Caffe Python module. If you\'ve built it from sources without installation, '
Reported by Pylint.
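For imagenet_cls_test_googlenet.py the column-5 findings at lines 7, 9, 12 and 14 fall on the import and raise statements inside the two try/except blocks. A frequent Pylint message at exactly that spot is raise-missing-from (W0707); treating the findings as such is an assumption, since the report drops the message text. Under that assumption, chaining the re-raised error keeps the original traceback:
try:
    import caffe
except ImportError as err:
    # "from err" satisfies an assumed raise-missing-from finding and preserves the import failure's traceback
    raise ImportError('Can\'t find Caffe Python module. If you\'ve built it from sources without installation, '
                      'configure environment variable PYTHONPATH to "git/caffe/python" directory') from err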
modules/core/misc/java/test/PointTest.java
19 issues
Line: 9
public class PointTest extends OpenCVTestCase {
    private Point p1;
    private Point p2;
    @Override
    protected void setUp() throws Exception {
        super.setUp();
Reported by PMD.
Line: 10
public class PointTest extends OpenCVTestCase {
    private Point p1;
    private Point p2;
    @Override
    protected void setUp() throws Exception {
        super.setUp();
Reported by PMD.
Line: 12
private Point p1;
private Point p2;
@Override
protected void setUp() throws Exception {
    super.setUp();
    p1 = new Point(2, 2);
    p2 = new Point(1, 1);
Reported by PMD.
Line: 20
    p2 = new Point(1, 1);
}
public void testClone() {
    Point truth = new Point(1, 1);
    Point dstPoint = truth.clone();
    assertEquals(truth, dstPoint);
}
Reported by PMD.
Line: 26
    assertEquals(truth, dstPoint);
}
public void testDot() {
    double result = p1.dot(p2);
    assertEquals(4.0, result);
}
public void testEqualsObject() {
Reported by PMD.
Line: 31
    assertEquals(4.0, result);
}
public void testEqualsObject() {
    boolean flag = p1.equals(p1);
    assertTrue(flag);
    flag = p1.equals(p2);
    assertFalse(flag);
Reported by PMD.
Line: 31
    assertEquals(4.0, result);
}
public void testEqualsObject() {
    boolean flag = p1.equals(p1);
    assertTrue(flag);
    flag = p1.equals(p2);
    assertFalse(flag);
Reported by PMD.
Line: 39
    assertFalse(flag);
}
public void testHashCode() {
    assertEquals(p1.hashCode(), p1.hashCode());
}
public void testInside() {
    Rect rect = new Rect(0, 0, 5, 3);
Reported by PMD.
Line: 43
    assertEquals(p1.hashCode(), p1.hashCode());
}
public void testInside() {
    Rect rect = new Rect(0, 0, 5, 3);
    assertTrue(p1.inside(rect));
    Point p2 = new Point(3, 3);
    assertFalse(p2.inside(rect));
Reported by PMD.
Line: 43
    assertEquals(p1.hashCode(), p1.hashCode());
}
public void testInside() {
    Rect rect = new Rect(0, 0, 5, 3);
    assertTrue(p1.inside(rect));
    Point p2 = new Point(3, 3);
    assertFalse(p2.inside(rect));
Reported by PMD.
samples/android/camera-calibration/src/org/opencv/samples/cameracalibration/CameraCalibrationActivity.java
19 issues
Line: 42
import java.util.Collections;
import java.util.List;
public class CameraCalibrationActivity extends CameraActivity implements CvCameraViewListener2, OnTouchListener {
    private static final String TAG = "OCVSample::Activity";
    private CameraBridgeViewBase mOpenCvCameraView;
    private CameraCalibrator mCalibrator;
    private OnCameraFrameRender mOnCameraFrameRender;
Reported by PMD.
Line: 42
import java.util.Collections;
import java.util.List;
public class CameraCalibrationActivity extends CameraActivity implements CvCameraViewListener2, OnTouchListener {
    private static final String TAG = "OCVSample::Activity";
    private CameraBridgeViewBase mOpenCvCameraView;
    private CameraCalibrator mCalibrator;
    private OnCameraFrameRender mOnCameraFrameRender;
Reported by PMD.
Line: 45
public class CameraCalibrationActivity extends CameraActivity implements CvCameraViewListener2, OnTouchListener {
    private static final String TAG = "OCVSample::Activity";
    private CameraBridgeViewBase mOpenCvCameraView;
    private CameraCalibrator mCalibrator;
    private OnCameraFrameRender mOnCameraFrameRender;
    private Menu mMenu;
    private int mWidth;
    private int mHeight;
Reported by PMD.
Line: 46
private static final String TAG = "OCVSample::Activity";
private CameraBridgeViewBase mOpenCvCameraView;
private CameraCalibrator mCalibrator;
private OnCameraFrameRender mOnCameraFrameRender;
private Menu mMenu;
private int mWidth;
private int mHeight;
Reported by PMD.
Line: 47
private CameraBridgeViewBase mOpenCvCameraView;
private CameraCalibrator mCalibrator;
private OnCameraFrameRender mOnCameraFrameRender;
private Menu mMenu;
private int mWidth;
private int mHeight;
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
Reported by PMD.
Line: 48
private CameraBridgeViewBase mOpenCvCameraView;
private CameraCalibrator mCalibrator;
private OnCameraFrameRender mOnCameraFrameRender;
private Menu mMenu;
private int mWidth;
private int mHeight;
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
    @Override
Reported by PMD.
Line: 49
private CameraCalibrator mCalibrator;
private OnCameraFrameRender mOnCameraFrameRender;
private Menu mMenu;
private int mWidth;
private int mHeight;
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
    @Override
    public void onManagerConnected(int status) {
Reported by PMD.
Line: 50
private OnCameraFrameRender mOnCameraFrameRender;
private Menu mMenu;
private int mWidth;
private int mHeight;
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
    @Override
    public void onManagerConnected(int status) {
        switch (status) {
Reported by PMD.
Line: 52
private int mWidth;
private int mHeight;
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
    @Override
    public void onManagerConnected(int status) {
        switch (status) {
            case LoaderCallbackInterface.SUCCESS:
            {
Reported by PMD.
Line: 52
private int mWidth;
private int mHeight;
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
    @Override
    public void onManagerConnected(int status) {
        switch (status) {
            case LoaderCallbackInterface.SUCCESS:
            {
Reported by PMD.
modules/core/misc/java/test/CvTypeTest.java
19 issues
Line: 6
import org.opencv.core.CvType;
import org.opencv.test.OpenCVTestCase;
public class CvTypeTest extends OpenCVTestCase {
    public void testMakeType() {
        assertEquals(CvType.CV_8UC4, CvType.makeType(CvType.CV_8U, 4));
    }
Reported by PMD.
Line: 8
public class CvTypeTest extends OpenCVTestCase {
    public void testMakeType() {
        assertEquals(CvType.CV_8UC4, CvType.makeType(CvType.CV_8U, 4));
    }
    public void testCV_8UC() {
        assertEquals(CvType.CV_8UC4, CvType.CV_8UC(4));
Reported by PMD.
Line: 12
    assertEquals(CvType.CV_8UC4, CvType.makeType(CvType.CV_8U, 4));
}
public void testCV_8UC() {
    assertEquals(CvType.CV_8UC4, CvType.CV_8UC(4));
}
public void testCV_8SC() {
    assertEquals(CvType.CV_8SC4, CvType.CV_8SC(4));
Reported by PMD.
Line: 16
    assertEquals(CvType.CV_8UC4, CvType.CV_8UC(4));
}
public void testCV_8SC() {
    assertEquals(CvType.CV_8SC4, CvType.CV_8SC(4));
}
public void testCV_16UC() {
    assertEquals(CvType.CV_16UC4, CvType.CV_16UC(4));
Reported by PMD.
Line: 20
    assertEquals(CvType.CV_8SC4, CvType.CV_8SC(4));
}
public void testCV_16UC() {
    assertEquals(CvType.CV_16UC4, CvType.CV_16UC(4));
}
public void testCV_16SC() {
    assertEquals(CvType.CV_16SC4, CvType.CV_16SC(4));
Reported by PMD.
Line: 24
    assertEquals(CvType.CV_16UC4, CvType.CV_16UC(4));
}
public void testCV_16SC() {
    assertEquals(CvType.CV_16SC4, CvType.CV_16SC(4));
}
public void testCV_32SC() {
    assertEquals(CvType.CV_32SC4, CvType.CV_32SC(4));
Reported by PMD.
Line: 28
    assertEquals(CvType.CV_16SC4, CvType.CV_16SC(4));
}
public void testCV_32SC() {
    assertEquals(CvType.CV_32SC4, CvType.CV_32SC(4));
}
public void testCV_32FC() {
    assertEquals(CvType.CV_32FC4, CvType.CV_32FC(4));
Reported by PMD.
Line: 32
    assertEquals(CvType.CV_32SC4, CvType.CV_32SC(4));
}
public void testCV_32FC() {
    assertEquals(CvType.CV_32FC4, CvType.CV_32FC(4));
}
public void testCV_64FC() {
    assertEquals(CvType.CV_64FC4, CvType.CV_64FC(4));
Reported by PMD.
Line: 36
    assertEquals(CvType.CV_32FC4, CvType.CV_32FC(4));
}
public void testCV_64FC() {
    assertEquals(CvType.CV_64FC4, CvType.CV_64FC(4));
}
public void testCV_16FC() {
    assertEquals(CvType.CV_16FC1, CvType.CV_16FC(1));
Reported by PMD.
Line: 40
    assertEquals(CvType.CV_64FC4, CvType.CV_64FC(4));
}
public void testCV_16FC() {
    assertEquals(CvType.CV_16FC1, CvType.CV_16FC(1));
    assertEquals(CvType.CV_16FC2, CvType.CV_16FC(2));
    assertEquals(CvType.CV_16FC3, CvType.CV_16FC(3));
    assertEquals(CvType.CV_16FC4, CvType.CV_16FC(4));
}
Reported by PMD.
modules/objdetect/misc/python/test/test_facedetect.py
19 issues
Line: 11
Column: 1
from __future__ import print_function
import numpy as np
import cv2 as cv
def detect(img, cascade):
    rects = cascade.detectMultiScale(img, scaleFactor=1.275, minNeighbors=4, minSize=(30, 30),
                                     flags=cv.CASCADE_SCALE_IMAGE)
    if len(rects) == 0:
Reported by Pylint.
Line: 21
Column: 1
    rects[:,2:] += rects[:,:2]
    return rects
from tests_common import NewOpenCVTests, intersectionRate
class facedetect_test(NewOpenCVTests):
    def test_facedetect(self):
        cascade_fn = self.repoPath + '/data/haarcascades/haarcascade_frontalface_alt.xml'
Reported by Pylint.
Line: 10
Column: 1
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
def detect(img, cascade):
    rects = cascade.detectMultiScale(img, scaleFactor=1.275, minNeighbors=4, minSize=(30, 30),
                                     flags=cv.CASCADE_SCALE_IMAGE)
Reported by Pylint.
Line: 13
Column: 1
import numpy as np
import cv2 as cv
def detect(img, cascade):
    rects = cascade.detectMultiScale(img, scaleFactor=1.275, minNeighbors=4, minSize=(30, 30),
                                     flags=cv.CASCADE_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    rects[:,2:] += rects[:,:2]
Reported by Pylint.
Line: 21
Column: 1
    rects[:,2:] += rects[:,:2]
    return rects
from tests_common import NewOpenCVTests, intersectionRate
class facedetect_test(NewOpenCVTests):
    def test_facedetect(self):
        cascade_fn = self.repoPath + '/data/haarcascades/haarcascade_frontalface_alt.xml'
Reported by Pylint.
Line: 23
Column: 1
from tests_common import NewOpenCVTests, intersectionRate
class facedetect_test(NewOpenCVTests):
    def test_facedetect(self):
        cascade_fn = self.repoPath + '/data/haarcascades/haarcascade_frontalface_alt.xml'
        nested_fn = self.repoPath + '/data/haarcascades/haarcascade_eye.xml'
Reported by Pylint.
Line: 23
Column: 1
from tests_common import NewOpenCVTests, intersectionRate
class facedetect_test(NewOpenCVTests):
    def test_facedetect(self):
        cascade_fn = self.repoPath + '/data/haarcascades/haarcascade_frontalface_alt.xml'
        nested_fn = self.repoPath + '/data/haarcascades/haarcascade_eye.xml'
Reported by Pylint.
Line: 23
Column: 1
from tests_common import NewOpenCVTests, intersectionRate
class facedetect_test(NewOpenCVTests):
    def test_facedetect(self):
        cascade_fn = self.repoPath + '/data/haarcascades/haarcascade_frontalface_alt.xml'
        nested_fn = self.repoPath + '/data/haarcascades/haarcascade_eye.xml'
Reported by Pylint.
Line: 25
Column: 5
class facedetect_test(NewOpenCVTests):
    def test_facedetect(self):
        cascade_fn = self.repoPath + '/data/haarcascades/haarcascade_frontalface_alt.xml'
        nested_fn = self.repoPath + '/data/haarcascades/haarcascade_eye.xml'
        cascade = cv.CascadeClassifier(cascade_fn)
        nested = cv.CascadeClassifier(nested_fn)
Reported by Pylint.
Line: 25
Column: 5
class facedetect_test(NewOpenCVTests):
    def test_facedetect(self):
        cascade_fn = self.repoPath + '/data/haarcascades/haarcascade_frontalface_alt.xml'
        nested_fn = self.repoPath + '/data/haarcascades/haarcascade_eye.xml'
        cascade = cv.CascadeClassifier(cascade_fn)
        nested = cv.CascadeClassifier(nested_fn)
Reported by Pylint.
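In test_facedetect.py the line 21, column 1 finding lands on the `from tests_common import ...` statement that appears after detect() and other executable code, which matches Pylint's wrong-import-position (C0413); the remaining findings sit on the module, function and class headers where docstring and naming checks usually fire. Both readings are assumptions. A sketch with all imports grouped at the top of the module:
from __future__ import print_function

import numpy as np
import cv2 as cv

from tests_common import NewOpenCVTests, intersectionRate


def detect(img, cascade):
    """Runs the cascade and converts (x, y, w, h) boxes to (x1, y1, x2, y2)."""
    rects = cascade.detectMultiScale(img, scaleFactor=1.275, minNeighbors=4, minSize=(30, 30),
                                     flags=cv.CASCADE_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    rects[:, 2:] += rects[:, :2]
    return rects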
samples/dnn/dnn_model_runner/dnn_conversion/tf/tf_model.py
19 issues
Line: 1
Column: 1
import cv2
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from ..common.abstract_model import AbstractModel, Framework
from ..common.utils import DNN_LIB, get_full_model_path
CURRENT_LIB = "TF"
MODEL_FORMAT = ".pb"
Reported by Pylint.
Line: 2
Column: 1
import cv2
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from ..common.abstract_model import AbstractModel, Framework
from ..common.utils import DNN_LIB, get_full_model_path
CURRENT_LIB = "TF"
MODEL_FORMAT = ".pb"
Reported by Pylint.
Line: 3
Column: 1
import cv2
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from ..common.abstract_model import AbstractModel, Framework
from ..common.utils import DNN_LIB, get_full_model_path
CURRENT_LIB = "TF"
MODEL_FORMAT = ".pb"
Reported by Pylint.
Line: 5
Column: 1
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from ..common.abstract_model import AbstractModel, Framework
from ..common.utils import DNN_LIB, get_full_model_path
CURRENT_LIB = "TF"
MODEL_FORMAT = ".pb"
Reported by Pylint.
Line: 6
Column: 1
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from ..common.abstract_model import AbstractModel, Framework
from ..common.utils import DNN_LIB, get_full_model_path
CURRENT_LIB = "TF"
MODEL_FORMAT = ".pb"
Reported by Pylint.
Line: 40
Column: 42
def _set_dnn_model(self):
    if not self._is_ready_to_transfer_graph:
        # get model TF graph
        tf_model_graph = tf.function(lambda x: self._original_model(x))
        tf_model_graph = tf_model_graph.get_concrete_function(
            tf.TensorSpec(self._original_model.inputs[0].shape, self._original_model.inputs[0].dtype))
        # obtain frozen concrete function
Reported by Pylint.
Line: 1
Column: 1
import cv2
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
from ..common.abstract_model import AbstractModel, Framework
from ..common.utils import DNN_LIB, get_full_model_path
CURRENT_LIB = "TF"
MODEL_FORMAT = ".pb"
Reported by Pylint.
Line: 12
Column: 1
MODEL_FORMAT = ".pb"
class TFModelPreparer(AbstractModel):
""" Class for the preparation of the TF models: original and converted OpenCV Net.
Args:
model_name: TF model name
original_model: TF configured model object or session
Reported by Pylint.
Line: 43
Column: 1
tf_model_graph = tf.function(lambda x: self._original_model(x))
tf_model_graph = tf_model_graph.get_concrete_function(
    tf.TensorSpec(self._original_model.inputs[0].shape, self._original_model.inputs[0].dtype))
# obtain frozen concrete function
frozen_tf_func = convert_variables_to_constants_v2(tf_model_graph)
frozen_tf_func.graph.as_graph_def()
Reported by Pylint.
Line: 77
Column: 5
    return model_paths_dict
def get_prepared_models(self):
    original_lib_name = CURRENT_LIB + " " + self._model_name
    configured_model_dict = {
        original_lib_name: self._original_model,
        DNN_LIB + " " + self._model_name: self._dnn_model
    }
Reported by Pylint.
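tf_model.py's line 40, column 42 finding points into `tf.function(lambda x: self._original_model(x))`, where Pylint's unnecessary-lambda (W0108) typically fires, and the line 43, column 1 finding sits on the long get_concrete_function call, where line-too-long is common; both message identities are assumptions. A standalone sketch of the same graph-freezing step with the lambda removed and the call wrapped (freeze_keras_model is a hypothetical helper, not part of tf_model.py):
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2


def freeze_keras_model(model):
    """Returns a frozen concrete function for a callable Keras model."""
    model_fn = tf.function(model)  # tf.function accepts the model directly; no lambda needed
    concrete = model_fn.get_concrete_function(
        tf.TensorSpec(model.inputs[0].shape,
                      model.inputs[0].dtype))
    return convert_variables_to_constants_v2(concrete)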
samples/dnn/action_recognition.py
19 issues
Line: 3
Column: 1
import os
import numpy as np
import cv2 as cv
import argparse
from common import findFile
parser = argparse.ArgumentParser(description='Use this script to run action recognition using 3D ResNet34',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input', '-i', help='Path to input video file. Skip this argument to capture frames from a camera.')
Reported by Pylint.
Line: 1
Column: 1
import os
import numpy as np
import cv2 as cv
import argparse
from common import findFile
parser = argparse.ArgumentParser(description='Use this script to run action recognition using 3D ResNet34',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input', '-i', help='Path to input video file. Skip this argument to capture frames from a camera.')
Reported by Pylint.
Line: 1
Column: 1
import os
import numpy as np
import cv2 as cv
import argparse
from common import findFile
parser = argparse.ArgumentParser(description='Use this script to run action recognition using 3D ResNet34',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input', '-i', help='Path to input video file. Skip this argument to capture frames from a camera.')
Reported by Pylint.
Line: 4
Column: 1
import os
import numpy as np
import cv2 as cv
import argparse
from common import findFile
parser = argparse.ArgumentParser(description='Use this script to run action recognition using 3D ResNet34',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input', '-i', help='Path to input video file. Skip this argument to capture frames from a camera.')
Reported by Pylint.
Line: 7
Column: 1
import argparse
from common import findFile
parser = argparse.ArgumentParser(description='Use this script to run action recognition using 3D ResNet34',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input', '-i', help='Path to input video file. Skip this argument to capture frames from a camera.')
parser.add_argument('--model', required=True, help='Path to model.')
parser.add_argument('--classes', default=findFile('action_recongnition_kinetics.txt'), help='Path to classes list.')
Reported by Pylint.
Line: 9
Column: 1
parser = argparse.ArgumentParser(description='Use this script to run action recognition using 3D ResNet34',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input', '-i', help='Path to input video file. Skip this argument to capture frames from a camera.')
parser.add_argument('--model', required=True, help='Path to model.')
parser.add_argument('--classes', default=findFile('action_recongnition_kinetics.txt'), help='Path to classes list.')
# To get net download original repository https://github.com/kenshohara/video-classification-3d-cnn-pytorch
# For correct ONNX export modify file: video-classification-3d-cnn-pytorch/models/resnet.py
Reported by Pylint.
Line: 11
Column: 1
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input', '-i', help='Path to input video file. Skip this argument to capture frames from a camera.')
parser.add_argument('--model', required=True, help='Path to model.')
parser.add_argument('--classes', default=findFile('action_recongnition_kinetics.txt'), help='Path to classes list.')
# To get net download original repository https://github.com/kenshohara/video-classification-3d-cnn-pytorch
# For correct ONNX export modify file: video-classification-3d-cnn-pytorch/models/resnet.py
# change
# - def downsample_basic_block(x, planes, stride):
Reported by Pylint.
Line: 13
Column: 1
parser.add_argument('--model', required=True, help='Path to model.')
parser.add_argument('--classes', default=findFile('action_recongnition_kinetics.txt'), help='Path to classes list.')
# To get net download original repository https://github.com/kenshohara/video-classification-3d-cnn-pytorch
# For correct ONNX export modify file: video-classification-3d-cnn-pytorch/models/resnet.py
# change
# - def downsample_basic_block(x, planes, stride):
# - out = F.avg_pool3d(x, kernel_size=1, stride=stride)
# - zero_pads = torch.Tensor(out.size(0), planes - out.size(1),
Reported by Pylint.
Line: 35
Column: 1
# To ONNX export use torch.onnx.export(model, inputs, model_name)
def get_class_names(path):
    class_names = []
    with open(path) as f:
        for row in f:
            class_names.append(row[:-1])
    return class_names
Reported by Pylint.
Line: 37
Column: 24
def get_class_names(path):
    class_names = []
    with open(path) as f:
        for row in f:
            class_names.append(row[:-1])
    return class_names
def classify_video(video_path, net_path):
Reported by Pylint.
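The last two findings for action_recognition.py fall inside get_class_names: line 35, column 1 is the function header, where docstring checks usually fire, and line 37, column 24 falls in the function body, where unspecified-encoding (W1514) commonly flags the open(path) call on Python 3. Both readings are assumptions. A sketch that adds an explicit encoding while keeping the shape of the original loop; rstrip('\n') replaces the row[:-1] slice so a missing final newline is not truncated:
def get_class_names(path):
    """Reads one Kinetics class label per line from the file at `path`."""
    class_names = []
    with open(path, encoding='utf-8') as f:  # explicit encoding; assumed W1514 finding
        for row in f:
            class_names.append(row.rstrip('\n'))
    return class_names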