The following issues were found:
samples/dnn/object_detection.py
81 issues
Line: 1
Column: 1
import cv2 as cv
import argparse
import numpy as np
import sys
import time
from threading import Thread
if sys.version_info[0] == 2:
import Queue as queue
else:
Reported by Pylint.
Line: 8
Column: 5
import time
from threading import Thread
if sys.version_info[0] == 2:
import Queue as queue
else:
import queue
from common import *
from tf_text_graph_common import readTextMessage
Reported by Pylint.
Line: 12
Column: 1
else:
import queue
from common import *
from tf_text_graph_common import readTextMessage
from tf_text_graph_ssd import createSSDGraph
from tf_text_graph_faster_rcnn import createFasterRCNNGraph
backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV,
Reported by Pylint.
Line: 12
Column: 1
else:
import queue
from common import *
from tf_text_graph_common import readTextMessage
from tf_text_graph_ssd import createSSDGraph
from tf_text_graph_faster_rcnn import createFasterRCNNGraph
backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV,
Reported by Pylint.
Line: 97
Column: 24
confThreshold = args.thr
nmsThreshold = args.nms
def postprocess(frame, outs):
frameHeight = frame.shape[0]
frameWidth = frame.shape[1]
def drawPred(classId, conf, left, top, right, bottom):
# Draw a bounding box.
Reported by Pylint.
Line: 97
Column: 17
confThreshold = args.thr
nmsThreshold = args.nms
def postprocess(frame, outs):
frameHeight = frame.shape[0]
frameWidth = frame.shape[1]
def drawPred(classId, conf, left, top, right, bottom):
# Draw a bounding box.
Reported by Pylint.
Line: 105
Column: 9
# Draw a bounding box.
cv.rectangle(frame, (left, top), (right, bottom), (0, 255, 0))
label = '%.2f' % conf
# Print a label of class.
if classes:
assert(classId < len(classes))
label = '%s: %s' % (classes[classId], label)
Reported by Pylint.
Line: 202
Column: 5
cv.namedWindow(winName, cv.WINDOW_NORMAL)
def callback(pos):
global confThreshold
confThreshold = pos / 100.0
cv.createTrackbar('Confidence threshold, %', winName, int(confThreshold * 100), 99, callback)
cap = cv.VideoCapture(cv.samples.findFileOrKeep(args.input) if args.input else 0)
Reported by Pylint.
Line: 232
Column: 5
#
framesQueue = QueueFPS()
def framesThreadBody():
global framesQueue, process
while process:
hasFrame, frame = cap.read()
if not hasFrame:
break
Reported by Pylint.
Line: 235
Column: 19
global framesQueue, process
while process:
hasFrame, frame = cap.read()
if not hasFrame:
break
framesQueue.put(frame)
Reported by Pylint.
modules/java/generator/android/java/org/opencv/android/JavaCameraView.java
79 issues
Line: 30
* When frame is delivered via callback from Camera - it processed via OpenCV to be
* converted to RGBA32 and then passed to the external callback for modifications if required.
*/
public class JavaCameraView extends CameraBridgeViewBase implements PreviewCallback {
private static final int MAGIC_TEXTURE_ID = 10;
private static final String TAG = "JavaCameraView";
private byte mBuffer[];
Reported by PMD.
Line: 30
* When frame is delivered via callback from Camera - it processed via OpenCV to be
* converted to RGBA32 and then passed to the external callback for modifications if required.
*/
public class JavaCameraView extends CameraBridgeViewBase implements PreviewCallback {
private static final int MAGIC_TEXTURE_ID = 10;
private static final String TAG = "JavaCameraView";
private byte mBuffer[];
Reported by PMD.
Line: 35
private static final int MAGIC_TEXTURE_ID = 10;
private static final String TAG = "JavaCameraView";
private byte mBuffer[];
private Mat[] mFrameChain;
private int mChainIdx = 0;
private Thread mThread;
private boolean mStopThread;
Reported by PMD.
Line: 36
private static final String TAG = "JavaCameraView";
private byte mBuffer[];
private Mat[] mFrameChain;
private int mChainIdx = 0;
private Thread mThread;
private boolean mStopThread;
protected Camera mCamera;
Reported by PMD.
Line: 37
private byte mBuffer[];
private Mat[] mFrameChain;
private int mChainIdx = 0;
private Thread mThread;
private boolean mStopThread;
protected Camera mCamera;
protected JavaCameraFrame[] mCameraFrame;
Reported by PMD.
Line: 37
private byte mBuffer[];
private Mat[] mFrameChain;
private int mChainIdx = 0;
private Thread mThread;
private boolean mStopThread;
protected Camera mCamera;
protected JavaCameraFrame[] mCameraFrame;
Reported by PMD.
Line: 38
private byte mBuffer[];
private Mat[] mFrameChain;
private int mChainIdx = 0;
private Thread mThread;
private boolean mStopThread;
protected Camera mCamera;
protected JavaCameraFrame[] mCameraFrame;
private SurfaceTexture mSurfaceTexture;
Reported by PMD.
Line: 39
private Mat[] mFrameChain;
private int mChainIdx = 0;
private Thread mThread;
private boolean mStopThread;
protected Camera mCamera;
protected JavaCameraFrame[] mCameraFrame;
private SurfaceTexture mSurfaceTexture;
private int mPreviewFormat = ImageFormat.NV21;
Reported by PMD.
Line: 41
private Thread mThread;
private boolean mStopThread;
protected Camera mCamera;
protected JavaCameraFrame[] mCameraFrame;
private SurfaceTexture mSurfaceTexture;
private int mPreviewFormat = ImageFormat.NV21;
public static class JavaCameraSizeAccessor implements ListItemAccessor {
Reported by PMD.
Line: 42
private boolean mStopThread;
protected Camera mCamera;
protected JavaCameraFrame[] mCameraFrame;
private SurfaceTexture mSurfaceTexture;
private int mPreviewFormat = ImageFormat.NV21;
public static class JavaCameraSizeAccessor implements ListItemAccessor {
Reported by PMD.
modules/java/generator/android-21/java/org/opencv/android/CameraGLRendererBase.java
75 issues
Line: 21
import android.view.View;
@TargetApi(15)
public abstract class CameraGLRendererBase implements GLSurfaceView.Renderer, SurfaceTexture.OnFrameAvailableListener {
protected final String LOGTAG = "CameraGLRendererBase";
// shaders
private final String vss = ""
Reported by PMD.
Line: 21
import android.view.View;
@TargetApi(15)
public abstract class CameraGLRendererBase implements GLSurfaceView.Renderer, SurfaceTexture.OnFrameAvailableListener {
protected final String LOGTAG = "CameraGLRendererBase";
// shaders
private final String vss = ""
Reported by PMD.
Line: 23
@TargetApi(15)
public abstract class CameraGLRendererBase implements GLSurfaceView.Renderer, SurfaceTexture.OnFrameAvailableListener {
protected final String LOGTAG = "CameraGLRendererBase";
// shaders
private final String vss = ""
+ "attribute vec2 vPosition;\n"
+ "attribute vec2 vTexCoord;\n" + "varying vec2 texCoord;\n"
Reported by PMD.
Line: 23
@TargetApi(15)
public abstract class CameraGLRendererBase implements GLSurfaceView.Renderer, SurfaceTexture.OnFrameAvailableListener {
protected final String LOGTAG = "CameraGLRendererBase";
// shaders
private final String vss = ""
+ "attribute vec2 vPosition;\n"
+ "attribute vec2 vTexCoord;\n" + "varying vec2 texCoord;\n"
Reported by PMD.
Line: 23
@TargetApi(15)
public abstract class CameraGLRendererBase implements GLSurfaceView.Renderer, SurfaceTexture.OnFrameAvailableListener {
protected final String LOGTAG = "CameraGLRendererBase";
// shaders
private final String vss = ""
+ "attribute vec2 vPosition;\n"
+ "attribute vec2 vTexCoord;\n" + "varying vec2 texCoord;\n"
Reported by PMD.
Line: 26
protected final String LOGTAG = "CameraGLRendererBase";
// shaders
private final String vss = ""
+ "attribute vec2 vPosition;\n"
+ "attribute vec2 vTexCoord;\n" + "varying vec2 texCoord;\n"
+ "void main() {\n" + " texCoord = vTexCoord;\n"
+ " gl_Position = vec4 ( vPosition.x, vPosition.y, 0.0, 1.0 );\n"
+ "}";
Reported by PMD.
Line: 26
protected final String LOGTAG = "CameraGLRendererBase";
// shaders
private final String vss = ""
+ "attribute vec2 vPosition;\n"
+ "attribute vec2 vTexCoord;\n" + "varying vec2 texCoord;\n"
+ "void main() {\n" + " texCoord = vTexCoord;\n"
+ " gl_Position = vec4 ( vPosition.x, vPosition.y, 0.0, 1.0 );\n"
+ "}";
Reported by PMD.
Line: 33
+ " gl_Position = vec4 ( vPosition.x, vPosition.y, 0.0, 1.0 );\n"
+ "}";
private final String fssOES = ""
+ "#extension GL_OES_EGL_image_external : require\n"
+ "precision mediump float;\n"
+ "uniform samplerExternalOES sTexture;\n"
+ "varying vec2 texCoord;\n"
+ "void main() {\n"
Reported by PMD.
Line: 33
+ " gl_Position = vec4 ( vPosition.x, vPosition.y, 0.0, 1.0 );\n"
+ "}";
private final String fssOES = ""
+ "#extension GL_OES_EGL_image_external : require\n"
+ "precision mediump float;\n"
+ "uniform samplerExternalOES sTexture;\n"
+ "varying vec2 texCoord;\n"
+ "void main() {\n"
Reported by PMD.
Line: 41
+ "void main() {\n"
+ " gl_FragColor = texture2D(sTexture,texCoord);\n" + "}";
private final String fss2D = ""
+ "precision mediump float;\n"
+ "uniform sampler2D sTexture;\n"
+ "varying vec2 texCoord;\n"
+ "void main() {\n"
+ " gl_FragColor = texture2D(sTexture,texCoord);\n" + "}";
Reported by PMD.
samples/python/mouse_and_match.py
75 issues
Line: 18
Column: 1
from __future__ import print_function
import numpy as np
import cv2 as cv
# built-in modules
import os
import sys
import glob
Reported by Pylint.
Line: 22
Column: 1
# built-in modules
import os
import sys
import glob
import argparse
from math import *
Reported by Pylint.
Line: 25
Column: 1
import sys
import glob
import argparse
from math import *
class App():
drag_start = None
sel = (0,0,0,0)
Reported by Pylint.
Line: 25
Column: 1
import sys
import glob
import argparse
from math import *
class App():
drag_start = None
sel = (0,0,0,0)
Reported by Pylint.
Line: 25
Column: 1
import sys
import glob
import argparse
from math import *
class App():
drag_start = None
sel = (0,0,0,0)
Reported by Pylint.
Line: 25
Column: 1
import sys
import glob
import argparse
from math import *
class App():
drag_start = None
sel = (0,0,0,0)
Reported by Pylint.
Line: 25
Column: 1
import sys
import glob
import argparse
from math import *
class App():
drag_start = None
sel = (0,0,0,0)
Reported by Pylint.
Line: 25
Column: 1
import sys
import glob
import argparse
from math import *
class App():
drag_start = None
sel = (0,0,0,0)
Reported by Pylint.
Line: 25
Column: 1
import sys
import glob
import argparse
from math import *
class App():
drag_start = None
sel = (0,0,0,0)
Reported by Pylint.
Line: 25
Column: 1
import sys
import glob
import argparse
from math import *
class App():
drag_start = None
sel = (0,0,0,0)
Reported by Pylint.
modules/gapi/misc/python/test/test_gapi_render.py
72 issues
Line: 4
Column: 1
#!/usr/bin/env python
import numpy as np
import cv2 as cv
import os
import sys
import unittest
from tests_common import NewOpenCVTests
Reported by Pylint.
Line: 9
Column: 1
import sys
import unittest
from tests_common import NewOpenCVTests
try:
if sys.version_info[:2] < (3, 0):
raise unittest.SkipTest('Python 2.x is not supported')
Reported by Pylint.
Line: 221
Column: 9
def setUp(self):
self.skipTest('Skip tests: ' + message)
def test_skip():
pass
pass
if __name__ == '__main__':
Reported by Pylint.
Line: 16
Column: 3
if sys.version_info[:2] < (3, 0):
raise unittest.SkipTest('Python 2.x is not supported')
# FIXME: FText isn't supported yet.
class gapi_render_test(NewOpenCVTests):
def __init__(self, *args):
super().__init__(*args)
self.size = (300, 300, 3)
Reported by Pylint.
Line: 95
Column: 1
return y_out, uv_out
def cvt_bgr_to_yuv_color(self, bgr):
y = bgr[2] * 0.299000 + bgr[1] * 0.587000 + bgr[0] * 0.114000;
u = bgr[2] * -0.168736 + bgr[1] * -0.331264 + bgr[0] * 0.500000 + 128;
v = bgr[2] * 0.500000 + bgr[1] * -0.418688 + bgr[0] * -0.081312 + 128;
return (y, u, v)
def blend_img(self, background, org, img, alpha):
Reported by Pylint.
Line: 96
Column: 1
def cvt_bgr_to_yuv_color(self, bgr):
y = bgr[2] * 0.299000 + bgr[1] * 0.587000 + bgr[0] * 0.114000;
u = bgr[2] * -0.168736 + bgr[1] * -0.331264 + bgr[0] * 0.500000 + 128;
v = bgr[2] * 0.500000 + bgr[1] * -0.418688 + bgr[0] * -0.081312 + 128;
return (y, u, v)
def blend_img(self, background, org, img, alpha):
x, y = org
Reported by Pylint.
Line: 97
Column: 1
def cvt_bgr_to_yuv_color(self, bgr):
y = bgr[2] * 0.299000 + bgr[1] * 0.587000 + bgr[0] * 0.114000;
u = bgr[2] * -0.168736 + bgr[1] * -0.331264 + bgr[0] * 0.500000 + 128;
v = bgr[2] * 0.500000 + bgr[1] * -0.418688 + bgr[0] * -0.081312 + 128;
return (y, u, v)
def blend_img(self, background, org, img, alpha):
x, y = org
h, w, _ = img.shape
Reported by Pylint.
Line: 116
Column: 50
# This is quite naive implementations used as a simple reference
# doesn't consider corner cases.
def draw_mosaic(self, img, mos, cell_sz, decim):
x,y,w,h = mos
mosaic_area = img[x:x+w, y:y+h, :]
for i in range(0, mosaic_area.shape[0], cell_sz):
for j in range(0, mosaic_area.shape[1], cell_sz):
cell_roi = mosaic_area[j:j+cell_sz, i:i+cell_sz, :]
Reported by Pylint.
Line: 224
Column: 5
def test_skip():
pass
pass
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python
import numpy as np
import cv2 as cv
import os
import sys
import unittest
from tests_common import NewOpenCVTests
Reported by Pylint.
modules/core/misc/java/src/java/core+Mat.java
72 issues
Line: 1145
} else if (clazz == Short.class || clazz == short.class) {
return (Atable<T>)new AtableShort(this, row, col);
} else {
throw new RuntimeException("Unsupported class type");
}
}
// javadoc:Mat::at(clazz, idx)
@SuppressWarnings("unchecked")
Reported by PMD.
Line: 1163
} else if (clazz == Short.class || clazz == short.class) {
return (Atable<T>)new AtableShort(this, idx);
} else {
throw new RuntimeException("Unsupported class parameter");
}
}
public static class Tuple2<T> {
public Tuple2(T _0, T _1) {
Reported by PMD.
Line: 187
//
// javadoc: Mat::clone()
public Mat clone() {
return new Mat(n_clone(nativeObj));
}
//
// C++: Mat Mat::col(int x)
Reported by PMD.
Line: 1
package org.opencv.core;
import java.nio.ByteBuffer;
// C++: class Mat
//javadoc: Mat
public class Mat {
public final long nativeObj;
Reported by PMD.
Line: 7
// C++: class Mat
//javadoc: Mat
public class Mat {
public final long nativeObj;
public Mat(long addr) {
if (addr == 0)
Reported by PMD.
Line: 7
// C++: class Mat
//javadoc: Mat
public class Mat {
public final long nativeObj;
public Mat(long addr) {
if (addr == 0)
Reported by PMD.
Line: 7
// C++: class Mat
//javadoc: Mat
public class Mat {
public final long nativeObj;
public Mat(long addr) {
if (addr == 0)
Reported by PMD.
Line: 7
// C++: class Mat
//javadoc: Mat
public class Mat {
public final long nativeObj;
public Mat(long addr) {
if (addr == 0)
Reported by PMD.
Line: 9
//javadoc: Mat
public class Mat {
public final long nativeObj;
public Mat(long addr) {
if (addr == 0)
throw new UnsupportedOperationException("Native object address is NULL");
nativeObj = addr;
Reported by PMD.
Line: 187
//
// javadoc: Mat::clone()
public Mat clone() {
return new Mat(n_clone(nativeObj));
}
//
// C++: Mat Mat::col(int x)
Reported by PMD.
modules/ts/misc/run_suite.py
70 issues
Line: 147
Column: 72
env = {}
if not self.options.valgrind and self.options.trace:
env['OPENCV_TRACE'] = '1'
env['OPENCV_TRACE_LOCATION'] = 'OpenCVTrace-{}'.format(self.getLogBaseName(exe))
env['OPENCV_TRACE_SYNC_OPENCL'] = '1'
tempDir = TempEnvDir('OPENCV_TEMP_PATH', "__opencv_temp.")
tempDir.init()
cmd = self.wrapCommand(module, [exe] + args, env)
log.warning("Run: %s" % " ".join(cmd))
Reported by Pylint.
Line: 10
Column: 40
class TestSuite(object):
def __init__(self, options, cache, id):
self.options = options
self.cache = cache
self.nameprefix = "opencv_" + self.options.mode + "_"
self.tests = self.cache.gatherTests(self.nameprefix + "*", self.isTest)
self.id = id
Reported by Pylint.
Line: 64
Column: 9
for t in self.tests:
if name in self.getAliases(t):
return t
raise Err("Can not find test: %s", name)
def getTestList(self, white, black):
res = [t for t in white or self.tests if self.getAlias(t) not in black]
if len(res) == 0:
raise Err("No tests found")
Reported by Pylint.
Line: 107
Column: 9
try:
if 0 == execute(cmd, cwd=workingDir):
return True
except:
pass
return False
def runTest(self, module, path, logfile, workingDir, args=[]):
args = args[:]
Reported by Pylint.
Line: 111
Column: 5
pass
return False
def runTest(self, module, path, logfile, workingDir, args=[]):
args = args[:]
exe = os.path.abspath(path)
if module == "java":
cmd = [self.cache.ant_executable, "-Dopencv.build.type=%s" % self.cache.build_type]
if self.options.package:
Reported by Pylint.
Line: 152
Column: 13
tempDir = TempEnvDir('OPENCV_TEMP_PATH', "__opencv_temp.")
tempDir.init()
cmd = self.wrapCommand(module, [exe] + args, env)
log.warning("Run: %s" % " ".join(cmd))
ret = execute(cmd, cwd=workingDir, env=env)
try:
if not self.options.valgrind and self.options.trace and int(self.options.trace_dump) >= 0:
import trace_profiler
trace = trace_profiler.Trace(env['OPENCV_TRACE_LOCATION']+'.txt')
Reported by Pylint.
Line: 160
Column: 13
trace = trace_profiler.Trace(env['OPENCV_TRACE_LOCATION']+'.txt')
trace.process()
trace.dump(max_entries=int(self.options.trace_dump))
except:
import traceback
traceback.print_exc()
pass
tempDir.clean()
hostlogpath = os.path.join(workingDir, logfile)
Reported by Pylint.
Line: 163
Column: 17
except:
import traceback
traceback.print_exc()
pass
tempDir.clean()
hostlogpath = os.path.join(workingDir, logfile)
if os.path.isfile(hostlogpath):
return hostlogpath, ret
return None, ret
Reported by Pylint.
Line: 170
Column: 5
return hostlogpath, ret
return None, ret
def runTests(self, tests, black, workingDir, args=[]):
args = args[:]
logs = []
test_list = self.getTestList(tests, black)
if len(test_list) != 1:
args = [a for a in args if not a.startswith("--gtest_output=")]
Reported by Pylint.
Line: 1
Column: 1
#!/usr/bin/env python
import os
import re
import sys
from run_utils import Err, log, execute, getPlatformVersion, isColorEnabled, TempEnvDir
from run_long import LONG_TESTS_DEBUG_VALGRIND, longTestFilter
class TestSuite(object):
Reported by Pylint.
samples/dnn/text_detection.py
67 issues
Line: 25
Column: 1
# Import required modules
import numpy as np
import cv2 as cv
import math
import argparse
############ Add argument parser for command line arguments ############
parser = argparse.ArgumentParser(
Reported by Pylint.
Line: 10
Column: 1
Using classes from here: https://github.com/meijieru/crnn.pytorch/blob/master/models/crnn.py
More converted onnx text recognition models can be downloaded directly here:
Download link: https://drive.google.com/drive/folders/1cTbQ3nuZG-EKWak6emD_s8_hHXWz7lAr?usp=sharing
And these models taken from here:https://github.com/clovaai/deep-text-recognition-benchmark
import torch
from models.crnn import CRNN
Reported by Pylint.
Line: 26
Column: 1
# Import required modules
import numpy as np
import cv2 as cv
import math
import argparse
############ Add argument parser for command line arguments ############
parser = argparse.ArgumentParser(
description="Use this script to run TensorFlow implementation (https://github.com/argman/EAST) of "
Reported by Pylint.
Line: 27
Column: 1
import numpy as np
import cv2 as cv
import math
import argparse
############ Add argument parser for command line arguments ############
parser = argparse.ArgumentParser(
description="Use this script to run TensorFlow implementation (https://github.com/argman/EAST) of "
"EAST: An Efficient and Accurate Scene Text Detector (https://arxiv.org/abs/1704.03155v2)"
Reported by Pylint.
Line: 31
Column: 1
############ Add argument parser for command line arguments ############
parser = argparse.ArgumentParser(
description="Use this script to run TensorFlow implementation (https://github.com/argman/EAST) of "
"EAST: An Efficient and Accurate Scene Text Detector (https://arxiv.org/abs/1704.03155v2)"
"The OCR model can be obtained from converting the pretrained CRNN model to .onnx format from the github repository https://github.com/meijieru/crnn.pytorch"
"Or you can download trained OCR model directly from https://drive.google.com/drive/folders/1cTbQ3nuZG-EKWak6emD_s8_hHXWz7lAr?usp=sharing")
parser.add_argument('--input',
help='Path to input image or video file. Skip this argument to capture frames from a camera.')
Reported by Pylint.
Line: 32
Column: 1
############ Add argument parser for command line arguments ############
parser = argparse.ArgumentParser(
description="Use this script to run TensorFlow implementation (https://github.com/argman/EAST) of "
"EAST: An Efficient and Accurate Scene Text Detector (https://arxiv.org/abs/1704.03155v2)"
"The OCR model can be obtained from converting the pretrained CRNN model to .onnx format from the github repository https://github.com/meijieru/crnn.pytorch"
"Or you can download trained OCR model directly from https://drive.google.com/drive/folders/1cTbQ3nuZG-EKWak6emD_s8_hHXWz7lAr?usp=sharing")
parser.add_argument('--input',
help='Path to input image or video file. Skip this argument to capture frames from a camera.')
parser.add_argument('--model', '-m', required=True,
Reported by Pylint.
Line: 33
Column: 1
parser = argparse.ArgumentParser(
description="Use this script to run TensorFlow implementation (https://github.com/argman/EAST) of "
"EAST: An Efficient and Accurate Scene Text Detector (https://arxiv.org/abs/1704.03155v2)"
"The OCR model can be obtained from converting the pretrained CRNN model to .onnx format from the github repository https://github.com/meijieru/crnn.pytorch"
"Or you can download trained OCR model directly from https://drive.google.com/drive/folders/1cTbQ3nuZG-EKWak6emD_s8_hHXWz7lAr?usp=sharing")
parser.add_argument('--input',
help='Path to input image or video file. Skip this argument to capture frames from a camera.')
parser.add_argument('--model', '-m', required=True,
help='Path to a binary .pb file contains trained detector network.')
Reported by Pylint.
Line: 34
Column: 1
description="Use this script to run TensorFlow implementation (https://github.com/argman/EAST) of "
"EAST: An Efficient and Accurate Scene Text Detector (https://arxiv.org/abs/1704.03155v2)"
"The OCR model can be obtained from converting the pretrained CRNN model to .onnx format from the github repository https://github.com/meijieru/crnn.pytorch"
"Or you can download trained OCR model directly from https://drive.google.com/drive/folders/1cTbQ3nuZG-EKWak6emD_s8_hHXWz7lAr?usp=sharing")
parser.add_argument('--input',
help='Path to input image or video file. Skip this argument to capture frames from a camera.')
parser.add_argument('--model', '-m', required=True,
help='Path to a binary .pb file contains trained detector network.')
parser.add_argument('--ocr', default="crnn.onnx",
Reported by Pylint.
Line: 36
Column: 1
"The OCR model can be obtained from converting the pretrained CRNN model to .onnx format from the github repository https://github.com/meijieru/crnn.pytorch"
"Or you can download trained OCR model directly from https://drive.google.com/drive/folders/1cTbQ3nuZG-EKWak6emD_s8_hHXWz7lAr?usp=sharing")
parser.add_argument('--input',
help='Path to input image or video file. Skip this argument to capture frames from a camera.')
parser.add_argument('--model', '-m', required=True,
help='Path to a binary .pb file contains trained detector network.')
parser.add_argument('--ocr', default="crnn.onnx",
help="Path to a binary .pb or .onnx file contains trained recognition network", )
parser.add_argument('--width', type=int, default=320,
Reported by Pylint.
Line: 40
Column: 1
parser.add_argument('--model', '-m', required=True,
help='Path to a binary .pb file contains trained detector network.')
parser.add_argument('--ocr', default="crnn.onnx",
help="Path to a binary .pb or .onnx file contains trained recognition network", )
parser.add_argument('--width', type=int, default=320,
help='Preprocess input image by resizing to a specific width. It should be multiple by 32.')
parser.add_argument('--height', type=int, default=320,
help='Preprocess input image by resizing to a specific height. It should be multiple by 32.')
parser.add_argument('--thr', type=float, default=0.5,
Reported by Pylint.
modules/java/generator/android-21/java/org/opencv/android/JavaCamera2View.java
67 issues
Line: 325
createCameraPreviewSession();
}
} catch (RuntimeException e) {
throw new RuntimeException("Interrupted while setCameraPreviewSize.", e);
}
return true;
}
private class JavaCamera2Frame implements CvCameraViewFrame {
Reported by PMD.
Line: 42
*/
@TargetApi(21)
public class JavaCamera2View extends CameraBridgeViewBase {
private static final String LOGTAG = "JavaCamera2View";
protected ImageReader mImageReader;
protected int mPreviewFormat = ImageFormat.YUV_420_888;
Reported by PMD.
Line: 42
*/
@TargetApi(21)
public class JavaCamera2View extends CameraBridgeViewBase {
private static final String LOGTAG = "JavaCamera2View";
protected ImageReader mImageReader;
protected int mPreviewFormat = ImageFormat.YUV_420_888;
Reported by PMD.
Line: 46
private static final String LOGTAG = "JavaCamera2View";
protected ImageReader mImageReader;
protected int mPreviewFormat = ImageFormat.YUV_420_888;
protected CameraDevice mCameraDevice;
protected CameraCaptureSession mCaptureSession;
protected CaptureRequest.Builder mPreviewRequestBuilder;
Reported by PMD.
Line: 47
private static final String LOGTAG = "JavaCamera2View";
protected ImageReader mImageReader;
protected int mPreviewFormat = ImageFormat.YUV_420_888;
protected CameraDevice mCameraDevice;
protected CameraCaptureSession mCaptureSession;
protected CaptureRequest.Builder mPreviewRequestBuilder;
protected String mCameraID;
Reported by PMD.
Line: 49
protected ImageReader mImageReader;
protected int mPreviewFormat = ImageFormat.YUV_420_888;
protected CameraDevice mCameraDevice;
protected CameraCaptureSession mCaptureSession;
protected CaptureRequest.Builder mPreviewRequestBuilder;
protected String mCameraID;
protected android.util.Size mPreviewSize = new android.util.Size(-1, -1);
Reported by PMD.
Line: 50
protected int mPreviewFormat = ImageFormat.YUV_420_888;
protected CameraDevice mCameraDevice;
protected CameraCaptureSession mCaptureSession;
protected CaptureRequest.Builder mPreviewRequestBuilder;
protected String mCameraID;
protected android.util.Size mPreviewSize = new android.util.Size(-1, -1);
private HandlerThread mBackgroundThread;
Reported by PMD.
Line: 51
protected CameraDevice mCameraDevice;
protected CameraCaptureSession mCaptureSession;
protected CaptureRequest.Builder mPreviewRequestBuilder;
protected String mCameraID;
protected android.util.Size mPreviewSize = new android.util.Size(-1, -1);
private HandlerThread mBackgroundThread;
protected Handler mBackgroundHandler;
Reported by PMD.
Line: 52
protected CameraDevice mCameraDevice;
protected CameraCaptureSession mCaptureSession;
protected CaptureRequest.Builder mPreviewRequestBuilder;
protected String mCameraID;
protected android.util.Size mPreviewSize = new android.util.Size(-1, -1);
private HandlerThread mBackgroundThread;
protected Handler mBackgroundHandler;
Reported by PMD.
Line: 53
protected CameraCaptureSession mCaptureSession;
protected CaptureRequest.Builder mPreviewRequestBuilder;
protected String mCameraID;
protected android.util.Size mPreviewSize = new android.util.Size(-1, -1);
private HandlerThread mBackgroundThread;
protected Handler mBackgroundHandler;
public JavaCamera2View(Context context, int cameraId) {
Reported by PMD.
modules/objdetect/misc/java/test/HOGDescriptorTest.java
66 issues
Line: 1
package org.opencv.test.objdetect;
import org.opencv.objdetect.HOGDescriptor;
import org.opencv.test.OpenCVTestCase;
public class HOGDescriptorTest extends OpenCVTestCase {
public void testCheckDetectorSize() {
fail("Not yet implemented");
Reported by PMD.
Line: 6
import org.opencv.objdetect.HOGDescriptor;
import org.opencv.test.OpenCVTestCase;
public class HOGDescriptorTest extends OpenCVTestCase {
public void testCheckDetectorSize() {
fail("Not yet implemented");
}
Reported by PMD.
Line: 8
public class HOGDescriptorTest extends OpenCVTestCase {
public void testCheckDetectorSize() {
fail("Not yet implemented");
}
public void testComputeGradientMatMatMat() {
fail("Not yet implemented");
Reported by PMD.
Line: 9
public class HOGDescriptorTest extends OpenCVTestCase {
public void testCheckDetectorSize() {
fail("Not yet implemented");
}
public void testComputeGradientMatMatMat() {
fail("Not yet implemented");
}
Reported by PMD.
Line: 12
fail("Not yet implemented");
}
public void testComputeGradientMatMatMat() {
fail("Not yet implemented");
}
public void testComputeGradientMatMatMatSize() {
fail("Not yet implemented");
Reported by PMD.
Line: 16
fail("Not yet implemented");
}
public void testComputeGradientMatMatMatSize() {
fail("Not yet implemented");
}
public void testComputeGradientMatMatMatSizeSize() {
fail("Not yet implemented");
Reported by PMD.
Line: 20
fail("Not yet implemented");
}
public void testComputeGradientMatMatMatSizeSize() {
fail("Not yet implemented");
}
public void testComputeMatListOfFloat() {
fail("Not yet implemented");
Reported by PMD.
Line: 24
fail("Not yet implemented");
}
public void testComputeMatListOfFloat() {
fail("Not yet implemented");
}
public void testComputeMatListOfFloatSize() {
fail("Not yet implemented");
Reported by PMD.
Line: 28
fail("Not yet implemented");
}
public void testComputeMatListOfFloatSize() {
fail("Not yet implemented");
}
public void testComputeMatListOfFloatSizeSize() {
fail("Not yet implemented");
Reported by PMD.
Line: 32
fail("Not yet implemented");
}
public void testComputeMatListOfFloatSizeSize() {
fail("Not yet implemented");
}
public void testComputeMatListOfFloatSizeSizeListOfPoint() {
fail("Not yet implemented");
Reported by PMD.