Merge remote-tracking branch 'refs/remotes/upstream/master'
3a4d9a77b2
@@ -59,7 +59,7 @@ denotes they are the parameters of possible lines in the image. (Image courtesy:

-Hough Tranform in OpenCV
+Hough Transform in OpenCV
=========================

Everything explained above is encapsulated in the OpenCV function, **cv2.HoughLines()**. It simply returns an array of :math:`(\rho,
@@ -78,7 +78,8 @@ gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,50,150,apertureSize = 3)

lines = cv2.HoughLines(edges,1,np.pi/180,200)
-for rho,theta in lines[0]:
+for line in lines:
+    rho,theta = line[0]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a*rho
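For context, this documentation fix tracks the 3.x Python bindings, where cv2.HoughLines() returns an N x 1 x 2 array of (rho, theta) pairs (the older bindings returned a 1 x N x 2 array, hence the previous lines[0] indexing), so each detection is unpacked via line[0]. A minimal runnable sketch of the corrected loop; the input file name is only a placeholder:

import cv2
import numpy as np

img = cv2.imread('sudoku.jpg')            # placeholder input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)

# cv2.HoughLines returns an N x 1 x 2 array of (rho, theta) pairs.
lines = cv2.HoughLines(edges, 1, np.pi/180, 200)
for line in lines:
    rho, theta = line[0]
    a, b = np.cos(theta), np.sin(theta)
    x0, y0 = a*rho, b*rho
    # extend the (rho, theta) line far in both directions before drawing
    pt1 = (int(x0 + 1000*(-b)), int(y0 + 1000*(a)))
    pt2 = (int(x0 - 1000*(-b)), int(y0 - 1000*(a)))
    cv2.line(img, pt1, pt2, (0, 0, 255), 2)

cv2.imwrite('houghlines.jpg', img)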
@@ -123,10 +124,9 @@ import numpy as np
img = cv2.imread('dave.jpg')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
-minLineLength = 100
-maxLineGap = 10
-lines = cv2.HoughLinesP(edges,1,np.pi/180,100,minLineLength,maxLineGap)
-for x1,y1,x2,y2 in lines[0]:
+lines = cv2.HoughLinesP(edges,1,np.pi/180,100,minLineLength=100,maxLineGap=10)
+for line in lines:
+    x1,y1,x2,y2 = line[0]
    cv2.line(img,(x1,y1),(x2,y2),(0,255,0),2)

cv2.imwrite('houghlines5.jpg',img)
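Likewise, cv2.HoughLinesP() in the 3.x bindings returns an N x 1 x 4 array of (x1, y1, x2, y2) segments, and the doc change passes minLineLength/maxLineGap by keyword so they are not swallowed by the optional output-array parameter that follows the threshold. A minimal sketch of the updated call, assuming the same 'dave.jpg' input as the tutorial:

import cv2
import numpy as np

img = cv2.imread('dave.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)

# Keyword arguments keep 100/10 from being bound to the optional
# 'lines' output parameter that sits after the threshold.
lines = cv2.HoughLinesP(edges, 1, np.pi/180, 100, minLineLength=100, maxLineGap=10)
for line in lines:
    x1, y1, x2, y2 = line[0]
    cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

cv2.imwrite('houghlines5.jpg', img)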
@@ -327,9 +327,9 @@ struct HWFeatures
    if (cpufile >= 0)
    {
        Elf32_auxv_t auxv;
-       const size_t size_auxv_t = sizeof(Elf32_auxv_t);
+       const size_t size_auxv_t = sizeof(auxv);

-       while ((size_t)read(cpufile, &auxv, sizeof(Elf32_auxv_t)) == size_auxv_t)
+       while ((size_t)read(cpufile, &auxv, size_auxv_t) == size_auxv_t)
        {
            if (auxv.a_type == AT_HWCAP)
            {
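The change above only makes the read size and the loop comparison use the same sizeof expression. For readers who want to inspect the same data from a script, here is a rough, hedged Python sketch of scanning /proc/self/auxv for AT_HWCAP (Linux only; the two-unsigned-long record layout and AT_HWCAP == 16 come from elf.h, not from this diff, and the native long size is used rather than a fixed Elf32 layout):

import struct

AT_HWCAP = 16                       # from <elf.h>
record = struct.Struct('2L')        # (a_type, a_val) as native unsigned longs

with open('/proc/self/auxv', 'rb') as f:
    data = f.read()

# Walk fixed-size records, mirroring the while(read(...) == size_auxv_t) loop.
for off in range(0, len(data) - record.size + 1, record.size):
    a_type, a_val = record.unpack_from(data, off)
    if a_type == AT_HWCAP:
        print('HWCAP = 0x%x' % a_val)
        break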
@@ -136,7 +136,8 @@ enum { CAP_PROP_POS_MSEC =0,
// Currently, these are supported through the libv4l interface only.
enum { CAP_MODE_BGR = 0, // BGR24 (default)
       CAP_MODE_RGB = 1, // RGB24
-      CAP_MODE_GRAY = 2 // Y8
+      CAP_MODE_GRAY = 2, // Y8
+      CAP_MODE_YUYV = 3 // YUYV
     };
@@ -299,7 +299,8 @@ enum
{
    CV_CAP_MODE_BGR = 0, // BGR24 (default)
    CV_CAP_MODE_RGB = 1, // RGB24
-   CV_CAP_MODE_GRAY = 2 // Y8
+   CV_CAP_MODE_GRAY = 2, // Y8
+   CV_CAP_MODE_YUYV = 3 // YUYV
};

enum
@@ -646,6 +646,8 @@ static inline int channels_for_mode(int mode)
    switch(mode) {
    case CV_CAP_MODE_GRAY:
        return 1;
+   case CV_CAP_MODE_YUYV:
+       return 2;
    default:
        return 3;
    }
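Taken together, the new CAP_MODE_YUYV / CV_CAP_MODE_YUYV value and the channels_for_mode() case let a libv4l-backed capture be asked for raw two-channel YUYV frames through the mode property. A hedged sketch of how that could be requested from Python (assumes a V4L/libv4l device at index 0; if the CAP_MODE_YUYV constant is not exposed by the bindings, the raw enum value 3 added above can be passed):

import cv2

cap = cv2.VideoCapture(0)          # libv4l-backed camera, per the header comment above
cap.set(cv2.CAP_PROP_MODE, 3)      # 3 == CAP_MODE_YUYV from the enum added above

ok, frame = cap.read()
if ok:
    # channels_for_mode() returns 2 for YUYV, so the frame should arrive
    # as an H x W x 2 array of raw YUYV bytes rather than converted BGR.
    print(frame.shape)
cap.release()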
@@ -713,31 +715,26 @@ static int _capture_V4L2 (CvCaptureCAM_V4L *capture, char *deviceName)
    /* libv4l will convert from any format to V4L2_PIX_FMT_BGR24,
       V4L2_PIX_FMT_RGV24, or V4L2_PIX_FMT_YUV420 */
    unsigned int requestedPixelFormat;
-   int width;
-   int height;
    switch (capture->mode) {
    case CV_CAP_MODE_RGB:
        requestedPixelFormat = V4L2_PIX_FMT_RGB24;
-       width = capture->width;
-       height = capture->height;
        break;
    case CV_CAP_MODE_GRAY:
        requestedPixelFormat = V4L2_PIX_FMT_YUV420;
-       width = capture->width;
-       height = capture->height;
        break;
+   case CV_CAP_MODE_YUYV:
+       requestedPixelFormat = V4L2_PIX_FMT_YUYV;
+       break;
    default:
        requestedPixelFormat = V4L2_PIX_FMT_BGR24;
-       width = capture->width;
-       height = capture->height;
        break;
    }
    CLEAR (capture->form);
    capture->form.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    capture->form.fmt.pix.pixelformat = requestedPixelFormat;
    capture->form.fmt.pix.field = V4L2_FIELD_ANY;
-   capture->form.fmt.pix.width = width;
-   capture->form.fmt.pix.height = height;
+   capture->form.fmt.pix.width = capture->width;
+   capture->form.fmt.pix.height = capture->height;

    if (-1 == xioctl (capture->deviceHandle, VIDIOC_S_FMT, &capture->form)) {
        fprintf(stderr, "VIDEOIO ERROR: libv4l unable to ioctl S_FMT\n");
@@ -949,6 +946,10 @@ static int _capture_V4L (CvCaptureCAM_V4L *capture, char *deviceName)
        requestedVideoPalette = VIDEO_PALETTE_YUV420;
        depth = 8;
        break;
+   case CV_CAP_MODE_YUYV:
+       requestedVideoPalette = VIDEO_PALETTE_YUYV;
+       depth = 16;
+       break;
    default:
        requestedVideoPalette = VIDEO_PALETTE_RGB24;
        depth = 24;
@@ -1319,6 +1320,7 @@ static IplImage* icvRetrieveFrameCAM_V4L( CvCaptureCAM_V4L* capture, int) {
    switch(capture->imageProperties.palette) {
    case VIDEO_PALETTE_RGB24:
    case VIDEO_PALETTE_YUV420:
+   case VIDEO_PALETTE_YUYV:
        memcpy((char *)capture->frame.imageData,
               (char *)(capture->memoryMap + capture->memoryBuffer.offsets[capture->bufferIndex]),
               capture->frame.imageSize);
@@ -1464,6 +1466,10 @@ static int icvSetVideoSize( CvCaptureCAM_V4L* capture, int w, int h) {
        cropHeight = h*8;
        cropWidth = w*8;
        break;
+   case CV_CAP_MODE_YUYV:
+       cropHeight = h*16;
+       cropWidth = w*16;
+       break;
    default:
        cropHeight = h*24;
        cropWidth = w*24;
@@ -1719,6 +1725,7 @@ static int icvSetPropertyCAM_V4L(CvCaptureCAM_V4L* capture, int property_id, dou
    case CV_CAP_MODE_BGR:
    case CV_CAP_MODE_RGB:
    case CV_CAP_MODE_GRAY:
+   case CV_CAP_MODE_YUYV:
        capture->mode = mode;
        /* recreate the capture buffer for the same output resolution
           but a different pixel format */
@@ -18,23 +18,25 @@ src = cv2.imread(fn)
dst = cv2.Canny(src, 50, 200)
cdst = cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR)

-# HoughLines()
-# lines = cv2.HoughLines(dst, 1, math.pi/180.0, 50, np.array([]), 0, 0)
-# a,b,c = lines.shape
-# for i in range(b):
-#    rho = lines[0][i][0]
-#    theta = lines[0][i][1]
-#    a = math.cos(theta)
-#    b = math.sin(theta)
-#    x0, y0 = a*rho, b*rho
-#    pt1 = ( int(x0+1000*(-b)), int(y0+1000*(a)) )
-#    pt2 = ( int(x0-1000*(-b)), int(y0-1000*(a)) )
-#    cv2.line(cdst, pt1, pt2, (0, 0, 255), 3, cv2.LINE_AA)
+if True: # HoughLinesP
+    lines = cv2.HoughLinesP(dst, 1, math.pi/180.0, 40, np.array([]), 50, 10)
+    a,b,c = lines.shape
+    for i in range(a):
+        cv2.line(cdst, (lines[i][0][0], lines[i][0][1]), (lines[i][0][2], lines[i][0][3]), (0, 0, 255), 3, cv2.LINE_AA)

+else:    # HoughLines
+    lines = cv2.HoughLines(dst, 1, math.pi/180.0, 50, np.array([]), 0, 0)
+    a,b,c = lines.shape
+    for i in range(a):
+        rho = lines[i][0][0]
+        theta = lines[i][0][1]
+        a = math.cos(theta)
+        b = math.sin(theta)
+        x0, y0 = a*rho, b*rho
+        pt1 = ( int(x0+1000*(-b)), int(y0+1000*(a)) )
+        pt2 = ( int(x0-1000*(-b)), int(y0-1000*(a)) )
+        cv2.line(cdst, pt1, pt2, (0, 0, 255), 3, cv2.LINE_AA)

-lines = cv2.HoughLinesP(dst, 1, math.pi/180.0, 50, np.array([]), 50, 10)
-a,b,c = lines.shape
-for i in range(b):
-    cv2.line(cdst, (lines[0][i][0], lines[0][i][1]), (lines[0][i][2], lines[0][i][3]), (0, 0, 255), 3, cv2.LINE_AA)

cv2.imshow("source", src)
cv2.imshow("detected lines", cdst)
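For reference, the loop-bound change in the sample (range(b) to range(a)) matches the layout already noted for the tutorial: the first axis of the result is the number of detections. A tiny self-contained check of that layout, using a synthetic edge image instead of the sample's input file:

import math
import cv2
import numpy as np

# One synthetic straight edge, just to inspect the output layout.
dst = np.zeros((200, 200), np.uint8)
cv2.line(dst, (10, 10), (190, 190), 255, 1)

lines = cv2.HoughLinesP(dst, 1, math.pi/180.0, 40, np.array([]), 50, 10)
a, b, c = lines.shape
print(a, b, c)   # N, 1, 4 -> the sample loops over range(a), one segment per i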