[Deep Learning] Using a CNN for License Plate Recognition and Building a Simple GUI

Main reference blog: https://blog.csdn.net/GK_2014/article/details/84779166

The core algorithm is unchanged. On top of it, I added a plate-colour judgment based on H (hue) and S (saturation), and built a simple GUI with tkinter that can open the camera, take a photo, and then run recognition on that photo.
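
The colour judgment is just a per-pixel vote in HSV space: convert the candidate plate to HSV, count how many pixels fall into the yellow, green or blue hue band with sufficient saturation, and pick the colour that covers at least half of the plate. A minimal sketch of the idea (the helper name judge_plate_color is mine; the thresholds are the ones used later inside cnn_select_carPlate):

    import cv2

    def judge_plate_color(plate_bgr):
        # per-pixel vote in HSV space, as done in cnn_select_carPlate
        hsv = cv2.cvtColor(plate_bgr, cv2.COLOR_BGR2HSV)
        rows, cols = hsv.shape[:2]
        total = rows * cols
        yellow = green = blue = 0
        for i in range(rows):
            for j in range(cols):
                h, s = hsv.item(i, j, 0), hsv.item(i, j, 1)
                if 11 < h <= 34 and s > 34:
                    yellow += 1
                elif 35 < h <= 99 and s > 34:
                    green += 1
                elif 99 < h <= 124 and s > 34:
                    blue += 1
        # a colour wins if it covers at least half of the plate pixels
        if yellow * 2 >= total:
            return "yellow"
        if green * 2 >= total:
            return "green"
        if blue * 2 >= total:
            return "blue"
        return "no"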

The interface looks like this:

The code for carPlateIdentity.py is given below, with some comments added:

  1 import cv2
  2 import os
  3 import sys
  4 import numpy as np
  5 import tensorflow as tf
  6 
  7 config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
  8 sess = tf.Session(config=config)
  9 
 10 char_table = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
 11               'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '', '', '', '', '',
 12               '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
 13               '', '', '', '', '', '', '', '', '']  # indices 36-66 should hold the 31 Chinese-character classes (province abbreviations), completing the 67-class table; the characters are missing from this listing
 14 
 15 def hist_image(img):
 16     assert img.ndim==2
 17     hist = [0 for i in range(256)]
 18     img_h,img_w = img.shape[0],img.shape[1]
 19 
 20     for row in range(img_h):
 21         for col in range(img_w):
 22             hist[img[row,col]] += 1
 23     p = [hist[n]/(img_w*img_h) for n in range(256)]
 24     p1 = np.cumsum(p)
 25     for row in range(img_h):
 26         for col in range(img_w):
 27             v = img[row,col]
 28             img[row,col] = p1[v]*255
 29     return img
 30 
 31 def find_board_area(img):
 32     assert img.ndim==2
 33     img_h,img_w = img.shape[0],img.shape[1]
 34     top,bottom,left,right = 0,img_h,0,img_w
 35     flag = False
 36     h_proj = [0 for i in range(img_h)]
 37     v_proj = [0 for i in range(img_w)]
 38 
 39     for row in range(round(img_h*0.5),round(img_h*0.8),3):
 40         for col in range(img_w):
 41             if img[row,col]==255:
 42                 h_proj[row] += 1
 43         if flag==False and h_proj[row]>12:
 44             flag = True
 45             top = row
 46         if flag==True and row>top+8 and h_proj[row]<12:
 47             bottom = row
 48             flag = False
 49 
 50     for col in range(round(img_w*0.3),img_w,1):
 51         for row in range(top,bottom,1):
 52             if img[row,col]==255:
 53                 v_proj[col] += 1
 54         if flag==False and (v_proj[col]>10 or v_proj[col]-v_proj[col-1]>5):
 55             left = col
 56             break
 57     return left,top,120,bottom-top-10
 58 
 59 def verify_scale(rotate_rect):
 60    error = 0.4
 61    aspect = 4#4.7272
 62    min_area = 10*(10*aspect)#min_area=10*(10*4)=400
 63    max_area = 150*(150*aspect)#max_area=150*(150*4)=90000
 64    min_aspect = aspect*(1-error)#min_aspect=4*(1-0.4)=2.4
 65    max_aspect = aspect*(1+error)#max_aspect=4*(1+0.4)=6.4
 66    theta = 30
 67 
 68    # 宽或高为0,不满足矩形直接返回False
 69    if rotate_rect[1][0]==0 or rotate_rect[1][1]==0:    
 70        return False
 71    '''
 72    rotate_rect[0]为外接矩形的中心坐标(x,y);[1][0]为宽,[1][1]为高,[2]为旋转角度.
 73    旋转角度θ是水平轴(x轴)逆时针旋转,直到碰到矩形的第一条边停住,此时该边与水平轴的夹角。并且这个边的边长是width,另一条边边长是height
 74    在opencv中,坐标系原点在左上角,相对于x轴,逆时针旋转角度为负,顺时针旋转角度为正。所以,θ∈(-90度,0]
 75    '''
 76 
 77    r = rotate_rect[1][0]/rotate_rect[1][1]#r=宽除以高
 78    r = max(r,1/r)
 79    area = rotate_rect[1][0]*rotate_rect[1][1]#area为实际面积
 80    if area>min_area and area<max_area and r>min_aspect and r<max_aspect:#如果实际面积大于最小面积且小于最大面积,并且2.4<r<6.4
 81        # 矩形的倾斜角度不超过theta
 82        if ((rotate_rect[1][0] < rotate_rect[1][1] and rotate_rect[2] >= -90 and rotate_rect[2] < -(90 - theta)) or#旋转角度在[-90,-60)
 83                (rotate_rect[1][1] < rotate_rect[1][0] and rotate_rect[2] > -theta and rotate_rect[2] <= 0)):#旋转角度在(-30,0]
 84            return True
 85    return False
 86 
 87 def img_Transform(car_rect,image):#传入填充掩膜后的最小矩形,原图
 88     img_h,img_w = image.shape[:2]
 89     rect_w,rect_h = car_rect[1][0],car_rect[1][1]
 90     angle = car_rect[2]
 91 
 92     return_flag = False
 93     if car_rect[2]==0:#旋转角度为0
 94         return_flag = True
 95     if car_rect[2]==-90 and rect_w<rect_h:#旋转角度=-90并且矩形的宽<高
 96         rect_w, rect_h = rect_h, rect_w
 97         return_flag = True
 98     if return_flag:
 99         car_img = image[int(car_rect[0][1]-rect_h/2):int(car_rect[0][1]+rect_h/2),
100                   int(car_rect[0][0]-rect_w/2):int(car_rect[0][0]+rect_w/2)]
101         return car_img
102 
103     car_rect = (car_rect[0],(rect_w,rect_h),angle)
104     box = cv2.boxPoints(car_rect)#获取矩形的四个顶点坐标
105 
106     heigth_point = right_point = [0,0]
107     left_point = low_point = [car_rect[0][0], car_rect[0][1]]#矩形中心点坐标(x,y)
108     for point in box:
109         if left_point[0] > point[0]:
110             left_point = point
111         if low_point[1] > point[1]:
112             low_point = point
113         if heigth_point[1] < point[1]:
114             heigth_point = point
115         if right_point[0] < point[0]:
116             right_point = point
117 
118     if left_point[1] <= right_point[1]:  # 正角度
119         new_right_point = [right_point[0], heigth_point[1]]
120         pts1 = np.float32([left_point, heigth_point, right_point])
121         pts2 = np.float32([left_point, heigth_point, new_right_point])  # 字符只是高度需要改变
122         M = cv2.getAffineTransform(pts1, pts2)
123         print('Mat1',M)
124         print('pts1_1',pts1)
125         print('pts1_2',pts2)
126         '''
127         仿射变换,其实是将图形在2D平面内做变换,变换前后图片中原来平行的线仍会保持平行,可以想象是将长方形变换为平行四边形
128         M=cv2.getAffineTransform(pos1,pos2),其中两个位置就是变换前后的对应位置关系。输出的就是仿射矩阵M,shape为[2,3]
129         cv.getAffineTransform将创建一个2x3矩阵,该矩阵将传递给cv.warpAffine。
130         '''
131         dst = cv2.warpAffine(image, M, (round(img_w*2), round(img_h*2)))
132         '''
133         cv2.warpAffine(src, M, dsize[, dst[, flags[, borderMode[, borderValue]]]]) → dst
134                        dsize为输出图像的大小;
135                        flags表示插值方式,默认为 flags=cv2.INTER_LINEAR,表示线性插值,此外还有:cv2.INTER_NEAREST(最近邻插值)、cv2.INTER_AREA(区域插值)、cv2.INTER_CUBIC(三次样条插值)、cv2.INTER_LANCZOS4(Lanczos插值)
136                        borderMode - 边界像素模式
137                        borderValue - 边界填充值; 默认情况下,它为0
138         round() 方法返回浮点数x的四舍五入值。round(x,n) 返回浮点数x的四舍五入的小数点后的n位数值
139         '''
140         car_img = dst[int(left_point[1]):int(heigth_point[1]), int(left_point[0]):int(new_right_point[0])]
141 
142     elif left_point[1] > right_point[1]:  # 负角度
143         new_left_point = [left_point[0], heigth_point[1]]
144         pts1 = np.float32([left_point, heigth_point, right_point])
145         pts2 = np.float32([new_left_point, heigth_point, right_point])  # 字符只是高度需要改变
146         print('pts2_1',pts1)
147         print('pts2_2',pts2)
148         M = cv2.getAffineTransform(pts1, pts2)
149         print('Mat2',M)
150         dst = cv2.warpAffine(image, M, (round(img_w*2), round(img_h*2)))
151         car_img = dst[int(right_point[1]):int(heigth_point[1]), int(new_left_point[0]):int(right_point[0])]
152 
153     return car_img
154 
155 def pre_process(orig_img):
156 
157     gray_img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2GRAY)    #将原图转换为灰度图
158     cv2.imwrite('./carIdentityData/opencv_output/gray_img.jpg', gray_img)
159     #cv2.imshow('gray_img', gray_img)
160 
161     blur_img = cv2.blur(gray_img, (3, 3))   #均值滤波
162     cv2.imwrite('./carIdentityData/opencv_output/blur.jpg', blur_img)
163     #cv2.imshow('blur', blur_img)
164 
165     sobel_img = cv2.Sobel(blur_img, cv2.CV_16S, 1, 0, ksize=3)   #沿x轴求导,找边缘
166     sobel_img = cv2.convertScaleAbs(sobel_img)   #转换图片格式
167     cv2.imwrite('./carIdentityData/opencv_output/sobel.jpg', sobel_img)
168     #cv2.imshow('sobel', sobel_img)
169 
170     hsv_img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2HSV)   #将原图的颜色域转到HSV域,色调、饱和度、亮度
171     cv2.imwrite('./carIdentityData/opencv_output/hsv_pic.jpg', hsv_img)
172     #cv2.imshow('hsv_pic',hsv_img)
173 
174     h, s, v = hsv_img[:, :, 0], hsv_img[:, :, 1], hsv_img[:, :, 2]   #h,s,v分别取矩阵的第一列、第二列、第三列的所有元素
175     # keep pixels whose hue falls in the yellow (11,34), green (35,99) or blue (100,124) band and whose saturation and value both exceed 70
176     blue_img = (((h > 11) & (h < 34)) | ((h > 35) & (h < 99)) | ((h > 100) & (h < 124))) & (s > 70) & (v > 70)
177     blue_img = blue_img.astype('float32')  #将blue_img格式转换为浮点型32位
178     cv2.imwrite('./carIdentityData/opencv_output/blue&yellow.jpg', blue_img)
179     #cv2.imshow('blue&yellow',blue_img)
180 
181     mix_img = np.multiply(sobel_img, blue_img)    #两个数组或矩阵相乘,对应位置直接相乘
182     cv2.imwrite('./carIdentityData/opencv_output/mix.jpg', mix_img)
183     #cv2.imshow('mix', mix_img)
184 
185     mix_img = mix_img.astype(np.uint8)
186 
187     ret, binary_img = cv2.threshold(mix_img, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)  
188     #ret, binary_img = cv2.threshold(mix_img, 50, 255, cv2.THRESH_BINARY)    
189     '''
190     使用最大类间方差法将图像二值化,cv2.THRESH_OTSU自适应找出最合适的阈值
191     cv2.threshold(src, thresh, maxval, type[, dst]) → retval, dst
192                   src:表示的是图片源
193                   thresh:表示的是阈值(起始值)
194                   maxval:表示的是最大值
195                   type:表示的是这里划分的时候使用的是什么类型的算法,常用值为0(cv2.THRESH_BINARY),超过阈值的设置为最大值255,其他设置为0
196     返回值:
197           ret :cv2.THRESH_OTSU 求解出的阈值
198           binary_img :二值图像
199     '''
200     print('ret',ret)
201     cv2.imwrite('./carIdentityData/opencv_output/binary.jpg', binary_img)
202     #cv2.imshow('binary',binary_img)
203 
204     kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(21,5))     #获得结构元素
205     close_img = cv2.morphologyEx(binary_img, cv2.MORPH_CLOSE, kernel)  #闭操作,先膨胀再腐蚀,使图像轮廓更光滑(need more)
206     cv2.imwrite('./carIdentityData/opencv_output/close.jpg', close_img)
207     #cv2.imshow('close', close_img)
208 
209     return close_img
210 
211 # 给候选车牌区域做漫水填充算法,一方面补全上一步求轮廓可能存在轮廓歪曲的问题,
212 # 另一方面也可以将非车牌区排除掉
213 def verify_color(rotate_rect,src_image):
214     img_h,img_w = src_image.shape[:2]#shape[0],shape[1]
215     mask = np.zeros(shape=[img_h+2,img_w+2],dtype=np.uint8)
216     #cv2.imshow('flood_mask',mask)
217     connectivity = 4#0100
218     #种子点上下左右4邻域与种子颜色值在[loDiff,upDiff]的被涂成new_value,也可设置8邻域,
219     #如果设为4,表示填充算法只考虑当前像素水平方向和垂直方向的相邻点;如果设为 8,除上述相邻点外,还会包含对角线方向的相邻点。
220     loDiff,upDiff = 30,30#负差最大值,正差最大值.loDiff表示当前观察像素值与其部件邻域像素值或者待加入该部件的种子像素之间的亮度或颜色之负差(lower brightness/color difference)的最大值。 
221     new_value = 255
222     flags = connectivity#0100
223     print('flags1',flags)
224     flags |= cv2.FLOODFILL_FIXED_RANGE  #按位或,FLOODFILL_FIXED_RANGE=2**16=65536.考虑当前像素与种子象素之间的差,不设置的话则和邻域像素比较,运算结果为01 0000 0000 0000 0100,十进制为65540
225     print('flags2',flags)
226     print('cv2.FLOODFILL_FIXED_RANGE',cv2.FLOODFILL_FIXED_RANGE)
227     '''
228     flags = flags | cv2.FLOODFILL_FIXED_RANGE
229     cv.FLOODFILL_FIXED_RANGE: 指定颜色填充,二进制为 01 0000 0000 0000 0000.填充时的判断标准是:src(seed.x’, seed.y’) - loDiff <= src(x, y) <= src(seed.x’, seed.y’) +upDiff,此范围内被填充指定的颜色
230     cv.FLOODFILL_MASK_ONLY:    指定位置填充,二进制为 10 0000 0000 0000 0000
231     '''
232     flags |= new_value << 8  
233     #<<左移动运算符:运算数的各二进位全部左移若干位,由 << 右边的数字指定了移动的位数,高位丢弃,低位补0.  255左移8位是1111111100000000,运算结果为01 1111 1111 0000 0100,十进制为130820
234     print('flags3',flags)
235     flags |= cv2.FLOODFILL_MASK_ONLY 
236     #FLOODFILL_MASK_ONLY=2**17=131072.设置这个标识符则不会去填充改变原始图像,而是去填充掩模图像(mask),运算结果为11 1111 1111 0000 0100,十进制为261892
237     print('flags4',flags)
238     print('FLOODFILL_MASK_ONLY',cv2.FLOODFILL_MASK_ONLY)
239     '''
240     相当于flags = 4 | cv2.FLOODFILL_FIXED_RANGE | 255 << 8 | cv2.FLOODFILL_MASK_ONLY
241     通俗来讲,就是用4邻域填充,并填充固定像素值范围,填充掩码而不是填充源图像,以及设填充值为255
242     标识符的0-7位为connectivity,8-15位为new_value左移8位的值,16-23位为cv2.FLOODFILL_FIXED_RANGEcv2.FLOODFILL_MASK_ONLY或者0
243         1.低8位用于控制算法的连通性,可取4(填充算法只考虑当前享受水平方向和垂直方向)/8(还考虑对角线方向)
244         2.高8位可为0/FLOODFILL_FIXED_RANGE(考虑当前像素与种子像素之间的差)/FLOODFILL_MASK_ONLY(不填充改变原始图像,去填充掩模图像)
245         3.中间8位制定填充掩码图像的值
246     最终得到的flags为11 1111 1111 0000 0100,十进制为261892
247     '''
248     rand_seed_num = 5000 #生成多个随机种子
249     valid_seed_num = 200 #从rand_seed_num中随机挑选valid_seed_num个有效种子
250     adjust_param = 0.1
251     box_points = cv2.boxPoints(rotate_rect)
252     '''
253     cv2.boxPoints根据minAreaRect的返回值rotate_rect计算矩形的四个点
254     旋转的边界矩形,这个边界矩形是面积最小的,因为它考虑了对象的旋转。用到的函数为cv2.minAreaRect()。返回的是一个Box2D结构,
255         其中包含矩形左上角角点的坐标(x,y),矩形的宽和高(w,h),以及旋转角度。但是要绘制这个矩形需要矩形的4个角点,可以通过函数cv2.boxPoints()获得。
256         返回形式[ [x0,y0], [x1,y1], [x2,y2], [x3,y3] ]
257     '''
258     box_points_x = [n[0] for n in box_points]#每一个坐标点的x值
259     print('box_points_x1',box_points_x)
260     box_points_x.sort(reverse=False)#list.sort( key=None, reverse=False),reverse -- 排序规则,reverse = True 降序, reverse = False 升序(默认)
261     print('box_points_x2',box_points_x)
262     adjust_x = int((box_points_x[2]-box_points_x[1])*adjust_param)#=(第三个x-第二个x*0.1),对角点
263     print('adjust_x',adjust_x)
264     col_range = [box_points_x[1]+adjust_x,box_points_x[2]-adjust_x]
265     print('col_range',col_range)
266     box_points_y = [n[1] for n in box_points]#每一个坐标点的y值
267     print('box_points_y1',box_points_y)
268     box_points_y.sort(reverse=False)
269     print('box_points_y2',box_points_y)
270     adjust_y = int((box_points_y[2]-box_points_y[1])*adjust_param)
271     print('adjust_y',adjust_y)
272     row_range = [box_points_y[1]+adjust_y, box_points_y[2]-adjust_y]
273     print('row_range',row_range)
274     # 如果以上方法种子点在水平或垂直方向可移动的范围很小,则采用旋转矩阵对角线来设置随机种子点
275     if (col_range[1]-col_range[0])/(box_points_x[3]-box_points_x[0])<0.4 \
276         or (row_range[1]-row_range[0])/(box_points_y[3]-box_points_y[0])<0.4:# fall back to the rectangle diagonals when either range is too narrow (ratio < 0.4)
277         points_row = []
278         points_col = []
279         for i in range(2):
280             pt1,pt2 = box_points[i],box_points[i+2]#第一个和第三个坐标点,第二个和第四个坐标点
281             x_adjust,y_adjust = int(adjust_param*(abs(pt1[0]-pt2[0]))),int(adjust_param*(abs(pt1[1]-pt2[1])))
282             if (pt1[0] <= pt2[0]):
283                 pt1[0], pt2[0] = pt1[0] + x_adjust, pt2[0] - x_adjust
284             else:
285                 pt1[0], pt2[0] = pt1[0] - x_adjust, pt2[0] + x_adjust
286             if (pt1[1] <= pt2[1]):
287                 pt1[1], pt2[1] = pt1[1] + y_adjust, pt2[1] - y_adjust
288             else:
289                 pt1[1], pt2[1] = pt1[1] - y_adjust, pt2[1] + y_adjust
290             temp_list_x = [int(x) for x in np.linspace(pt1[0],pt2[0],int(rand_seed_num /2))]
291             temp_list_y = [int(y) for y in np.linspace(pt1[1],pt2[1],int(rand_seed_num /2))]
292             points_col.extend(temp_list_x)
293             points_row.extend(temp_list_y)
294         print('in for')
295     else:
296         points_row = np.random.randint(row_range[0],row_range[1],size=rand_seed_num)
297         '''
298         numpy.random.randint(low, high=None, size=None, dtype='l')返回一个随机整型数,范围从低(包括)到高(不包括),即[low, high)。如果没有写参数high的值,则返回[0,low)的值。
299         定义rand_seed_num = 5000。size为输出随机数的尺寸,这里输出5000个随机数
300         '''
301         points_col = np.linspace(col_range[0],col_range[1],num=rand_seed_num).astype(int)
302         '''
303         np.linspace主要用来创建等差数列。np.linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None),在start和stop之间返回均匀间隔的数据
304                                                      endpoint:True则包含stop;False则不包含stop; retstep如果为True则结果会给出数据间隔
305                                          在[col_range[0],col_range[1]]之间输出包含5000个数据的等差数列,并将其修改格式为整型
306         '''
307         print('in else')
308 
309     points_row = np.array(points_row)
310     points_col = np.array(points_col)
311     hsv_img = cv2.cvtColor(src_image, cv2.COLOR_BGR2HSV)
312     h,s,v = hsv_img[:,:,0],hsv_img[:,:,1],hsv_img[:,:,2]
313     # 将随机生成的多个种子依次做漫水填充,理想情况是整个车牌被填充
314     flood_img = src_image.copy()
315     seed_cnt = 0
316     for i in range(rand_seed_num):
317         rand_index = np.random.choice(rand_seed_num,1,replace=False)#从[0,5000)之间随机抽取一个数,且不能重复
318         row,col = points_row[rand_index],points_col[rand_index]
319         # a random seed is only used if it falls on a plate background colour: roughly yellow (11,34), green (35,100) or blue (100,124) hue, with saturation and value above 70
320         if (((h[row,col]>11)&(h[row,col]<34))|((h[row,col]>35)&(h[row,col]<100))|((h[row,col]>100)&(h[row,col]<124)))&(s[row,col]>70)&(v[row,col]>70):
321             cv2.floodFill(src_image, mask, (col,row), (255, 255, 255), (loDiff,) * 3, (upDiff,) * 3, flags)
322             '''
323             floodFill(image, mask, seedPoint, newVal, loDiff=None, upDiff=None, flags=None)
324             floodFill( 1.操作的图像, 2.掩模, 3.起始像素值,4.填充的颜色, 5.填充颜色的低值, 6.填充颜色的高值 ,7.填充的方法)   (255, 255, 255)是白色
325                       mask = np.zeros(shape=[img_h+2,img_w+2],dtype=np.uint8)
326                       loDiff,upDiff = 30,30;(loDiff,) * 3=(loDiff,loDiff,loDiff)
327                       
328             '''
329             cv2.circle(flood_img,center=(col,row),radius=2,color=(0,0,255),thickness=2)
330             '''
331             cv2.circle(img, center, radius, color[, thickness[, lineType[, shift]]]),根据给定的圆心和半径等画圆
332             center:圆心位置;radius:圆的半径;color:圆的颜色;thickness:圆形轮廓的粗细(如果为正),负厚度表示要绘制实心圆;lineType:圆边界的类型;shift:中心坐标和半径值中的小数位数。
333             '''
334             seed_cnt += 1
335             if seed_cnt >= valid_seed_num:
336                 break
337     #======================调试用======================#
338     show_seed = np.random.uniform(1,100,1).astype(np.uint16)
339     '''
340     numpy.random.uniform(low,high,size),从一个均匀分布[low,high)中随机采样,注意定义域是左闭右开,即包含low,不包含high.
341            low: 采样下界,float类型,默认值为0;
342            high: 采样上界,float类型,默认值为1;
343            size: 输出样本数目,为int或元组(tuple)类型
344     返回值:ndarray类型,其形状和参数size中描述一致
345     '''
346     #cv2.imshow('floodfill'+str(show_seed),flood_img)
347     #cv2.imshow('flood_mask'+str(show_seed),mask)
348     
349     cv2.imwrite('./carIdentityData/opencv_output/floodfill.jpg', flood_img)
350     cv2.imwrite('./carIdentityData/opencv_output/flood_mask.jpg', mask)
351     #======================调试用======================#
352     # 获取掩模上被填充点的像素点,并求点集的最小外接矩形
353     mask_points = []
354     for row in range(1,img_h+1):
355         for col in range(1,img_w+1):
356             if mask[row,col] != 0:
357                 mask_points.append((col-1,row-1))#把不是黑色的像素点添加进mask_points,把mask被填充成白色的点集添加进去
358     mask_rotateRect = cv2.minAreaRect(np.array(mask_points))#获取点集的最小矩形
359     if verify_scale(mask_rotateRect):
360         return True,mask_rotateRect
361     else:
362         return False,mask_rotateRect
363 
364 # 车牌定位
365 def locate_carPlate(orig_img,pred_image):
366     car_plate_w, car_plate_h = 136, 36 #dengjie.tkadd
367     carPlate_list = []
368     temp1_orig_img = orig_img.copy() #调试用
369     temp2_orig_img = orig_img.copy() #调试用
370     #cloneImg,contours,heriachy = cv2.findContours(pred_image,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
371     contours, heriachy = cv2.findContours(pred_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
372     #print(contours)    #dengjie
373     # RETR_EXTERNAL找最外层轮廓,CHAIN_APPROX_SIMPLE仅保存轮廓的拐点信息,把所有轮廓拐点处的点保存入contours向量内,拐点与拐点之间直线段上的信息点不予保留。heriachy这里没有用到
374     for i,contour in enumerate(contours):     #enumerate同时获得列表或者字符串的索引和值,i是索引,contour是值
375         cv2.drawContours(temp1_orig_img, contours, i, (0, 255, 0), 2)#用绿色线宽为2的线条画出原图的所有轮廓
376         # 获取轮廓最小外接矩形,返回值rotate_rect。rotate_rect是点集数组或向量(里面存放的是点的坐标),并且这个点集中的元素不定个数(中心(x,y), (宽,高), 旋转角度)
377         rotate_rect = cv2.minAreaRect(contour)
378         # 根据矩形面积大小和长宽比判断是否是车牌
379         if verify_scale(rotate_rect):#return True
380             ret,rotate_rect2 = verify_color(rotate_rect,temp2_orig_img)#返回True和mask上被填充点的像素点集的最小矩形
381             if ret == False:
382                 continue
383             # 车牌位置矫正
384             car_plate = img_Transform(rotate_rect2, temp2_orig_img)#做仿射变换
385             car_plate = cv2.resize(car_plate,(car_plate_w,car_plate_h)) #调整尺寸为后面CNN车牌识别做准备
386             #========================调试看效果========================#
387             box = cv2.boxPoints(rotate_rect2)#获取矩形顶点坐标
388             for k in range(4):
389                 n1,n2 = k%4,(k+1)%4
390                 cv2.line(temp1_orig_img,(int(box[n1][0]),int(box[n1][1])),(int(box[n2][0]),int(box[n2][1])),(0,0,255),2)
391                 '''
392                 cv2.line(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) → img
393                        img:原图
394                        pt1:直线起点坐标,(box[n1][0],box[n1][1]);(box[0][0],box[0][1])
395                        pt2,直线终点坐标,(box[n2][0],box[n2][1]);(box[1][0],box[1][1])
396                        color,当前绘画的颜色;如在BGR模式下,传递(255,0,0)表示蓝色画笔。
397                        hickness,画笔的粗细,线宽。若是-1表示画封闭图像,如填充的圆。默认值是1
398                        lineType,线条的类型
399                 '''
400             #cv2.imshow('opencv_' + str(i), car_plate)
401             cv2.imwrite('./carIdentityData/opencv_output/opencv_%d.jpg'%(i), car_plate)
402             #========================调试看效果========================#
403             carPlate_list.append(car_plate)
404             #print('carPlate_list',carPlate_list)
405 
406     cv2.imwrite('./carIdentityData/opencv_output/contour.jpg', temp1_orig_img)
407     #cv2.imshow('contour', temp1_orig_img)
408     return carPlate_list
409 
410 # 左右切割
411 def horizontal_cut_chars(plate):#传入车牌二值图像中的字符部分
412     char_addr_list = []
413     area_left,area_right,char_left,char_right= 0,0,0,0
414     img_h,img_w = plate.shape[:2]
415     
416     # 获取车牌每列边缘像素点个数
417     def getColSum(img,col):
418         sum = 0
419         for i in range(img.shape[0]):
420             sum += round(img[i,col]/255)#二值图像,img[i,col]=0或255,获取每一列像素值为255的像素个数
421         return sum;
422 
423     sum = 0
424     for col in range(img_w):
425         sum += getColSum(plate,col)#所有列白色像素点的个数总和
426         
427     col_limit = 0
428     #col_limit = round(0.3*sum/img_w) # 每列边缘像素点必须超过均值的30%才能判断属于字符区域
429     #print('col_limit',sum,img_w,col_limit)#1344.0,136,6.0
430     # 每个字符宽度也进行限制
431     charWid_limit = [round(img_w/12),round(img_w/5)]#[11,27]
432     is_char_flag = False
433 
434     for i in range(img_w):
435         colValue = getColSum(plate,i)#每一列像素值为255的像素个数,ex:i=7时,colValue=3;i=8时,colValue=9;i=9时,colValue=19;i=18时,colValue=16;i=19时,colValue=0
436         #print('colValue'+str(i),colValue)
437         if colValue > col_limit:
438             if is_char_flag == False:
439                 area_right = round((i+char_right)/2)#ex:i=8,area_right=4
440                 area_width = area_right-area_left#area_width=4
441                 char_width = char_right-char_left#char_width=0
442                 if (area_width>charWid_limit[0]) and (area_width<charWid_limit[1]):
443                     char_addr_list.append((area_left,area_right,char_width))
444                 char_left = i#i=8
445                 area_left = round((char_left+char_right) / 2)#area_left=4
446                 is_char_flag = True
447         else:
448             if is_char_flag == True:
449                 char_right = i-1#19-1=18
450                 is_char_flag = False
451         #print('is_char_flag'+str(i),area_left,area_right,char_right,char_left)
452         #print('char_addr_list1',char_addr_list)
453     # 手动结束最后未完成的字符分割
454     if area_right < char_left:
455         area_right,char_right = img_w,img_w#以img_w为右边界
456         #area_right = round((img_w+char_right)/2)
457         area_width = area_right - area_left
458         char_width = char_right - char_left
459         if (area_width > charWid_limit[0]) and (area_width < charWid_limit[1]):
460             char_addr_list.append((area_left, area_right, char_width))#每一个字符区域的左右边界及字符的宽度
461     print('char_addr_list2',char_addr_list)#ex.char_addr_list=[(4, 20, 10), (20, 45, 14), (45, 62, 7), (62, 83, 13), (83, 96, 6), (96, 114, 14), (114, 132, 14)]
462     return char_addr_list
463 
464 def get_chars(car_plate):#传入车牌二值化图像
465     char_w, char_h = 20, 20#dengjie.tkadd
466     img_h,img_w = car_plate.shape[:2]
467     h_proj_list = [] # 水平投影长度列表
468     h_temp_len,v_temp_len = 0,0
469     h_startIndex,h_end_index = 0,0 # 水平投影记索引
470     h_proj_limit = [0.2,0.8] # 车牌在水平方向的轮廓长度少于20%或多余80%过滤掉
471     char_imgs = []
472     
473     def getColSum(img,col):
474         sum = 0
475         for i in range(img.shape[0]):
476             sum += round(img[i,col]/255)#二值图像,img[i,col]=0或255,获取每一列像素值为255的像素个数
477         return sum;
478 
479     sum = 0
480     for col in range(img_w):
481         sum += getColSum(car_plate,col)#所有列白色像素点的个数总和
482    
483     # 将二值化的车牌水平投影到Y轴,计算投影后的连续长度,连续投影长度可能不止一段
484     h_count = [0 for i in range(img_h)]
485     for row in range(img_h):
486         temp_cnt = 0
487         for col in range(img_w):
488             if car_plate[row,col] == 255:
489                 temp_cnt += 1
490         h_count[row] = temp_cnt#统计每一行像素值为255的像素个数
491         if temp_cnt/img_w<h_proj_limit[0] or temp_cnt/img_w>h_proj_limit[1]:#每一行像素值为255的像素个数/车牌宽度<0.2 或者 每一行像素值为255的像素个数/车牌宽度>0.8
492             if h_temp_len != 0:
493                 h_end_index = row-1
494                 h_proj_list.append((h_startIndex,h_end_index))
495                 print('h_proj_list1',h_proj_list)
496                 h_temp_len = 0
497             continue
498         if temp_cnt > 0:
499             if h_temp_len == 0:
500                 h_startIndex = row#从0.2<(像素值为255的像素个数/img_w)<0.8 的行开始
501                 h_temp_len = 1
502             else:
503                 h_temp_len += 1
504         else:
505             if h_temp_len > 0:
506                 h_end_index = row-1
507                 h_proj_list.append((h_startIndex,h_end_index))
508                 print('h_proj_list2',h_proj_list)
509                 h_temp_len = 0
510     print('h_temp_len',h_temp_len)
511     # 手动结束最后得水平投影长度累加
512     if h_temp_len != 0:
513         h_end_index = img_h-1
514         h_proj_list.append((h_startIndex, h_end_index))#h_temp_len不等于0时再添加一对值
515         print('h_proj_list',h_proj_list)#ex:[(1, 1), (7, 29), (34, 35)]或者[(2, 2), (7, 28), (34, 34)]
516     # 选出最长的投影,该投影长度占整个截取车牌高度的比值必须大于0.5
517     h_maxIndex,h_maxHeight = 0,0
518     for i,(start,end) in enumerate(h_proj_list):
519         if h_maxHeight < (end-start):
520             h_maxHeight = (end-start)
521             h_maxIndex = i
522     if h_maxHeight/img_h < 0.5:
523         return char_imgs
524     chars_top,chars_bottom = h_proj_list[h_maxIndex][0],h_proj_list[h_maxIndex][1]#chars_top=h_proj_list[1][0],chars_bottom=h_proj_list[1][1]
525     
526     if sum > img_h *img_w * 0.5:
527          ret,car_plate = cv2.threshold(car_plate,0,255,cv2.THRESH_BINARY_INV|cv2.THRESH_OTSU)
528          #cv2.imshow('THRESH_BINARY_INV',car_plate)
529     
530     plates = car_plate[chars_top:chars_bottom+1,:]#获取车牌二值图像中的字符高度部分,plates比car_plate要窄,然后在进行字符分割
531     cv2.imwrite('./carIdentityData/opencv_output/car_plate.jpg',car_plate)
532     cv2.imwrite('./carIdentityData/opencv_output/plates.jpg', plates)
533     char_addr_list = horizontal_cut_chars(plates)#ex.char_addr_list=[(4, 20, 10), (20, 45, 14), (45, 62, 7), (62, 83, 13), (83, 96, 6), (96, 114, 14), (114, 132, 14)]
534 
535     for i,addr in enumerate(char_addr_list):
536         char_img = car_plate[chars_top:chars_bottom+1,addr[0]:addr[1]]#输出单个字符
537         char_img = cv2.resize(char_img,(char_w,char_h))#resize字符
538         char_imgs.append(char_img)
539         #cv2.imshow('22',char_img)     #dengjie2
540         cv2.imwrite('./carIdentityData/opencv_output/char_%d.jpg'%(i),char_img)
541     return char_imgs
542 
543 def extract_char(car_plate):#传入正确的车牌
544     gray_plate = cv2.cvtColor(car_plate,cv2.COLOR_BGR2GRAY)#转换成灰度图
545     ret,binary_plate = cv2.threshold(gray_plate,0,255,cv2.THRESH_BINARY|cv2.THRESH_OTSU)#使用最大类间方差法将图像二值化,自适应找出最合适的阈值
546     #cv2.imshow('extract_char_binary_plate',binary_plate)
547     cv2.imwrite('./carIdentityData/opencv_output/extract_char_binary_plate.jpg',binary_plate)
548     char_img_list = get_chars(binary_plate)
549     #cv2.imshow('1',binary_plate)  #dengjie
550     return char_img_list
551 
552 def cnn_select_carPlate(plate_list,model_path):
553     if len(plate_list) == 0:
554         return False,plate_list,"no"
555     g1 = tf.Graph()
556     sess1 = tf.Session(graph=g1)
557     '''
558     Tensorflow中的图(tf.Graph)和会话(tf.Session)
559     tf.Graph()表示实例化一个用于tensorflow计算和表示用的数据流图,不负责运行计算
560     1、使用g = tf.Graph()函数创建新的计算图
561     2、在with g.as_default():语句下定义属于计算图g的张量和操作
562     3、在with tf.Session()中通过参数graph=xxx指定当前会话所运行的计算图
563     4、如果没有显示指定张量和操作所属的计算图,则这些张量和操作属于默认计算图
564     5、一个图可以在多个sess中运行,一个sess也能运行多个图
565     '''
566     with sess1.as_default():
567         with sess1.graph.as_default():#使用此图作为默认图的上下文管理器
568             model_dir = os.path.dirname(model_path)# 获取文件的完整目录,得到当前文件的绝对路径
569             saver = tf.train.import_meta_graph(model_path)#用来加载训练模型meta文件中的图,以及图上定义的结点参数包括权重偏置项等需要训练的参数,也包括训练过程生成的中间参数
570             saver.restore(sess1, tf.train.latest_checkpoint(model_dir))#自动找到最近保存的变量文件并载入
571             graph = tf.get_default_graph()#获取当前默认的计算图
572             net1_x_place = graph.get_tensor_by_name('x_place:0')#按tensor名称获取tensor信息,Tensor("x_place:0", shape=(?, 36, 136, 3), dtype=float32)
573             #Once you know the name you can fetch the Tensor using <name>:0 (0 refers to endpoint which is somewhat redundant)
574             #一旦知道名称,就可以使用<name>:0来获取Tensor(0表示冗余的端点)
575             print('net1_x_place',net1_x_place)
576             net1_keep_place = graph.get_tensor_by_name('keep_place:0')#Tensor("keep_place:0", dtype=float32)
577             print('net1_keep_place',net1_keep_place)
578             net1_out = graph.get_tensor_by_name('out_put:0')#Tensor("out_put:0", shape=(?, 2), dtype=float32),获取cnn_construct()的输出
579             print('net1_out',net1_out)
580 
581             input_x = np.array(plate_list)
582             net_outs = tf.nn.softmax(net1_out)
583             preds = tf.argmax(net_outs,1) #预测结果,按行取最大值对应的索引
584             probs = tf.reduce_max(net_outs,reduction_indices=[1]) #结果概率值,按行取概率的最大值
585             pred_list,prob_list = sess1.run([preds,probs],feed_dict={net1_x_place:input_x,net1_keep_place:1.0})
586             print('pred_list',pred_list)
587             print('prob_list',prob_list)
588             # 选出概率最大的车牌
589             result_index,result_prob = -1,0.
590             for i,pred in enumerate(pred_list):
591                 if pred==1 and prob_list[i]>result_prob:
592                     result_index,result_prob = i,prob_list[i]#0,概率
593                     print('in pred')
594                     print(result_index,result_prob)
595             if result_index == -1:
596                 return False,plate_list[0],"no"# return the first candidate plate (colour undetermined) so the caller always gets three values
597             else:
598                 green = yellow = blue = 0
599                 img_hsv = cv2.cvtColor(plate_list[result_index], cv2.COLOR_BGR2HSV)
600                 row_num, col_num= img_hsv.shape[:2]
601                 # ~ 总共的像素个数
602                 card_img_count = row_num * col_num
603 
604                 for i in range(row_num):
605                     for j in range(col_num):
606                         H = img_hsv.item(i, j, 0)
607                         S = img_hsv.item(i, j, 1)
608                         V = img_hsv.item(i, j, 2)
609                         # ~ 根据HSV空间的值确定颜色
610                         if 11 < H <= 34 and S > 34:
611                             yellow += 1
612                         elif 35 < H <= 99 and S > 34:
613                             green += 1
614                         elif 99 < H <= 124 and S > 34:
615                             blue += 1
616                 color = "no"
617 
618                 # ~ 若某种颜色的像素个数占一半以上,则判别为该颜色
619                 if yellow*2 >= card_img_count:
620                     color = "yellow"
621 
622                 elif green*2 >= card_img_count:
623                     color = "green"
624 
625                 elif blue*2 >= card_img_count:
626                     color = "blue"
627                     
628                 return True,plate_list[result_index],color#返回正确的索引对应的车牌
629 
630 def cnn_recongnize_char(img_list,model_path):
631     g2 = tf.Graph()
632     sess2 = tf.Session(graph=g2)
633     text_list = []
634 
635     if len(img_list) == 0:
636         return text_list
637     with sess2.as_default():
638         with sess2.graph.as_default():
639             model_dir = os.path.dirname(model_path)
640             saver = tf.train.import_meta_graph(model_path)
641             saver.restore(sess2, tf.train.latest_checkpoint(model_dir))
642             graph = tf.get_default_graph()
643             net2_x_place = graph.get_tensor_by_name('x_place:0')
644             net2_keep_place = graph.get_tensor_by_name('keep_place:0')
645             net2_out = graph.get_tensor_by_name('out_put:0')
646 
647             data = np.array(img_list)
648             # 数字、字母、汉字,从67维向量找到概率最大的作为预测结果
649             net_out = tf.nn.softmax(net2_out)
650             preds = tf.argmax(net_out,1)
651             my_preds= sess2.run(preds, feed_dict={net2_x_place: data, net2_keep_place: 1.0})
652             print('my_preds',my_preds)#ex.my_preds=[49 11 13  8 19  5  3]
653 
654             for i in my_preds:
655                 text_list.append(char_table[i])
656             return text_list
657 
658 if __name__ == '__main__':
659     cur_dir = sys.path[0]
660     car_plate_w,car_plate_h = 136,36
661     char_w,char_h = 20,20
662     plate_model_path = os.path.join(cur_dir, './carIdentityData/model/plate_recongnize/model.ckpt-1020.meta')
663     char_model_path = os.path.join(cur_dir,'./carIdentityData/model/char_recongnize/model.ckpt-1030.meta')
664     img = cv2.imread('./plate_pic/24.jpg')
665 
666     # 预处理
667     pred_img = pre_process(img)
668 
669     # 车牌定位
670     car_plate_list = locate_carPlate(img,pred_img)
671 
672     # CNN车牌过滤
673     ret,car_plate,color = cnn_select_carPlate(car_plate_list,plate_model_path)#True,正确的车牌
674     if ret == False:
675         print("No license plate detected")
676         sys.exit(-1)# sys.exit(-1) stops the program; by convention 0 means success and any other value (usually 1) signals a failure
677     #cv2.imshow('cnn_plate',car_plate)
678     cv2.imwrite('./carIdentityData/opencv_output/cnn_plate.jpg', car_plate)
679 
680     # 字符提取
681     char_img_list = extract_char(car_plate)
682 
683     # CNN字符识别
684     text = cnn_recongnize_char(char_img_list,char_model_path)
685     print('result:',text)
686     print(color)
687 
688     cv2.waitKey(0)
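
As a side note, the floodFill flags assembled inside verify_color can be reproduced and checked on their own; a minimal sketch (the constants are the standard OpenCV values, and the composed result matches the comments in the code above):

    import cv2

    connectivity = 4                      # 4-neighbourhood fill
    new_value = 255                       # value written into the mask
    flags = connectivity
    flags |= cv2.FLOODFILL_FIXED_RANGE    # 2**16 = 65536: compare pixels against the seed pixel
    flags |= new_value << 8               # 255 << 8 = 65280: mask fill value in bits 8-15
    flags |= cv2.FLOODFILL_MASK_ONLY      # 2**17 = 131072: fill the mask instead of the source image
    print(flags)                          # 261892, i.e. binary 11 1111 1111 0000 0100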

The GUI code is as follows:

  1 import tkinter as tk
  2 from tkinter.filedialog import *
  3 from tkinter import ttk
  4 import carPlateIdentity
  5 import cv2
  6 from PIL import Image, ImageTk
  7 #import threading
  8 import time
  9 import os
 10 # video import create_capture
 11 import sys
 12 import getopt
 13 
 14 
 15 class Surface(ttk.Frame):
 16     pic_path = ""
 17     viewhigh = 600
 18     viewwide = 600
 19     update_time = 0
 20     thread = None
 21     thread_run = False
 22     camera = None
 23     color_transform = {"green":("绿牌","#55FF55"), "yellow":("黄牌","#FFFF00"), "blue":("蓝牌","#6666FF")}
 24 
 25     def __init__(self, win):
 26         ttk.Frame.__init__(self, win)
 27         frame_left = ttk.Frame(self)
 28         frame_right1 = ttk.Frame(self)
 29         frame_right2 = ttk.Frame(self)
 30         style=ttk.Style()
 31         #style.configure("BW.Tlable",foreground="blue",background="blue")
 32         style.configure("TButton",font=("Times",12),foreground="black",background="green")
 33         win.title("车牌识别")
 34         #win.state("zoomed")
 35         self.pack(fill=tk.BOTH, expand=tk.YES, padx="5", pady="5")
 36         #fill=tk.BOTH:水平和竖直方向填充;expand=tk.YES:扩展整个空白区;padx:x方向的外边距;pady:y方向的外边距
 37         frame_left.pack(side=LEFT,expand=1,fill=BOTH)
 38         #side=LEFT:按扭停靠在窗口的左侧
 39         frame_right1.pack(side=TOP,expand=1,fill=tk.Y)
 40         frame_right2.pack(side=RIGHT,expand=1)
 41         ttk.Label(frame_left, text='Original pic:',font=("Times",12)).pack(anchor="nw") #nw表示位置在上左,n是north,w是west
 42         ttk.Label(frame_right1, text='Plate Location:',font=("Times",12)).grid(column=0, row=0, sticky=tk.W)
 43         #位置在上面
 44         from_vedio_ctl = ttk.Button(frame_right2, text="Open camera", width=20, style="TButton",command=self.from_vedio)
 45         from_pic_ctl = ttk.Button(frame_right2, text="Open picture",width=20, style="TButton",command=self.from_pic)
 46         from_img_pre = ttk.Button(frame_right2, text="Show pre_img",width=20, style="TButton",command=self.show_img_pre)
 47 
 48         self.image_ctl = ttk.Label(frame_left)
 49         self.image_ctl.pack(anchor="nw")
 50 
 51         self.roi_ctl = ttk.Label(frame_right1)#车牌
 52         self.roi_ctl.grid(column=0, row=1, sticky=tk.W)
 53         ttk.Label(frame_right1, text='Recognition result:',font=("Times",12)).grid(column=0, row=2, sticky=tk.W)
 54         self.r_ctl = ttk.Label(frame_right1, text="",font=("Times",12))#字符
 55         self.r_ctl.grid(column=0, row=3, sticky=tk.W)
 56         self.color_ctl = ttk.Label(frame_right1, text="", font=("Times",12),width="20")
 57         self.color_ctl.grid(column=0, row=4, sticky=tk.W)
 58         from_pic_ctl.pack(anchor="se", pady="5")
 59         from_vedio_ctl.pack(anchor="se", pady="5")
 60         from_img_pre.pack(anchor="se", pady="5")
 61 
 62 
 63     def get_imgtk(self, img_bgr):
 64         img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
 65         im = Image.fromarray(img)#array转换成image图片
 66         imgtk = ImageTk.PhotoImage(image=im)#显示图片
 67         wide = imgtk.width()#图片的宽
 68         high = imgtk.height()#图片的高
 69         #print('wide',wide)
 70         #print('high',high)
 71         if wide > self.viewwide or high > self.viewhigh:#前面有定义,viewwide和viewhigh都为600
 72             wide_factor = self.viewwide / wide#viewwide除以图片的宽
 73             high_factor = self.viewhigh / high#viewhigh除以图片的高
 74             factor = min(wide_factor, high_factor)#取两者的最小值
 75             wide = int(wide * factor)
 76             if wide <= 0 : wide = 1#如果wide<=0,则令wide=1
 77             high = int(high * factor)
 78             if high <= 0 : high = 1
 79             im=im.resize((wide, high), Image.LANCZOS)# LANCZOS (formerly ANTIALIAS): high-quality, anti-aliased resampling
 80             imgtk = ImageTk.PhotoImage(image=im)
 81         return imgtk
 82         #此方法得到resize后且高质量的图片
 83 
 84     def show_roi(self, r, roi,color):#传入字符r与车牌图像roi
 85         if r :
 86             roi = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
 87             roi = Image.fromarray(roi)
 88             self.imgtk_roi = ImageTk.PhotoImage(image=roi)
 89             self.roi_ctl.configure(image=self.imgtk_roi, state='enable')
 90             self.r_ctl.configure(text=str(r))
 91             self.update_time = time.time()
 92                     
 93             try:
 94                 c = self.color_transform[color]
 95                 self.color_ctl.configure(text=c[0], background=c[1], state='enable')
 96             except:
 97                 self.color_ctl.configure(state='disabled')
 98             
 99         if self.update_time + 8 < time.time():
100             self.roi_ctl.configure(state='disabled')
101             self.r_ctl.configure(text="")
102             self.color_ctl.configure(state='disabled')
103     
104     def show_img_pre(self):
105        pre_img1=cv2.imread('./carIdentityData/opencv_output/blur.jpg')
106        pre_img2=cv2.imread('./carIdentityData/opencv_output/sobel.jpg')
107        pre_img3=cv2.imread('./carIdentityData/opencv_output/hsv_pic.jpg')
108        pre_img4=cv2.imread('./carIdentityData/opencv_output/contour.jpg')
109        pre_img5=cv2.imread('./carIdentityData/opencv_output/floodfill.jpg')
110        pre_img6=cv2.imread('./carIdentityData/opencv_output/plates.jpg')
111        pre_img7=cv2.imread('./carIdentityData/opencv_output/cnn_plate.jpg')
112        
113        cv2.imshow('blur',pre_img1)
114        cv2.imshow('sobel',pre_img2)
115        cv2.imshow('hsv_pic',pre_img3)
116        cv2.imshow('contour',pre_img4)
117        cv2.imshow('floodfill',pre_img5)
118        cv2.imshow('plates',pre_img6)
119        cv2.imshow('cnn_plate',pre_img7)
120        
121        while True:
122            ch = cv2.waitKey(1)
123            if ch == 27:
124                 break
125        cv2.destroyAllWindows()
126        
127        
128     def from_vedio(self):
129        video=[0,"http://admin:admin@192.168.0.13:8081","http://admin:admin@iPhone.local:8081","http://admin:admin@10.119.223.51:8081"]
130        '''
131        By default the IP-camera username and password are both admin; the client and the camera server must be on the same (Wi-Fi) LAN.
132        A parameter of 0 opens the built-in camera; a video file path plays that file instead.
133        video="http://admin:admin@192.168.0.13:8081"
134        '''
136        video = video[0]
137        capture =cv2.VideoCapture(video)
138 
139        # 建个窗口并命名
140        cv2.namedWindow("camera",1)
141        num = 0
142 
143        # 用于循环显示图片,达到显示视频的效果
144        while True:
145             ret, frame = capture.read()
146     
147             # 在frame上显示test字符
148             image1=cv2.putText(frame,'test', (50,100), 
149                         cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (255, 0 ,0), 
150                         thickness = 2, lineType = 2)
151                 
152             cv2.imshow('camera',frame)
153     
154             # 不加waitkey() 则会图片显示后窗口直接关掉
155             key = cv2.waitKey(1)
156     
157             if key == 27:
158                 #esc键退出
159                 print("esc break...")
160                 break
161 
162             if key == ord(' '):
163                 # 保存一张图像
164                 num = num+1
165                 filename = "frames_%s.jpg" % num
166                 print('Saved image: %s' % filename)
167                 cv2.imwrite(filename,frame)
168        cv2.destroyAllWindows()
169 
186     def from_pic(self):
187         self.thread_run = False
188         self.pic_path = askopenfilename(title="选择图片", filetypes=[("jpg", "*.jpg")])
189         cur_dir = sys.path[0]
190         plate_model_path = os.path.join(cur_dir, './carIdentityData/model/plate_recongnize/model.ckpt-1020.meta')
191         char_model_path = os.path.join(cur_dir,'./carIdentityData/model/char_recongnize/model.ckpt-1030.meta')
192 
193         if self.pic_path:
194             img_bgr = cv2.imread(self.pic_path)
195             self.imgtk = self.get_imgtk(img_bgr)
196             self.image_ctl.configure(image=self.imgtk)
197             #预处理
198             pred_img = carPlateIdentity.pre_process(img_bgr)
199             # 车牌定位
200             car_plate_list = carPlateIdentity.locate_carPlate(img_bgr,pred_img)
201             # CNN车牌过滤
202             ret,car_plate,color = carPlateIdentity.cnn_select_carPlate(car_plate_list,plate_model_path)
203             cv2.imwrite('./carIdentityData/opencv_output/cnn_plate.jpg', car_plate)
204             # 字符提取
205             char_img_list = carPlateIdentity.extract_char(car_plate)
206             # CNN字符识别
207             text = carPlateIdentity.cnn_recongnize_char(char_img_list, char_model_path)
208             print('result:', text)
209             #r, roi = self.predictor.predict(img_bgr)#识别到的字符、定位的车牌图像
210             self.show_roi(text, car_plate,color)
211 
226 def close_window():
227     print("destroy")
228     if surface.thread_run :
229         surface.thread_run = False
230         surface.thread.join(2.0)
231     win.destroy()
232 
233 
234 if __name__ == '__main__':
235     win = tk.Tk()
236     win.geometry('950x500')
237 
238     surface = Surface(win)
239     win.protocol('WM_DELETE_WINDOW', close_window)
240     win.mainloop()#进入消息循环
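
As written, from_vedio only saves the captured frames to disk; a saved (or in-memory) frame can then be recognised with the same pipeline that from_pic uses. A minimal sketch of that wiring (the function name recognize_frame is mine, not part of the original GUI; it assumes the same model paths as from_pic):

    import carPlateIdentity

    def recognize_frame(frame_bgr, plate_model_path, char_model_path):
        # run the same pipeline from_pic uses, but on an in-memory camera frame
        pred_img = carPlateIdentity.pre_process(frame_bgr)              # edge + colour mask
        plates = carPlateIdentity.locate_carPlate(frame_bgr, pred_img)  # candidate plate regions
        ret, plate, color = carPlateIdentity.cnn_select_carPlate(plates, plate_model_path)
        if not ret:
            return None, None
        chars = carPlateIdentity.extract_char(plate)                    # split into single characters
        text = carPlateIdentity.cnn_recongnize_char(chars, char_model_path)
        return ''.join(text), color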
Original post: https://www.cnblogs.com/DJames23/p/12458388.html