Mask-Wearing Detection Competition

Notes:

I do not intend to do in-depth research in this direction; I am simply entering the competition with the algorithms I already have. This document is just a record of the whole process.

Data-centric work will be the next major direction of study.

I. URL:

https://www.dcjingsai.com/common/cmpt/AI%E6%88%98%E7%96%AB%C2%B7%E5%8F%A3%E7%BD%A9%E4%BD%A9%E6%88%B4%E6%A3%80%E6%B5%8B%E5%A4%A7%E8%B5%9B_%E6%88%91%E7%9A%84%E9%98%9F%E4%BC%8D.html

II. Task:

III. Getting started

 1. Download the data

2020-03-17

Training data: 254 positive samples, 417 negative samples.

Data augmentation:

import albumentations as albu
import cv2
import os
from glob import glob

def train_transform0(image, p=1):
   # Geometric augmentations: horizontal flip and transpose (not used in the loop below)
   aug1 = albu.Compose([albu.HorizontalFlip(p=1)], p=p)
   aug2 = albu.Compose([albu.Transpose(p=1)], p=p)
   return aug1(image=image)['image'],aug2(image=image)['image']

def train_transform1(image, p=1):
   # Blur with kernel limits from 1 to 10, optical/grid/elastic distortion with a reflected
   # border, and brightness/contrast variants (18 outputs in total)
   aug1 = albu.Compose([albu.Blur(blur_limit=1)], p=p)
   aug2 = albu.Compose([albu.Blur(blur_limit=2)], p=p)
   aug3 = albu.Compose([albu.Blur(blur_limit=3)], p=p)
   aug4 = albu.Compose([albu.Blur(blur_limit=4)], p=p)
   aug5 = albu.Compose([albu.Blur(blur_limit=5)], p=p)
   aug6 = albu.Compose([albu.Blur(blur_limit=6)], p=p)
   aug7 = albu.Compose([albu.Blur(blur_limit=7)], p=p)
   aug8 = albu.Compose([albu.Blur(blur_limit=8)], p=p)
   aug9 = albu.Compose([albu.Blur(blur_limit=9)], p=p)
   aug10 = albu.Compose([albu.Blur(blur_limit=10)], p=p) 
   aug11 = albu.Compose([albu.OpticalDistortion(distort_limit=0.05, shift_limit=0.05, interpolation=1, border_mode=4, p=1)], p=p)
   aug12 = albu.Compose([albu.GridDistortion(num_steps=5, distort_limit=0.3, interpolation=1, border_mode=4, p=1)], p=p)
   aug13 = albu.Compose([albu.ElasticTransform(alpha=1, sigma=50, alpha_affine=50, interpolation=1, border_mode=4, p=1)], p=p)
   aug14 = albu.Compose([albu.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=1)], p=p) 
   aug15 = albu.Compose([albu.RandomBrightnessContrast(brightness_limit=0.4, contrast_limit=0.2, p=1)], p=p)
   aug16 = albu.Compose([albu.RandomBrightnessContrast(brightness_limit=0.6, contrast_limit=0.2, p=1)], p=p)
   aug17 = albu.Compose([albu.RandomBrightnessContrast(brightness_limit=0.8, contrast_limit=0.2, p=1)], p=p)
   aug18 = albu.Compose([albu.RandomBrightnessContrast(brightness_limit=1, contrast_limit=0.2, p=1)], p=p)  
   return aug1(image=image)['image'],aug2(image=image)['image'],aug3(image=image)['image'],aug4(image=image)['image'],aug5(image=image)['image'],aug6(image=image)['image'],aug7(image=image)['image'],aug8(image=image)['image'],aug9(image=image)['image'],aug10(image=image)['image'],aug11(image=image)['image'],aug12(image=image)['image'],aug13(image=image)['image'],aug14(image=image)['image'],aug15(image=image)['image'],aug16(image=image)['image'],aug17(image=image)['image'],aug18(image=image)['image']

def train_transform2(image, p=1):
   # Colour jitter, padding, channel shifts, noise, blur and compression (13 outputs in total)
   aug1 = albu.Compose([albu.HueSaturationValue(hue_shift_limit=15, sat_shift_limit=30, val_shift_limit=15, p=1)], p=p)
   aug2 = albu.Compose([albu.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=30, val_shift_limit=20, p=1)], p=p)
   aug3 = albu.Compose([albu.PadIfNeeded(min_height=4608, min_width=4608, border_mode=0, value=[0, 0, 0],p=1.0)], p=p)
   aug4 = albu.Compose([albu.RGBShift(r_shift_limit=20, g_shift_limit=20, b_shift_limit=20, p=1)], p=p)
   aug5 = albu.Compose([albu.RandomBrightness(limit=0.2, p=1)], p=p)  
   aug6 = albu.Compose([albu.RandomContrast(limit=0.2, p=1)], p=p) 
   aug7 = albu.Compose([albu.MotionBlur(blur_limit=7, p=1)], p=p)    
   aug8 = albu.Compose([albu.MedianBlur(blur_limit=7, p=1)], p=p)    
   #aug22 = albu.Compose([albu.GaussianBlur(blur_limit=7, p=1)], p=p) 
   aug9 = albu.Compose([albu.GaussNoise(var_limit=(10.0, 50.0),  p=1)], p=p)  
   aug10 = albu.Compose([albu.CLAHE(clip_limit=4.0, tile_grid_size=(8, 8), p=1)], p=p)  
   aug11 = albu.Compose([albu.ChannelShuffle(always_apply=False, p=1)], p=p)  
   aug12 = albu.Compose([albu.InvertImg(always_apply=False, p=1)], p=p)   
   aug13 = albu.Compose([albu.JpegCompression(quality_lower=99, quality_upper=100, p=1)], p=p) 
   return aug1(image=image)['image'],aug2(image=image)['image'],aug3(image=image)['image'],aug4(image=image)['image'],aug5(image=image)['image'],aug6(image=image)['image'],aug7(image=image)['image'],aug8(image=image)['image'],aug9(image=image)['image'],aug10(image=image)['image'],aug11(image=image)['image'],aug12(image=image)['image'],aug13(image=image)['image']
   
def train_transform3(image, p=1):
   # Same distortions as in train_transform1, but with a constant (black) border (3 outputs)
   aug11 = albu.Compose([albu.OpticalDistortion(distort_limit=0.05, shift_limit=0.05, interpolation=1, border_mode=0, p=1)], p=p)
   aug12 = albu.Compose([albu.GridDistortion(num_steps=5, distort_limit=0.3, interpolation=1, border_mode=0, p=1)], p=p)
   aug13 = albu.Compose([albu.ElasticTransform(alpha=1, sigma=50, alpha_affine=50, interpolation=1, border_mode=0, p=1)], p=p)
   return aug11(image=image)['image'],aug12(image=image)['image'],aug13(image=image)['image']

old_dir='./train/pos/'
new_dir='./train/pos2/'
if not os.path.exists(new_dir):
    os.mkdir(new_dir)

train_dataset = glob(old_dir+'*.*')

for wj in range(len(train_dataset)):
    print(wj, len(train_dataset))
    img_name = os.path.basename(train_dataset[wj])   # file name without the directory part
    img = cv2.imread(old_dir + img_name)
    if img is None:                                  # skip anything OpenCV cannot decode
        continue

    I1,I2,I3,I4,I5,I6,I7,I8,I9,I10,I11,I12,I13,I14,I15,I16,I17,I18=train_transform1(img, p=1)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I1.jpg',I1)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I2.jpg',I2)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I3.jpg',I3)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I4.jpg',I4)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I5.jpg',I5)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I6.jpg',I6)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I7.jpg',I7)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I8.jpg',I8)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I9.jpg',I9)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I10.jpg',I10)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I11.jpg',I11)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I12.jpg',I12)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I13.jpg',I13)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I14.jpg',I14)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I15.jpg',I15)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I16.jpg',I16)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I17.jpg',I17)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I18.jpg',I18)

    I01,I02,I03,I04,I05,I06,I07,I08,I09,I010,I011,I012,I013=train_transform2(img, p=1)   
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I01.jpg',I01)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I02.jpg',I02)
    #cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I03.jpg',I03)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I04.jpg',I04)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I05.jpg',I05)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I06.jpg',I06)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I07.jpg',I07)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I08.jpg',I08)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I09.jpg',I09)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I010.jpg',I010)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I011.jpg',I011)
    #cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I012.jpg',I012)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'I013.jpg',I013)

    Ij1,Ij2,Ij3=train_transform3(img, p=1)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'Ij1.jpg',Ij1)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'Ij2.jpg',Ij2)
    cv2.imwrite(new_dir+img_name.split('.')[0]+'_'+'Ij3.jpg',Ij3)
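
With the writes above, each positive image produces 32 augmented copies (18 from train_transform1, 11 kept from train_transform2, 3 from train_transform3). A quick count of the output directory shows whether the script ran to completion:

from glob import glob

# ./train/pos2/ should hold roughly 254 * 32 = 8128 augmented images
print(len(glob('./train/pos2/*.jpg')))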

Generate the label list:

# -*- coding: utf-8 -*-
import os

First_level_dir = './train/'

f = open('list.txt', 'w')

# Walk the class sub-directories (pos/, pos2/, neg/) and write "<file name> <label>" per line
for Second_level_dir in os.listdir(First_level_dir):
    for img_name in os.listdir(First_level_dir + Second_level_dir):
        if 'neg' in Second_level_dir:
            f.write(img_name + ' 0\n')   # 0 = not wearing a mask
        else:
            f.write(img_name + ' 1\n')   # 1 = wearing a mask

f.close()
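
For reference, a minimal sketch of reading list.txt back into (file name, label) pairs; each line has the form "<image file name> <0|1>", with 0 for neg and 1 for pos:

# Parse list.txt produced above into (file_name, label) pairs
samples = []
with open('list.txt') as f:
    for line in f:
        if not line.strip():
            continue
        name, label = line.rsplit(' ', 1)
        samples.append((name, int(label)))
print(len(samples), samples[:3])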

 2. Download a classification model and train it

git clone --recursive https://github.com/bearpaw/pytorch-classification.git
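
The repo's README documents its own training scripts and options. As a reference, below is a minimal, self-contained fine-tuning sketch (an illustrative placeholder, not the actual competition solution): it assumes the directory layout built above (./train/pos/, ./train/pos2/, ./train/neg/) and trains a torchvision ResNet-18 as a two-class mask / no-mask classifier, with all hyper-parameters chosen arbitrarily.

import torch
import torch.nn as nn
from glob import glob
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import models, transforms

class MaskDataset(Dataset):
    def __init__(self, root='./train/'):
        # pos/ and the augmented pos2/ are label 1, neg/ is label 0
        self.samples = [(p, 1) for p in glob(root + 'pos/*.*') + glob(root + 'pos2/*.*')]
        self.samples += [(p, 0) for p in glob(root + 'neg/*.*')]
        self.tf = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        path, label = self.samples[idx]
        return self.tf(Image.open(path).convert('RGB')), label

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# ImageNet-pretrained backbone with a new 2-class head (no mask / mask)
model = models.resnet18(pretrained=True)
model.fc = nn.Linear(model.fc.in_features, 2)
model = model.to(device)

loader = DataLoader(MaskDataset(), batch_size=32, shuffle=True, num_workers=4)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)

for epoch in range(10):
    model.train()
    for images, labels in loader:
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        loss = criterion(model(images), labels)
        loss.backward()
        optimizer.step()
    print('epoch %d, last batch loss %.4f' % (epoch, loss.item()))

torch.save(model.state_dict(), 'mask_resnet18.pth')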


Original article: https://www.cnblogs.com/wjjcjj/p/12510332.html