Training Character Recognition with CTC

1. Obtaining Fonts

  • Finding fonts on Windows 10: [Windows + E] -> %WINDIR%/Fonts
  • Font path on CentOS Linux: /usr/share/fonts; installed fonts can be listed with fc-list:
root@9080e45b4485:~# apt-get install  fontconfig
root@9080e45b4485:~# fc-list
/usr/share/fonts/truetype/dejavu/DejaVuSerif-Bold.ttf: DejaVu Serif:style=Bold
/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf: DejaVu Sans Mono:style=Book
/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf: DejaVu Sans:style=Book
/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf: DejaVu Sans:style=Bold
/usr/share/fonts/truetype/dejavu/DejaVuSansMono-Bold.ttf: DejaVu Sans Mono:style=Bold
/usr/share/fonts/truetype/dejavu/DejaVuSerif.ttf: DejaVu Serif:style=Book
  • matplotlib font path: /usr/local/lib/python3.5/dist-packages/matplotlib/mpl-data/fonts/ttf/ (or locate it programmatically, as in the sketch below)
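
If you prefer not to hard-code the python3.5 dist-packages path, the matplotlib font directory can be located programmatically. A small sketch, assuming matplotlib is installed:

import os
import matplotlib
from matplotlib import font_manager

# matplotlib's bundled TrueType fonts live under its data path
ttf_dir = os.path.join(matplotlib.get_data_path(), 'fonts', 'ttf')
print(ttf_dir)

# resolve a concrete font file from a family name
print(font_manager.findfont('DejaVu Sans'))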

2. Generating the Data Files

  • Use the font file and the text content to generate training data; the MYFONT helper class below renders black-on-white text images:
import os
from PIL import Image, ImageDraw, ImageFont


class MYFONT():
    def __init__(self, font_path='data'):
        self._Deng_path = 'Deng.ttf'
        self.__load_font(font_path)

    def __load_font(self, font_path):
        self.Deng_font = ImageFont.truetype(os.path.join(font_path, self._Deng_path), 23, encoding="unic")

    def set_font_size(self, font_size=71):
        # derive a font object of the requested size from the loaded font
        txt_font_tmp = self.Deng_font.font_variant(size=font_size, encoding='unic')
        return txt_font_tmp

    def get_txt_cord(self, txt, font):
        # coordinate info of the rendered text: (x, y, w, h)
        offset = font.getoffset(txt)
        size = font.getsize(txt)
        return offset + size  # x, y, w, h

    def get_alpha_new_image(self, imgwh):
        # blank white RGB canvas of the target size
        img_w, img_h = imgwh
        pil_img = Image.new('RGB', (img_w, img_h), (255, 255, 255))
        return pil_img

    def get_txt_part_base(self, imgwh, txt, txt_cord, txt_font):
        '''
        Draw the text onto an image: black text on a white background.
        :param imgwh: target image resolution (width, height)
        :param txt: text to draw
        :param txt_cord: drawing coordinates
        :param txt_font: font to use
        :return: resulting image
        '''
        pil_img = self.get_alpha_new_image(imgwh)
        pil_img_draw = ImageDraw.Draw(pil_img)
        pil_img_draw.text(txt_cord, txt, (0, 0, 0), font=txt_font)
        return pil_img
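
A minimal usage sketch for the class above, assuming Deng.ttf sits under ./data; the sample text, font size, and output file name are placeholders:

myfont = MYFONT(font_path='data')
txt = u'京A12345'                                # placeholder label text
font = myfont.set_font_size(32)
x, y, w, h = myfont.get_txt_cord(txt, font)      # text offset and rendered size
img = myfont.get_txt_part_base((w, h), txt, (0, 0), font)
img.save('sample-000000001.jpg')

The script below (adapted from bgshih/crnn) then packs the generated image files and their text labels into an LMDB database for training: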
#encoding: utf-8
import os
import lmdb # install lmdb by "pip install lmdb"
import cv2
import numpy as np

'''
Adapted from https://github.com/bgshih/crnn/blob/master/tool/create_dataset.py
'''

def checkImageIsValid(imageBin):
    if imageBin is None:
        return False
    imageBuf = np.frombuffer(imageBin, dtype=np.uint8)
    img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)
    if img is None:  # decoding failed: corrupted image data
        return False
    imgH, imgW = img.shape[0], img.shape[1]
    if imgH * imgW == 0:
        return False
    return True


def writeCache(env, cache):
    with env.begin(write=True) as txn:
        #for k, v in cache.iteritems(): #py2
        for k, v in cache.items(): #py3
            #txn.put(k, v) #py2
            txn.put(str(k).encode(), v) #py3


def createDataset(outputPath, imagePathList, labelList, lexiconList=None, checkValid=True):
    """
    Create LMDB dataset for CRNN training.

    ARGS:
        outputPath    : LMDB output path
        imagePathList : list of image path
        labelList     : list of corresponding groundtruth texts
        lexiconList   : (optional) list of lexicon lists
        checkValid    : if true, check the validity of every image
    """
    assert(len(imagePathList) == len(labelList))
    nSamples = len(imagePathList)
    env = lmdb.open(outputPath, map_size=1099511627776)
    cache = {}
    cnt = 1
    for i in range(nSamples):
        imagePath = imagePathList[i]
        label = labelList[i]
        if not os.path.exists(imagePath):
            print('%s does not exist' % imagePath)
            continue
        #print (imagePath)
        with open(imagePath, 'rb') as f:
            imageBin = f.read()
        if checkValid:
            if not checkImageIsValid(imageBin):
                print('%s is not a valid image' % imagePath)
                continue

        imageKey = 'image-%09d' % cnt
        labelKey = 'label-%09d' % cnt
        cache[imageKey] = imageBin
        cache[labelKey] = label
        if lexiconList:
            lexiconKey = 'lexicon-%09d' % cnt
            cache[lexiconKey] = ' '.join(lexiconList[i])
        if cnt % 1000 == 0:
            writeCache(env, cache)
            cache = {}
            print('Written %d / %d' % (cnt, nSamples))
        cnt += 1
    nSamples = cnt-1
    # cache['num-samples'] = nSamples #py2
    cache['num-samples'] = str(nSamples).encode() #py3
    writeCache(env, cache)
    print('Created dataset with %d samples' % nSamples)


def getAllUrls(txt):
    rtn = list()
    with open(txt, 'rb') as fp:
        buffs = fp.readlines()
    rtn = [tmp.strip() for tmp in buffs]
    return rtn

if __name__ == '__main__':
    img_pathfile = 'imgslist.txt'  # list of image paths, one per line
    labellist = 'labellist.txt'    # corresponding text label for each image
    outpath = 'trainlmdb'
    if not os.path.exists(outpath):
        os.makedirs(outpath)
    imagePathList = getAllUrls(img_pathfile)
    labelList = getAllUrls(labellist)
    createDataset(outpath, imagePathList, labelList)
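
After the script finishes, the database can be sanity-checked by reading a few entries back directly with lmdb. A minimal sketch; the keys follow the image-%09d / label-%09d / num-samples layout written by the script above, and the index 1 is just an example:

import cv2
import lmdb
import numpy as np

env = lmdb.open('trainlmdb', readonly=True, lock=False)
with env.begin(write=False) as txn:
    n_samples = int(txn.get('num-samples'.encode()))
    print('num-samples:', n_samples)
    img_bin = txn.get(('image-%09d' % 1).encode())   # first sample
    label = txn.get(('label-%09d' % 1).encode())
    img = cv2.imdecode(np.frombuffer(img_bin, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
    print(label, None if img is None else img.shape)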

3. Training CRNN

  • Install the PyTorch ctc-loss (the Warp-ctc bindings, reference 1); a minimal CTC-loss sketch follows below.
  • Training project: crnn.pytorch
  • Notes: 1. use a Python 2 environment with torch 1.1.0; 2. change the learning rate to 0.0001; with the default 0.01 the training loss blows up to infinity.
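
For context on what the CTC loss consumes, here is a minimal sketch. crnn.pytorch itself uses the Warp-ctc bindings from reference 1; the sketch below uses torch.nn.CTCLoss (built into torch >= 1.0) purely as an illustration, and all shapes and values are made-up placeholders:

import torch
import torch.nn as nn

# dummy CRNN output: T time steps, N batch size, C classes (index 0 = CTC blank)
T, N, C = 26, 4, 37
log_probs = torch.randn(T, N, C, requires_grad=True).log_softmax(2)
targets = torch.randint(1, C, (N, 7), dtype=torch.long)       # 7-character labels, no blanks
input_lengths = torch.full((N,), T, dtype=torch.long)
target_lengths = torch.full((N,), 7, dtype=torch.long)

ctc = nn.CTCLoss(blank=0)
loss = ctc(log_probs, targets, input_lengths, target_lengths)
loss.backward()
print(loss.item())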

Issues

  • `Corrupted image 657`: this means something went wrong when generating the LMDB data; for how the LMDB data is loaded, see the lmdb example source in the references.
# After generating the LMDB data, run this from the crnn.pytorch root directory
# to check whether the dataset loads correctly.
import torch
import dataset

test_dataset = dataset.lmdbDataset(
    root='carplate_lmdb/val/', transform=dataset.resizeNormalize((100, 32)))
data_loader = torch.utils.data.DataLoader(
    test_dataset, shuffle=True, batch_size=64, num_workers=1)
val_iter = iter(data_loader)
images, labels = next(val_iter)  # corrupted samples are reported while loading

# Note: also comment out the line `preds = preds.squeeze(2)` in the crnn.pytorch code.
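
If the batch loads without any `Corrupted image` warnings, the LMDB data is fine and training can proceed.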

 

References:

  1. PyTorch bindings for Warp-ctc

  2. crnn.pytorch

  3. Location of truetype font

  4. License plate and vehicle logo lookup

  5. lmdb module example source code
