Thursday, October 11, 2018

[Python] How to Do Text Recognition Based on a BIOS Setup Snapshot

import math
import cv2
import numpy as np
from matplotlib import pyplot as plt
import pytesseract
from PIL import Image
#alias new='C:\Program Files (x86)\Tesseract-OCR\tesseract.exe'

def main():
    """OCR pipeline for a BIOS-setup screenshot.

    Steps: load the screenshot -> unsharp-mask sharpening -> Canny edge
    map -> probabilistic Hough line detection -> locate the main bordered
    region and its center divider -> split the region into single text
    rows via a horizontal projection -> Otsu-binarize each row and feed it
    to Tesseract.  Debug images are written next to the script at every
    stage.  Reads 'amd_setup.bmp' from the working directory.
    """

    # --------------------------------------------------------------------
    # 1. Load the source screenshot as a BGR color image.
    load_image = cv2.imread('amd_setup.bmp', 1)

    # --------------------------------------------------------------------
    # 2. Unsharp masking (sharpen).
    #    2.1 GaussianBlur: (5,5) kernel size; sigmaX=0.0 makes OpenCV
    #        derive the standard deviation from the kernel size.
    #    2.2 addWeighted blends the two images: weight 1.5 for the
    #        original, -0.5 for the blurred copy.
    Gaussian_Blur_Image = cv2.GaussianBlur(load_image, (5, 5), 0.0)
    # BUG FIX: the original call passed load_image as the 6th (dst)
    # argument, overwriting the pristine source that the debug steps
    # below intend to draw on.
    sharpen_load_image = cv2.addWeighted(load_image, 1.5, Gaussian_Blur_Image, -0.5, 0)
    cv2.imwrite("Pre_2_sharpen_image.jpg", sharpen_load_image)

    # --------------------------------------------------------------------
    # 3. Canny edge detection.  A blur pass before Canny is normally
    #    advisable, but the input is a screenshot and essentially
    #    noise-free.  50 = lower hysteresis threshold (weak edges linked
    #    to strong ones), 150 = upper threshold marking definite edges.
    gray_load_image = cv2.cvtColor(sharpen_load_image, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray_load_image, 50, 150)
    cv2.imwrite("Pre_3_edges.jpg", edges)

    # --------------------------------------------------------------------
    # 4. Dimensions of the edge map.
    print('height, width:')
    print(edges.shape)
    height, width = edges.shape

    # --------------------------------------------------------------------
    # 5. Distance tolerances: edge pixels closer than these are treated
    #    as belonging to the same horizontal/vertical line (0.5% of the
    #    corresponding image dimension, rounded up).
    tolerance_hori_lines_Dis = int(math.ceil(float(height) * 0.005))
    tolerance_vert_lines_Dis = int(math.ceil(float(width) * 0.005))
    print('tolerance_hori_lines_Dis, tolerance_vert_lines_Dis:')
    print(tolerance_hori_lines_Dis, tolerance_vert_lines_Dis)

    # --------------------------------------------------------------------
    # 6. Probabilistic Hough line detection.  The shortest line we care
    #    about is a frame border ~85% of the image height, so both the
    #    accumulator threshold and minLineLength are 0.8 * height.
    # BUG FIX: the original passed minLineLength/maxLineGap positionally;
    # positionally the 5th argument of cv2.HoughLinesP is the `lines`
    # output parameter, so the intended values never took effect.  They
    # must be given as keywords.
    detect_lines = cv2.HoughLinesP(edges, 1, np.pi / 2, int(0.8 * float(height)),
                                   minLineLength=int(0.8 * float(height)),
                                   maxLineGap=int(math.ceil(0.5 * tolerance_vert_lines_Dis)))
    TwoDim_detect_lines = detect_lines[:, 0, :]  # flatten to rows of (x1,y1,x2,y2)

    # --------------------------------------------------------------------
    # 7. Debug output: draw every detected segment on a gray copy of the
    #    original image.
    LineDetected_load_image = cv2.cvtColor(load_image, cv2.COLOR_BGR2GRAY)
    print('-----x1,y1,x2,y2-----')
    for x1, y1, x2, y2 in TwoDim_detect_lines:
        cv2.line(LineDetected_load_image, (x1, y1), (x2, y2), (0, 0, 255), 1)
        print(x1, y1, x2, y2)
    cv2.imwrite("Pre_7_Origin_Add_Line.jpg", LineDetected_load_image)

    # --------------------------------------------------------------------
    # 8.1 Horizontal lines: locate the frame's top and bottom borders.
    Detect_Hori_lines = []
    for index in range(len(TwoDim_detect_lines)):
        if TwoDim_detect_lines[index][3] == TwoDim_detect_lines[index][1]:
            Detect_Hori_lines.append(TwoDim_detect_lines[index])

    # Discard horizontal segments shorter than 95% of the image width by
    # replacing them with a [0,0,0,0] sentinel (kept so indices stay valid).
    for index in range(len(Detect_Hori_lines)):
        if abs(Detect_Hori_lines[index][2] - Detect_Hori_lines[index][0]) < (0.95 * width):
            Detect_Hori_lines[index] = [0, 0, 0, 0]

    print('x1,y1,x2,y2, hori lines with enough length')
    for x1, y1, x2, y2 in Detect_Hori_lines:
        print(x1, y1, x2, y2)

    # Of the surviving lines, keep the one closest to the vertical center
    # from above (upper border) and from below (lower border).
    minimum_distance_upper_line = height
    minimum_distance_lower_line = height
    # NOTE(review): stay None when no long horizontal line survives; the
    # original left these unbound (NameError) in that case.
    Detect_Upper_lines = None
    Detect_Lower_lines = None
    for index in range(len(Detect_Hori_lines)):
        if Detect_Hori_lines[index][1] < 0.5 * height:
            if (0.5 * height - Detect_Hori_lines[index][1]) < minimum_distance_upper_line:
                minimum_distance_upper_line = 0.5 * height - Detect_Hori_lines[index][1]
                Detect_Upper_lines = Detect_Hori_lines[index]
        else:
            if (Detect_Hori_lines[index][1] - 0.5 * height) < minimum_distance_lower_line:
                minimum_distance_lower_line = Detect_Hori_lines[index][1] - 0.5 * height
                Detect_Lower_lines = Detect_Hori_lines[index]

    print('Detect_Upper_lines, Detect_Lower_lines')
    print(Detect_Upper_lines, Detect_Lower_lines)

    # Shrink the frame inward by the tolerance margins to get the inner
    # corner points of the main box: left_top and right_down, both (x, y)
    # with y growing downward.
    Inner_X_Line = (max(Detect_Upper_lines[0], Detect_Lower_lines[0]) + tolerance_vert_lines_Dis,
                    min(Detect_Upper_lines[2], Detect_Lower_lines[2]) - tolerance_vert_lines_Dis)
    Inner_Y_Line = (Detect_Upper_lines[1] + tolerance_hori_lines_Dis,
                    Detect_Lower_lines[1] - tolerance_hori_lines_Dis)
    print('Inner_X_Line, Inner_Y_Line')
    print(Inner_X_Line, Inner_Y_Line)

    left_top = (Inner_X_Line[0], Inner_Y_Line[0])
    right_down = (Inner_X_Line[1], Inner_Y_Line[1])
    print('left_top, right_down')
    print(left_top, right_down)

    # Debug output: draw the ROI diagonal on a gray copy.
    ROI_Lined_image = cv2.cvtColor(load_image, cv2.COLOR_BGR2GRAY)
    cv2.line(ROI_Lined_image, left_top, right_down, (0, 0, 255), 1)
    cv2.imwrite("Pre_8_1_Origin_Add_ROI_Rec.jpg", ROI_Lined_image)

    # --------------------------------------------------------------------
    # 8.2 Vertical lines: locate the divider right of the center column.
    Inner_Vert_Lines = []
    for index in range(len(TwoDim_detect_lines)):
        if TwoDim_detect_lines[index][0] == TwoDim_detect_lines[index][2]:
            Inner_Vert_Lines.append(TwoDim_detect_lines[index])

    # Discard vertical segments shorter than 80% of the image height.
    for index in range(len(Inner_Vert_Lines)):
        if abs(Inner_Vert_Lines[index][1] - Inner_Vert_Lines[index][3]) < (0.80 * height):
            Inner_Vert_Lines[index] = [0, 0, 0, 0]

    print('x1,y1,x2,y2, vertical lines with enough length')
    for x1, y1, x2, y2 in Inner_Vert_Lines:
        print(x1, y1, x2, y2)

    # Pick the vertical line closest to the horizontal center.
    minimum_distance_inner_vert_line = 0.5 * width
    Detect_Inner_Vert_Line = None  # NOTE(review): None when nothing qualifies
    for index in range(len(Inner_Vert_Lines)):
        if abs(Inner_Vert_Lines[index][0] - 0.5 * width) < minimum_distance_inner_vert_line:
            minimum_distance_inner_vert_line = abs(Inner_Vert_Lines[index][0] - 0.5 * width)
            Detect_Inner_Vert_Line = Inner_Vert_Lines[index]

    print('Detect_Inner_Vert_Line')
    print(Detect_Inner_Vert_Line)

    # 8.3 Bottom-right corner of the central block: the divider's lower
    #     endpoint, shrunk inward by the tolerances.
    Inner_right_down = (Detect_Inner_Vert_Line[0] - tolerance_vert_lines_Dis,
                        max(Detect_Inner_Vert_Line[1], Detect_Inner_Vert_Line[3]) - tolerance_hori_lines_Dis)
    print('Inner_right_down')
    print(Inner_right_down)

    # --------------------------------------------------------------------
    # 9. Slice the central block into single rows of text.
    # 9.1 Sub-image of the (sharpened + Canny) edge map: rows y, cols x.
    Region_Interest = edges[left_top[1]:Inner_right_down[1], left_top[0]:Inner_right_down[0]]
    cv2.imwrite("Pre_9_1_Region_Interest.jpg", Region_Interest)

    # 9.2 Horizontal projection: count non-zero edge pixels per row.
    SumCols = []
    for Vert_Y in range(Inner_right_down[1] - left_top[1]):
        SumCols.append(0)
        for Hori_X in range(Inner_right_down[0] - left_top[0]):
            if Region_Interest[Vert_Y][Hori_X] != 0:
                SumCols[Vert_Y] += 1

    print('len(SumCols)')
    print(len(SumCols))
    print('index,Sumcol_Value')
    for index in range(len(SumCols)):
        print(index, SumCols[index])

    # 9.3 Convert the projection into (start, end) row pairs, expressed
    #     in full-image coordinates.
    IsInText = False
    Start_End_Each_Texts = [[], []]  # [0] = start rows, [1] = end rows (exclusive)
    for index in range(len(SumCols)):
        if SumCols[index] != 0:
            if not IsInText:
                # First row of a new text region.
                Start_End_Each_Texts[0].append(index + left_top[1])
                IsInText = True
            # BUG FIX: the original read SumCols[index+1] unguarded
            # (IndexError when the last row contains text) and never
            # closed a single-row region, desyncing the start/end lists.
            if index + 1 == len(SumCols) or SumCols[index + 1] == 0:
                Start_End_Each_Texts[1].append(index + left_top[1] + 1)
        else:
            IsInText = False

    print('Start_End_Each_Texts')
    print(Start_End_Each_Texts[0])
    print(Start_End_Each_Texts[1])

    # 9.4 Otsu-binarize each text row locally (some rows are the inverted
    #     highlight bar, so a global threshold would fail) and OCR it.
    for index in range(len(Start_End_Each_Texts[0])):
        Sub_ROI = sharpen_load_image[Start_End_Each_Texts[0][index]:Start_End_Each_Texts[1][index],
                                     left_top[0]:Inner_right_down[0]]
        Sub_ROI = cv2.cvtColor(Sub_ROI, cv2.COLOR_BGR2GRAY)
        ret1, th1 = cv2.threshold(Sub_ROI, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        cv2.imwrite('Sub_ROI_TH_%s.jpg' % str(index), th1)
        cv2.imwrite('Sub_ROI_Gray_%s.jpg' % str(index), Sub_ROI)

        # Run Tesseract on the binarized row (re-read from disk, as in
        # the original flow).
        text = pytesseract.image_to_string(Image.open("Sub_ROI_TH_%s.jpg" % str(index)))
        print(text)

    # 9.5 Debug output: rectangle around every detected text row.
    for index in range(len(Start_End_Each_Texts[0])):
        cv2.rectangle(load_image,
                      (left_top[0], Start_End_Each_Texts[0][index]),
                      (Inner_right_down[0], Start_End_Each_Texts[1][index]),
                      (55, 255, 155), 1)
    cv2.imwrite('Rec.jpg', load_image)

#------------------------------------------------------------------------------------------------------------------------------------------


# Script entry point: run the full OCR pipeline when executed directly.
if __name__ == '__main__':
    main()
    #sys.exit(main())