123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447 |
- #!/usr/bin/env python
- # -*- coding=utf-8 -*-
- import shutil,os
- import cv2
- import json
- import base64
- import logging
- import numpy as np
- import threading
- import time
- from image_detect import ImageDetect
- from multiprocessing import Process,Lock
- from multiprocessing import Pool,Manager
- import csv
- import sys
- reload(sys)
- sys.setdefaultencoding('utf8')
plocker = Lock()

# Cross-platform advisory file locking: expose lock(file, flags) / unlock(file)
# plus LOCK_EX / LOCK_SH / LOCK_NB constants on both Windows and POSIX.
if os.name == 'nt':
    import win32con, win32file, pywintypes
    LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
    LOCK_SH = 0  # The default value
    LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
    __overlapped = pywintypes.OVERLAPPED()

    def lock(file, flags):
        """Acquire a LockFileEx lock on the open file object."""
        hfile = win32file._get_osfhandle(file.fileno())
        win32file.LockFileEx(hfile, flags, 0, 0xffff0000, __overlapped)

    def unlock(file):
        """Release the LockFileEx lock held on the open file object."""
        hfile = win32file._get_osfhandle(file.fileno())
        win32file.UnlockFileEx(hfile, 0, 0xffff0000, __overlapped)
elif os.name == 'posix':
    # BUGFIX: the module itself must be imported — the original only did
    # `from fcntl import ...`, so `fcntl.flock` raised NameError at call time.
    import fcntl
    from fcntl import LOCK_EX, LOCK_SH, LOCK_NB

    def lock(file, flags):
        """Acquire an flock() lock on the open file object."""
        fcntl.flock(file.fileno(), flags)

    def unlock(file):
        """Release the flock() lock held on the open file object."""
        fcntl.flock(file.fileno(), fcntl.LOCK_UN)
else:
    raise RuntimeError("File Locker only support NT and Posix platforms!")
- def get_mean(img,index):
- height=img.shape[0]
- width=img.shape[1]
- top=np.mean(img[index,width/4:width*3/4])
- top2=np.mean(img[index,width*3/8:width*5/8])
- bottom=np.mean(img[height-index,width/4:width*3/4])
- left=np.mean(img[height/4:height*3/4,index])
- left2=np.mean(img[height*3/8:height*5/8,index])
- right=np.mean(img[height/4:height*3/4,width-index])
- #print("top:",top,"top2:",top2,"left:",left,"left2:",left2)
- return {"top":top,"bottom":bottom,"left":left,"right":right,"is_view":top==top2 and left==left2}
def get_rect(img):
    """Locate the bounding box of non-white content in a grayscale image.

    Otsu-thresholds the image, then scans inward from each edge (skipping a
    10-pixel margin) for the first row/column whose central band is not pure
    white.  Draws the box in a debug window (blocks on a key press) and
    returns ``[x, y, w, h]``.
    """
    height = img.shape[0]
    width = img.shape[1]
    ret, img = cv2.threshold(img, 0, 255, cv2.THRESH_OTSU)

    margin = 10
    # Defaults cover an all-white image: the original left x1/x2/y2 unbound
    # (UnboundLocalError) when a scan loop never hit a non-white band.
    y1 = 0
    y2 = height - 1 - margin
    x1 = margin
    x2 = width - 1 - margin
    for y in range(margin, height):
        if np.mean(img[y, width // 4:width * 3 // 4]) != 255:
            y1 = y
            break
    for y in range(height - 1 - margin, margin, -1):
        if np.mean(img[y, width // 4:width * 3 // 4]) != 255:
            y2 = y
            break
    for x in range(margin, width):
        if np.mean(img[height // 4:height * 3 // 4, x]) != 255:
            x1 = x
            break
    for x in range(width - 1 - margin, margin, -1):
        if np.mean(img[height // 4:height * 3 // 4, x]) != 255:
            x2 = x
            break
    max_c = [x1, y1, x2 - x1, y2 - y1]
    print("max_c:", max_c)
    # Debug visualization: draw the detected rectangle and wait for a key.
    img = cv2.rectangle(img, (max_c[0], max_c[1]),
                        (max_c[0] + max_c[2], max_c[1] + max_c[3]), (0, 255, 0), 5)
    cv2.namedWindow("enhanced", 0)
    cv2.resizeWindow("enhanced", width // 2, height // 2)
    cv2.imshow("enhanced", img)
    cv2.waitKey(0)
    return max_c
def get_viewpoint(img_idx, img, is_bg_white=True):
    """Detect a rectangular viewport region inside a BGR image.

    Heuristic: sample two short horizontal strips near the top-left corner;
    if their means are identical the surrounding area is uniform background
    and a viewport is assumed to exist.  The image is then thresholded
    (inverted for white backgrounds) and the bounding rect of the largest
    contour is returned.

    :param img_idx: image index, used only for logging by callers.
    :param img: BGR image (3-channel numpy array).
    :param is_bg_white: True when the background is near-white; selects the
        threshold direction.
    :return: {"viewport": {"x","y","w","h","type":"C"}} or {} when no
        viewport / no contours are found.
    """
    height = img.shape[0]
    width = img.shape[1]
    l_t = int(height * 0.1 / 2)
    l_b = int(height * 0.2 / 2)
    l_l = int(width * 0.1 / 2)
    l_r = int(width * 0.2 / 2)
    mean1 = np.mean(img[l_t, l_l:l_r])
    mean2 = np.mean(img[l_b, l_l:l_r])
    if mean1 != mean2:
        # Corner strips differ -> no uniform background -> no viewport.
        return {}
    im_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    if is_bg_white:
        _, im_thresh = cv2.threshold(im_gray, 250, 255, cv2.THRESH_BINARY_INV)
    else:
        _, im_thresh = cv2.threshold(im_gray, 50, 255, cv2.THRESH_BINARY)
    im_contour = im_thresh.copy()
    # OpenCV 3.x findContours returns (image, contours, hierarchy);
    # 2.x and 4.x return (contours, hierarchy).
    if cv2.__version__.startswith('3.'):
        _, cnts, _ = cv2.findContours(im_contour, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    else:
        cnts, _ = cv2.findContours(im_contour, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    if len(cnts) == 0:
        logging.info('[get_viewpoint] cannot find any contours')
        return {}
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    logging.info('[get_viewpoint] found {} contours'.format(len(cnts)))
    x, y, w, h = cv2.boundingRect(cnts[0])
    return {"viewport": {"x": x, "y": y, "w": w, "h": h, "type": "C"}}
- debug_img = False
- def _show_img(im_inp, msg=None):
- if debug_img:
- if msg is not None:
- print('[IMAGE] ' + msg)
- h,w = im_inp.shape[:2]
- cv2.namedWindow("Example", cv2.WINDOW_NORMAL)
- cv2.resizeWindow('Example', int(w*0.4), int(h*0.4))
- cv2.moveWindow('Example', 600, 20)
- cv2.imshow("Example", im_inp)
- k = cv2.waitKey()
- if k == 27:
- cv2.destroyAllWindows()
def ai_progress(index, jpg_file, json_obj, json_obj2, ai, queue, stop_file, img, img_src):
    """Worker-process task: detect one image and queue its result.

    Resolves the viewport (from the .fld sidecar file first, then by image
    analysis), runs ``ai.do_pred_by_data`` on the base64 JPEG data, stores
    the result on disk via ``store_ai_result``, and always puts a result dict
    (at minimum ``{'index': index}``) on ``queue`` so the parent's collection
    loop terminates.

    :param index: sequence number of this image within the batch.
    :param jpg_file: absolute path of the image to process.
    :param json_obj: the batch request object.
    :param json_obj2: optional extra settings merged into the viewport dict.
    :param ai: initialized ImageDetect instance.
    :param queue: multiprocessing Manager queue for results.
    :param stop_file: sentinel path; existence aborts processing.
    :param img: base64-encoded JPEG bytes passed to the detector.
    :param img_src: decoded image array used for viewport analysis.
    :return: the last progress/log string (empty string when stopped).
    """
    result_json = {'index': index}
    try:
        if os.path.exists(stop_file):
            progress_str = "AI_log: jpg_file:" + jpg_file + " ,index:" + str(index) + " program is stopped"
            logging.info(progress_str)
            return ""
        progress_str = ""
        if not os.path.exists(jpg_file):
            progress_str = "AI_error:jpg_file not found:" + jpg_file
            logging.info(progress_str)
        else:
            vp = {}
            logging.info("read:" + jpg_file)
            progress_str = "AI_log: jpg_file:" + jpg_file + " ,index:" + str(index)
            logging.info(progress_str)
            try:
                # try to get viewport from fld file
                vp = get_fld_viewport(json_obj, jpg_file)
                if vp is None:
                    try:
                        # check whether the viewport exists in image or not
                        vp = get_viewpoint(index, img_src)
                        if "viewport" in vp:
                            progress_str = "AI_log:[get_viewpoint]: jpg_file:" + jpg_file + " ,index:" + str(index) + ", " + str(vp)
                            logging.info(progress_str)
                    except Exception as ex:
                        # str(ex) instead of ex.message (deprecated in Py2, gone in Py3)
                        progress_str = "AI_log:[get_viewpoint] jpg_file:" + jpg_file + " ,index:" + str(index) + ", " + str(ex)
                        logging.info(progress_str)
                else:
                    progress_str = "AI_log:[get_fld_viewport] jpg_file:" + jpg_file + " ,index:" + str(index) + " vp:" + str(vp)
                    logging.info(progress_str)
                if json_obj2 != None:
                    vp.update(json_obj2)
                progress_str = "AI_log: jpg_file:" + jpg_file + " ,index:" + str(index) + " vp:" + str(vp)
                logging.info(progress_str)
            except Exception as e:
                progress_str = "AI_log: jpg_file:" + jpg_file + " ,index:" + str(index) + ", [viewort-error]:" + str(e)
                logging.info(progress_str)
            #time.sleep(6-index)
            ai_result = ai.do_pred_by_data(img, vp)
            progress_str = "AI_result:" + str(index) + "," + ai_result
            logging.info(progress_str)
            temp_json = store_ai_result(json_obj, jpg_file, ai_result)
            result_json.update(temp_json)
    except Exception as e:
        progress_str = "AI_log: jpg_file:" + jpg_file + " ,index:" + str(index) + ", [AI_log-error]:" + str(e)
        logging.info(progress_str)
    finally:
        # Always report back so the parent's queue.get() loop can finish.
        queue.put(result_json)
        logging.info("AI_log:finally jpg_file:" + jpg_file + " ,index:" + str(index))
    return progress_str
def check_create_dir(dest_path):
    """Ensure the parent directory of ``dest_path`` exists, creating it if needed."""
    parent_dir = os.path.dirname(dest_path)
    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)
def store_ai_result(json_obj, jpg_file, ai_result):
    """Persist one detection result next to the source image.

    Writes ``<AI dir>/<name>.ai`` (the parsed result JSON), moves the
    detector's annotated image into the AI directory when the result carries
    an ``image_path``, and dumps ``content.details.data`` as a CSV when the
    result flags ``csv``.  All disk steps are best-effort: failures are
    printed and the remaining work continues.

    :return: ``{'file_results': {<basename>: <details value or "">}}``.
    """
    ai_json = json.loads(ai_result)
    arr_path = os.path.split(jpg_file)
    ai_path = os.path.join(arr_path[0], get_AI_dir(json_obj), arr_path[1])
    ai_file = os.path.splitext(ai_path)[0] + ".ai"
    check_create_dir(ai_file)
    # move the annotated image (if any) and store the raw result JSON
    try:
        image_path = ai_json["content"]["additional"]["image_path"]
        if os.path.exists(image_path):
            shutil.move(image_path, ai_path)
        with open(ai_file, 'w') as fw:
            json.dump(ai_json, fw)
    except Exception:
        print("key:image_path not found!")

    # write csv when the result requests it
    try:
        if "csv" in ai_json["content"]["details"]:
            data = ai_json["content"]["details"]["data"]
            ai_file = os.path.splitext(ai_path)[0] + ".csv"
            with open(ai_file, 'w') as csvfile:
                cw = csv.writer(csvfile, lineterminator='\n')
                for k, v in data.items():
                    # prefix every row with its group key before writing
                    for item in v:
                        item.insert(0, k)
                    cw.writerows(v)
    except Exception as e:
        print("csv error!", str(e))

    result_json = {}
    try:
        result_json['file_results'] = {arr_path[1]: ai_json['content']['details']['value']}
    except Exception:
        print("result_json error:", ai_json)
        result_json['file_results'] = {arr_path[1]: ""}
    return result_json
# reading the json viewport data from /AI/<name>.fld
def get_fld_viewport(json_obj, jpg_file):
    """Load the viewport JSON saved next to ``jpg_file`` in the AI directory.

    Looks for ``<AI dir>/<image stem>.fld``; when present, parses it and
    coerces the viewport x/y/w/h fields to int.

    :return: the viewport dict, or None when the file is missing or any
        error occurs (errors are logged, never raised).
    """
    try:
        dir_path = os.path.join(os.path.dirname(jpg_file), get_AI_dir(json_obj))
        file_name = os.path.basename(jpg_file)
        fld_file = os.path.join(dir_path, os.path.splitext(file_name)[0] + ".fld")
        logging.info("fld_file:" + fld_file)
        if os.path.exists(fld_file):
            with open(fld_file, 'r') as load_f:
                vp = json.load(load_f)
            if vp != None:
                # the .fld file may store coordinates as strings/floats
                for key in ("x", "y", "w", "h"):
                    vp["viewport"][key] = int(vp["viewport"][key])
            return vp
        else:
            return None
    except Exception as e:
        # str(e) instead of e.message (deprecated in Py2, removed in Py3)
        progress_str = "AI_error:[get_fld_viewport]" + str(e)
        logging.info(progress_str)
        return None
def get_AI_dir(json_obj):
    """Return the AI output directory name for a batch request.

    For requests with a version string greater than "1.0" (lexicographic
    compare) the name encodes category/item/num as ``AI_CCIINN`` with
    zero-padded two-digit fields; older requests use the legacy ``"AI"``.
    """
    # "in" instead of dict.has_key (removed in Python 3)
    if "version" in json_obj and json_obj["version"] > "1.0":
        num = json_obj.get("num", "00")
        return "AI_%s%s%s" % (json_obj["catagory"].zfill(2),
                              json_obj["items"].zfill(2),
                              num.zfill(2))
    return "AI"
def write_ai_end(json_obj, ai_tmp_report, report_json):
    """Write the batch report, finalizing it when the request carries "end".

    Stamps ``report_json['end']`` when present, rewrites the working report
    file (``report.ai_``), and on the final batch renames it to the
    published ``report.ai``.
    """
    # end batch detect report
    if "end" in json_obj:
        # set end time
        report_json['end'] = json_obj['end']
    # store .ai_ with report_json
    if os.path.exists(ai_tmp_report):
        with open(ai_tmp_report, 'w') as fw:
            if "file_count" in report_json:
                print("file_count:", report_json['file_count'])
            json.dump(report_json, fw)
    if "end" in json_obj:
        # end: rename .ai_ to .ai
        ai_report = os.path.join(json_obj['path'], get_AI_dir(json_obj), "report.ai")
        if os.path.exists(ai_tmp_report):
            shutil.move(ai_tmp_report, ai_report)
def ai_detect(THREAD_COUNT, json_obj, json_obj2):
    """Run batch image detection over the images listed in ``json_obj``.

    Fans each image out to a process pool running ``ai_progress``, collects
    per-image results from a shared queue, merges them into the on-disk
    report (``report.ai_`` / ``report.ai``), and returns the number of
    images accounted for (processed plus missing-on-disk).

    :param THREAD_COUNT: size of the multiprocessing pool.
    :param json_obj: batch request; must contain "images" (';'-separated
        names) and "path"; "start"/"end" mark the first/last batch.
    :param json_obj2: optional extra settings forwarded to ai_progress.
    :return: count of images handled in this call.
    """
    start = time.time()
    stop_file = os.path.join(os.path.split(os.path.realpath(__file__))[0], "stop.ai")
    # removing stop file if a stale stop.ai exists from a previous run
    if os.path.exists(stop_file):
        os.remove(stop_file)
        progress_str = "AI_log:" + stop_file + " found,it's removed "
        logging.info(progress_str)
    # start batch detect
    ai_tmp_report = os.path.join(json_obj['path'], get_AI_dir(json_obj), "report.ai_")
    report_json = {}
    if "start" in json_obj:
        # first batch: initialize a fresh report
        report_json['start'] = json_obj['start']
        report_json['path'] = json_obj['path_client']
        report_json['file_count'] = 0
        report_json['file_results'] = {}
        check_create_dir(ai_tmp_report)
        with open(ai_tmp_report, 'w') as fw:
            json.dump(report_json, fw)
    else:
        # follow-up batch: continue from the report written so far
        if os.path.exists(ai_tmp_report):
            with open(ai_tmp_report, 'r') as fr:
                report_json = json.load(fr)
    num = 0
    image_names = []
    if len(json_obj["images"]) > 0:
        # images not empty
        image_names = json_obj["images"].split(";")
        ai = ImageDetect()
        initTime = time.time()
        ai.init(json.dumps(json_obj))
        progress_str = "AI_log:init OK! consume:" + str(time.time() - initTime) + ", images:" + str(len(image_names))
        logging.info(progress_str)
        pool = Pool(THREAD_COUNT)
        index = 0
        queue = Manager().Queue()

        for image_name in image_names:
            jpg_file = os.path.join(json_obj["path"], image_name)
            if not os.path.exists(jpg_file):
                print("file not found:", jpg_file)
                # missing files still count toward the collection total below
                num += 1
                continue
            # np.fromfile + imdecode handles non-ASCII paths that cv2.imread cannot
            img_src = cv2.imdecode(np.fromfile(jpg_file, dtype=np.uint8), -1)
            img = cv2.imencode('.jpg', img_src)[1]
            img = base64.b64encode(img)
            pool.apply_async(func=ai_progress, args=(index, jpg_file, json_obj, json_obj2, ai, queue, stop_file, img, img_src))
            index += 1

        # waiting for the submitted tasks to finish
        while num < len(image_names):
            res = queue.get()
            logging.info("AI_progress:" + str(num))
            logging.info("AI_queue:" + str(num) + ", index:" + str(res['index']))
            num += 1
            if 'file_results' in res:
                if 'file_results' in report_json:
                    report_json['file_results'].update(res['file_results'])
                else:
                    print("index:", str(res['index']), "report_json no file_results:", report_json)
            else:
                print("index:", str(res['index']), "no file_results:", res)

        if "file_count" in report_json:
            report_json['file_count'] += num
            write_ai_end(json_obj, ai_tmp_report, report_json)
        else:
            print("report_json no file_count:", report_json)
        pool.close()
        pool.join()
    else:
        # empty batch: still flush/finalize the report
        write_ai_end(json_obj, ai_tmp_report, report_json)

    progress_str = "AI_log:" + str(len(image_names)) + " jpg files detected! consume:" + str(time.time() - start)
    logging.info(progress_str)
    return num
if __name__ == '__main__':
    # Default size of the detection worker pool.
    THREAD_COUNT=3
    if len(sys.argv)<2:
        progress_str="AI_error:missing json parameter"
        logging.info(progress_str)
        sys.exit()
    # NOTE(review): argv is assumed to arrive GBK-encoded (Chinese Windows
    # console); decode('gbk').encode() re-encodes via the Python 2 default
    # codec set by sys.setdefaultencoding('utf8') above — confirm if this
    # script ever runs on a non-GBK console.
    json_str=sys.argv[1].decode('gbk').encode()
    logging.info("sys.argv[1]:"+json_str)
    json_obj2=None
    if len(sys.argv)>2:
        # Optional second JSON argument with extra settings; ai_progress
        # merges it into the viewport dict before prediction.
        json_str2=sys.argv[2].decode('gbk').encode()
        logging.info("sys.argv[2]:"+json_str2)
        json_obj2=json.loads(json_str2)
    json_obj=json.loads(json_str)

    # Validate the mandatory request fields before starting the batch.
    if not "func" in json_obj:
        progress_str="AI_error:missing func property"
        logging.info(progress_str)
        sys.exit()
    if not "images" in json_obj:
        progress_str="AI_error:missing images property"
        logging.info(progress_str)
        sys.exit()
    if not "path" in json_obj:
        progress_str="AI_error:missing path property"
        logging.info(progress_str)
        sys.exit()
    # The "zzsb" function is forced to a single worker process —
    # presumably not parallel-safe; TODO confirm.
    if json_obj["func"]=="zzsb":
        THREAD_COUNT=1
    # ai_detect also removes a stale stop.ai sentinel before starting.
    logging.info("AI_log: ibm_ai start... PROCESS_COUNT:"+str(THREAD_COUNT))
    ai_detect(THREAD_COUNT,json_obj,json_obj2)
|