#!/usr/bin/env python
# -*- coding=utf-8 -*-
"""Batch AI image-detection driver (Python 2).

Reads a JSON job description from the command line, runs ImageDetect over
the listed images with a multiprocessing pool, writes one ``<name>.ai``
result file per image into the job's AI output directory, and maintains an
aggregate batch report (``report.ai_`` while running, renamed to
``report.ai`` when the job carries an "end" timestamp).
"""
import shutil
import os
import cv2
import json
import base64
import logging
import numpy as np
import threading
import time
from image_detect import ImageDetect
from multiprocessing import Process, Lock
from multiprocessing import Pool, Manager
import csv
import sys

# Python 2 idiom: make utf-8 the default codec for implicit str<->unicode
# conversions (job strings arrive gbk-encoded from the Windows shell).
reload(sys)
sys.setdefaultencoding('utf8')

plocker = Lock()

# ---------------------------------------------------------------------------
# Cross-platform advisory file locking helpers (lock/unlock).
# ---------------------------------------------------------------------------
if os.name == 'nt':
    import win32con, win32file, pywintypes

    LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
    LOCK_SH = 0  # The default value
    LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
    __overlapped = pywintypes.OVERLAPPED()

    def lock(file, flags):
        """Acquire a Windows byte-range lock over the whole file."""
        hfile = win32file._get_osfhandle(file.fileno())
        win32file.LockFileEx(hfile, flags, 0, 0xffff0000, __overlapped)

    def unlock(file):
        """Release the Windows byte-range lock taken by lock()."""
        hfile = win32file._get_osfhandle(file.fileno())
        win32file.UnlockFileEx(hfile, 0, 0xffff0000, __overlapped)
elif os.name == 'posix':
    # fix: the module itself is needed below (fcntl.flock / fcntl.LOCK_UN);
    # the original only imported the flag constants, so lock()/unlock()
    # raised NameError on first use.
    import fcntl
    from fcntl import LOCK_EX, LOCK_SH, LOCK_NB

    def lock(file, flags):
        """Acquire a POSIX flock on the file."""
        fcntl.flock(file.fileno(), flags)

    def unlock(file):
        """Release the POSIX flock on the file."""
        fcntl.flock(file.fileno(), fcntl.LOCK_UN)
else:
    raise RuntimeError("File Locker only support NT and Posix platforms!")


def get_mean(img, index):
    """Sample mean gray levels along the image border at offset ``index``.

    Returns a dict with the mean of a horizontal strip near the top/bottom
    edge and a vertical strip near the left/right edge; ``is_view`` is True
    when two nested strips agree exactly, i.e. the border looks like a flat
    uniform background.
    """
    height = img.shape[0]
    width = img.shape[1]
    top = np.mean(img[index, width / 4:width * 3 / 4])
    top2 = np.mean(img[index, width * 3 / 8:width * 5 / 8])
    bottom = np.mean(img[height - index, width / 4:width * 3 / 4])
    left = np.mean(img[height / 4:height * 3 / 4, index])
    left2 = np.mean(img[height * 3 / 8:height * 5 / 8, index])
    right = np.mean(img[height / 4:height * 3 / 4, width - index])
    #print("top:",top,"top2:",top2,"left:",left,"left2:",left2)
    return {"top": top, "bottom": bottom, "left": left, "right": right,
            "is_view": top == top2 and left == left2}


def get_rect(img):
    """Find the bounding box of non-white content in a grayscale image.

    After Otsu thresholding, scans inwards from each edge (skipping a 10px
    margin) for the first row/column whose central strip is not pure white.
    Shows the detected rectangle in a debug window and returns
    ``[x, y, w, h]``.
    """
    height = img.shape[0]
    width = img.shape[1]
    ret, img = cv2.threshold(img, 0, 255, cv2.THRESH_OTSU)
    margin = 10
    # fix: give every coordinate a safe default so an all-white image can no
    # longer raise NameError on an unbound x1/x2/y2 when a loop never breaks.
    y1 = 0
    y2 = height - 1
    x1 = 0
    x2 = width - 1
    for y in range(margin, height):
        if np.mean(img[y, width / 4:width * 3 / 4]) != 255:
            y1 = y
            break
    for y in range(height - 1 - margin, margin, -1):
        if np.mean(img[y, width / 4:width * 3 / 4]) != 255:
            y2 = y
            break
    for x in range(margin, width):
        if np.mean(img[height / 4:height * 3 / 4, x]) != 255:
            x1 = x
            break
    for x in range(width - 1 - margin, margin, -1):
        if np.mean(img[height / 4:height * 3 / 4, x]) != 255:
            x2 = x
            break
    max_c = [x1, y1, x2 - x1, y2 - y1]
    print("max_c:", max_c)
    img = cv2.rectangle(img, (max_c[0], max_c[1]),
                        (max_c[0] + max_c[2], max_c[1] + max_c[3]),
                        (0, 255, 0), 5)
    cv2.namedWindow("enhanced", 0)
    cv2.resizeWindow("enhanced", width / 2, height / 2)
    cv2.imshow("enhanced", img)
    cv2.waitKey(0)
    # cv2.imwrite(r"C:\Users\lenovo\Desktop\bearing\error\bbb.jpg",img)
    return max_c


def get_viewpoint(img_idx, img, is_bg_white=True):
    """Detect a viewport (region of interest) inside BGR image ``img``.

    Heuristic: when two sample rows in the top-left corner have identical
    means the border is a uniform background and a viewport is assumed to
    exist; its bounding box is taken from the largest contour after
    thresholding.  ``is_bg_white`` selects threshold polarity.

    Returns ``{"viewport": {"x","y","w","h","type"}}`` or ``{}`` when no
    viewport is found.  ``img_idx`` is kept for logging compatibility.
    """
    height = img.shape[0]
    width = img.shape[1]
    l_t = int(height * 0.1 / 2)
    l_b = int(height * 0.2 / 2)
    l_l = int(width * 0.1 / 2)
    l_r = int(width * 0.2 / 2)
    mean1 = np.mean(img[l_t, l_l:l_r])
    mean2 = np.mean(img[l_b, l_l:l_r])
    #print("l_t:",l_t,"l_b:",l_b,"l_l:",l_l,"l_r:",l_r,"mean1:",mean1,"mean2:",mean2)
    if mean1 != mean2:
        # Border is not uniform: assume the whole image is the view.
        return {}
    im_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    if is_bg_white:
        _, im_thresh = cv2.threshold(im_gray, 250, 255, cv2.THRESH_BINARY_INV)
    else:
        _, im_thresh = cv2.threshold(im_gray, 50, 255, cv2.THRESH_BINARY)
    im_contour = im_thresh.copy()
    # OpenCV 3.x returns (image, contours, hierarchy); 2.x/4.x return
    # (contours, hierarchy).
    if cv2.__version__.find('3.') == 0:
        _, cnts, _ = cv2.findContours(im_contour, cv2.RETR_LIST,
                                      cv2.CHAIN_APPROX_SIMPLE)
    else:
        cnts, _ = cv2.findContours(im_contour, cv2.RETR_LIST,
                                   cv2.CHAIN_APPROX_SIMPLE)
    if len(cnts) == 0:
        logging.info('[get_viewpoint] cannot find any contours')
        return {}
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    logging.info('[get_viewpoint] found {} contours'.format(len(cnts)))
    x, y, w, h = cv2.boundingRect(cnts[0])
    return {"viewport": {"x": x, "y": y, "w": w, "h": h, "type": "C"}}


debug_img = False


def _show_img(im_inp, msg=None):
    """Debug helper: display ``im_inp`` scaled to 40% when debug_img is on."""
    if not debug_img:
        return
    if msg is not None:
        print('[IMAGE] ' + msg)
    h, w = im_inp.shape[:2]
    cv2.namedWindow("Example", cv2.WINDOW_NORMAL)
    cv2.resizeWindow('Example', int(w * 0.4), int(h * 0.4))
    cv2.moveWindow('Example', 600, 20)
    cv2.imshow("Example", im_inp)
    k = cv2.waitKey()
    if k == 27:  # ESC closes the window
        cv2.destroyAllWindows()


def ai_progress(index, jpg_file, json_obj, json_obj2, ai, queue, stop_file,
                img, img_src):
    """Pool worker: detect one image and report its result.

    Resolves the viewport (from a ``.fld`` sidecar file, falling back to
    image-based detection), runs the detector on the base64 jpg payload,
    stores the per-image result, and ALWAYS pushes a result dict onto
    ``queue`` so the parent's drain loop terminates.  Returns the last
    progress string (logging/debug only).
    """
    result_json = {'index': index}
    try:
        if os.path.exists(stop_file):
            # A stop.ai file in the script directory aborts the batch.
            progress_str = "AI_log: jpg_file:" + jpg_file + " ,index:" + str(index) + " program is stopped"
            logging.info(progress_str)
            return ""
        progress_str = ""
        if not os.path.exists(jpg_file):
            progress_str = "AI_error:jpg_file not found:" + jpg_file
            logging.info(progress_str)
        else:
            vp = {}
            logging.info("read:" + jpg_file)
            progress_str = "AI_log: jpg_file:" + jpg_file + " ,index:" + str(index)
            logging.info(progress_str)
            try:
                # try to get viewport from fld file
                vp = get_fld_viewport(json_obj, jpg_file)
                if vp is None:
                    try:
                        # check whether the viewport exists in image or not
                        vp = get_viewpoint(index, img_src)
                        if "viewport" in vp:
                            progress_str = "AI_log:[get_viewpoint]: jpg_file:" + jpg_file + " ,index:" + str(index) + ", " + str(vp)
                            logging.info(progress_str)
                    except Exception as ex:
                        progress_str = "AI_log:[get_viewpoint] jpg_file:" + jpg_file + " ,index:" + str(index) + ", " + ex.message
                        logging.info(progress_str)
                else:
                    progress_str = "AI_log:[get_fld_viewport] jpg_file:" + jpg_file + " ,index:" + str(index) + " vp:" + str(vp)
                    logging.info(progress_str)
                if json_obj2 != None:
                    # Extra per-job options override/extend the viewport dict.
                    vp.update(json_obj2)
                progress_str = "AI_log: jpg_file:" + jpg_file + " ,index:" + str(index) + " vp:" + str(vp)
                logging.info(progress_str)
            except Exception as e:
                progress_str = "AI_log: jpg_file:" + jpg_file + " ,index:" + str(index) + ", [viewort-error]:" + e.message
                logging.info(progress_str)
            ai_result = ai.do_pred_by_data(img, vp)
            progress_str = "AI_result:" + str(index) + "," + ai_result
            logging.info(progress_str)
            temp_json = store_ai_result(json_obj, jpg_file, ai_result)
            result_json.update(temp_json)
    except Exception as e:
        progress_str = "AI_log: jpg_file:" + jpg_file + " ,index:" + str(index) + ", [AI_log-error]:" + e.message
        logging.info(progress_str)
    finally:
        # Always report back, even on error/stop, so the parent can count.
        queue.put(result_json)
        logging.info("AI_log:finally jpg_file:" + jpg_file + " ,index:" + str(index))
    return progress_str


def check_create_dir(dest_path):
    """Ensure the parent directory of ``dest_path`` exists."""
    dir_dest = os.path.dirname(dest_path)
    if not os.path.exists(dir_dest):
        os.makedirs(dir_dest)


def store_ai_result(json_obj, jpg_file, ai_result):
    """Persist one detector result next to its source image.

    Writes ``<name>.ai`` (the JSON result) into the job's AI directory,
    best-effort moves the detector's rendered image there, and dumps a
    ``<name>.csv`` when the result carries tabular data.  Returns
    ``{'file_results': {filename: value}}`` for the batch report.
    """
    ai_json = json.loads(ai_result)
    arr_path = os.path.split(jpg_file)
    ai_path = os.path.join(arr_path[0], get_AI_dir(json_obj), arr_path[1])
    ai_file = os.path.splitext(ai_path)[0] + ".ai"
    check_create_dir(ai_file)
    # move image_path (best-effort: results without a rendered image lack
    # the key entirely)
    try:
        image_path = ai_json["content"]["additional"]["image_path"]
        if os.path.exists(image_path):
            shutil.move(image_path, ai_path)
    except:
        print("key:image_path not found!")
    # fix: write the .ai file unconditionally.  In the original the dump sat
    # inside the try above, so any result missing image_path silently got no
    # .ai file at all.
    with open(ai_file, 'w') as fw:
        json.dump(ai_json, fw)
    # write csv
    try:
        if ai_json["content"]["details"].has_key("csv"):
            data = ai_json["content"]["details"]["data"]
            ai_file = os.path.splitext(ai_path)[0] + ".csv"
            with open(ai_file, 'w') as csvfile:
                cw = csv.writer(csvfile, lineterminator='\n')
                for k, v in data.items():
                    for item in v:
                        item.insert(0, k)  # prefix each row with its group key
                    cw.writerows(v)
    except Exception as e:
        print("csv error!", e.message)
    result_json = {}
    try:
        result_json['file_results'] = {arr_path[1]: ai_json['content']['details']['value']}
    except:
        print("result_json error:", ai_json)
        result_json['file_results'] = {arr_path[1]: ""}
    return result_json


#reading the json viewport data from /AI/jpg.fld
def get_fld_viewport(json_obj, jpg_file):
    """Load the viewport JSON from ``<AI dir>/<image>.fld``.

    Coordinates are coerced to int for downstream slicing.  Returns the
    parsed dict, or None when the sidecar file is absent or unreadable.
    """
    try:
        dir_path = os.path.join(os.path.dirname(jpg_file), get_AI_dir(json_obj))
        file_name = os.path.basename(jpg_file)
        fld_file = os.path.join(dir_path, os.path.splitext(file_name)[0] + ".fld")
        logging.info("fld_file:" + fld_file)
        if os.path.exists(fld_file):
            with open(fld_file, 'r') as load_f:
                vp = json.load(load_f)
            if vp != None:
                vp["viewport"]["x"] = int(vp["viewport"]["x"])
                vp["viewport"]["y"] = int(vp["viewport"]["y"])
                vp["viewport"]["w"] = int(vp["viewport"]["w"])
                vp["viewport"]["h"] = int(vp["viewport"]["h"])
            return vp
        else:
            return None
    except Exception as e:
        progress_str = "AI_error:[get_fld_viewport]" + e.message
        logging.info(progress_str)
        return None


def get_AI_dir(json_obj):
    """Return the job's AI output directory name.

    Jobs with version > "1.0" encode category/items/num as ``AI_CCIINN``
    (two zero-padded digits each); older jobs use a plain ``AI`` directory.
    """
    if json_obj.has_key("version") and json_obj["version"] > "1.0":
        num = "00"
        if json_obj.has_key("num"):
            num = json_obj["num"]
        return "AI_%s%s%s" % (json_obj["catagory"].zfill(2),
                              json_obj["items"].zfill(2), num.zfill(2))
    else:
        return "AI"


def write_ai_end(json_obj, ai_tmp_report, report_json):
    """Flush the batch report and, on the final batch, publish it.

    The running report lives in ``report.ai_``; when the job carries an
    "end" timestamp it is stamped into the report and the file is renamed
    to ``report.ai`` so clients only ever see completed reports.
    """
    #end batch detect report
    if json_obj.has_key("end"):
        #set end time
        report_json['end'] = json_obj['end']
    #store .ai_ with report_json
    if os.path.exists(ai_tmp_report):
        with open(ai_tmp_report, 'w') as fw:
            if report_json.has_key("file_count"):
                print("file_count:", report_json['file_count'])
            json.dump(report_json, fw)
    if json_obj.has_key("end"):
        #end rename .ai_ to ai
        ai_report = os.path.join(json_obj['path'], get_AI_dir(json_obj), "report.ai")
        if os.path.exists(ai_tmp_report):
            shutil.move(ai_tmp_report, ai_report)


def ai_detect(THREAD_COUNT, json_obj, json_obj2):
    """Run the detector over every image listed in job ``json_obj``.

    Spawns a Pool of THREAD_COUNT workers, feeds each image through
    ai_progress, collects per-image results from a shared Manager queue and
    updates the running batch report via write_ai_end.
    """
    start = time.time()
    stop_file = os.path.join(os.path.split(os.path.realpath(__file__))[0], "stop.ai")
    if os.path.exists(stop_file):
        os.remove(stop_file)
        progress_str = "AI_log:" + stop_file + " found,it's removed "
        logging.info(progress_str)
    #start batch detect
    ai_tmp_report = os.path.join(json_obj['path'], get_AI_dir(json_obj), "report.ai_")
    report_json = {}
    if json_obj.has_key("start"):
        # First batch of the job: create a fresh report skeleton.
        report_json['start'] = json_obj['start']
        report_json['path'] = json_obj['path_client']
        report_json['file_count'] = 0
        report_json['file_results'] = {}
        check_create_dir(ai_tmp_report)
        with open(ai_tmp_report, 'w') as fw:
            json.dump(report_json, fw)
    else:
        # Continuation batch: resume from the on-disk running report.
        if os.path.exists(ai_tmp_report):
            with open(ai_tmp_report, 'r') as fr:
                report_json = json.load(fr)
    num = 0
    image_names = []
    if len(json_obj["images"]) > 0:
        #images not empty
        image_names = json_obj["images"].split(";")
    ai = ImageDetect()
    initTime = time.time()
    ai.init(json.dumps(json_obj))
    progress_str = "AI_log:init OK! consume:" + str(time.time() - initTime) + ", images:" + str(len(image_names))
    logging.info(progress_str)
    pool = Pool(THREAD_COUNT)
    index = 0
    queue = Manager().Queue()
    for image_name in image_names:
        jpg_file = os.path.join(json_obj["path"], image_name)
        if not os.path.exists(jpg_file):
            print("file not found:", jpg_file)
            num += 1
            continue
        # np.fromfile + imdecode handles non-ascii paths on Windows.
        img_src = cv2.imdecode(np.fromfile(jpg_file, dtype=np.uint8), -1)
        img = cv2.imencode('.jpg', img_src)[1]
        img = base64.b64encode(img)
        pool.apply_async(func=ai_progress,
                         args=(index, jpg_file, json_obj, json_obj2, ai,
                               queue, stop_file, img, img_src))
        index += 1
    #waiting for these threads to finish
    # NOTE(review): the original source was truncated in this region; the
    # drain loop below is a conservative reconstruction (one queue.get per
    # submitted task, merge file_results, then finalize) -- verify against
    # VCS history.
    num2 = 0
    while num2 < index:
        result_json = queue.get()
        num2 += 1
        if 'file_results' in result_json:
            report_json.setdefault('file_results', {}).update(result_json['file_results'])
    report_json['file_count'] = report_json.get('file_count', 0) + index
    pool.close()
    pool.join()
    write_ai_end(json_obj, ai_tmp_report, report_json)
    logging.info("AI_log:detect finished, consume:" + str(time.time() - start))


if __name__ == '__main__':
    # NOTE(review): the original source was truncated just before this
    # guard; default THREAD_COUNT and the argv[1] handling are reconstructed
    # from the surviving tail -- verify against VCS history.
    THREAD_COUNT = 4
    json_str = sys.argv[1].decode('gbk').encode()
    logging.info("sys.argv[1]:" + json_str)
    json_obj2 = None
    if len(sys.argv) > 2:
        json_str2 = sys.argv[2].decode('gbk').encode()
        logging.info("sys.argv[2]:" + json_str2)
        json_obj2 = json.loads(json_str2)
    json_obj = json.loads(json_str)
    if not "func" in json_obj:
        progress_str = "AI_error:missing func property"
        logging.info(progress_str)
        sys.exit()
    if not "images" in json_obj:
        progress_str = "AI_error:missing images property"
        logging.info(progress_str)
        sys.exit()
    if not "path" in json_obj:
        progress_str = "AI_error:missing path property"
        logging.info(progress_str)
        sys.exit()
    if json_obj["func"] == "zzsb":
        # "zzsb" jobs run single-worker.
        THREAD_COUNT = 1
    #removing stop file if the stop.ai has existed
    logging.info("AI_log: ibm_ai start... PROCESS_COUNT:" + str(THREAD_COUNT))
    ai_detect(THREAD_COUNT, json_obj, json_obj2)