# ibm_ai.py — batch AI image-detection driver (~15 KB)
#!/usr/bin/env python
# -*- coding=utf-8 -*-
# Batch image-detection driver: loads images, dispatches them to ImageDetect
# workers via a multiprocessing pool, and writes per-image .ai/.csv results
# plus a batch-level report.ai file.
import shutil,os
import cv2
import json
import base64
import logging
import numpy as np
import threading
import time
from image_detect import ImageDetect
from multiprocessing import Process,Lock
from multiprocessing import Pool,Manager
import csv
import sys
# Python 2 only: reload(sys) restores the setdefaultencoding attribute that
# site.py removes, so the process-wide default str encoding can be set.
reload(sys)
sys.setdefaultencoding('utf8')
# Module-level lock shared by worker processes.
plocker=Lock()
  19. if os.name == 'nt':
  20. import win32con, win32file, pywintypes
  21. LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
  22. LOCK_SH = 0 # The default value
  23. LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
  24. __overlapped = pywintypes.OVERLAPPED()
  25. def lock(file, flags):
  26. hfile = win32file._get_osfhandle(file.fileno())
  27. win32file.LockFileEx(hfile, flags, 0, 0xffff0000, __overlapped)
  28. def unlock(file):
  29. hfile = win32file._get_osfhandle(file.fileno())
  30. win32file.UnlockFileEx(hfile, 0, 0xffff0000, __overlapped)
  31. elif os.name == 'posix':
  32. from fcntl import LOCK_EX, LOCK_SH, LOCK_NB
  33. def lock(file, flags):
  34. fcntl.flock(file.fileno( ), flags)
  35. def unlock(file):
  36. fcntl.flock(file.fileno( ), fcntl.LOCK_UN)
  37. else:
  38. raise RuntimeError("File Locker only support NT and Posix platforms!")
  39. def get_mean(img,index):
  40. height=img.shape[0]
  41. width=img.shape[1]
  42. top=np.mean(img[index,width/4:width*3/4])
  43. top2=np.mean(img[index,width*3/8:width*5/8])
  44. bottom=np.mean(img[height-index,width/4:width*3/4])
  45. left=np.mean(img[height/4:height*3/4,index])
  46. left2=np.mean(img[height*3/8:height*5/8,index])
  47. right=np.mean(img[height/4:height*3/4,width-index])
  48. #print("top:",top,"top2:",top2,"left:",left,"left2:",left2)
  49. return {"top":top,"bottom":bottom,"left":left,"right":right,"is_view":top==top2 and left==left2}
  50. def get_rect(img):
  51. height=img.shape[0]
  52. width=img.shape[1]
  53. ret, img = cv2.threshold(img, 0, 255, cv2.THRESH_OTSU)
  54. y1=0
  55. margin=10
  56. for y in range(margin,height):
  57. if np.mean(img[y,width/4:width*3/4])!=255:
  58. y1=y
  59. break
  60. for y in range(height-1-margin,margin,-1):
  61. if np.mean(img[y,width/4:width*3/4])!=255:
  62. y2=y
  63. break
  64. for x in range(margin,width):
  65. if np.mean(img[height/4:height*3/4,x])!=255:
  66. x1=x
  67. break
  68. for x in range(width-1-margin,margin,-1):
  69. if np.mean(img[height/4:height*3/4,x])!=255:
  70. x2=x
  71. break
  72. c_x=x1+int((x2-x1)/2*0.2)
  73. c_y=y1+int((y2-y1)/2*0.2)
  74. max_c=[x1,y1,x2-x1,y2-y1]
  75. print("max_c:",max_c)
  76. img=cv2.rectangle(img,(max_c[0],max_c[1]),(max_c[0]+max_c[2],max_c[1]+max_c[3]),(0,255,0),5)
  77. cv2.namedWindow("enhanced",0)
  78. cv2.resizeWindow("enhanced",width/2,height/2)
  79. cv2.imshow("enhanced",img)
  80. cv2.waitKey(0)
  81. # cv2.imwrite(r"C:\Users\lenovo\Desktop\bearing\error\bbb.jpg",img)
  82. return max_c
  83. def get_viewpoint(img_idx,img, is_bg_white=True):
  84. # height=img.shape[0]
  85. # width=img.shape[1]
  86. # logging.info("AI_log: [get_viewpoint] index:"+str(img_idx)+", width:"+str(width)+", height:"+str(height))
  87. # index =5
  88. # mean=get_mean(img,index)
  89. # if mean["is_view"]:
  90. # rect=get_rect(img)
  91. # # c_x=x1+int((x2-x1)/2*0.2)
  92. # # c_y=y1+int((y2-y1)/2*0.2)
  93. # # shape = "C" if img[c_y,c_x]==mean["top"] else "R"
  94. # return {"viewport":{"x":rect[0],"y":rect[1],"w":rect[2],"h":rect[3],"type":"C"}} if rect!=None else rect
  95. # else:
  96. # return {}
  97. height=img.shape[0]
  98. width=img.shape[1]
  99. l_t=int(height*0.1/2)
  100. l_b=int(height*0.2/2)
  101. l_l=int(width*0.1/2)
  102. l_r=int(width*0.2/2)
  103. mean1=np.mean(img[l_t,l_l:l_r])
  104. mean2=np.mean(img[l_b,l_l:l_r])
  105. #print("l_t:",l_t,"l_b:",l_b,"l_l:",l_l,"l_r:",l_r,"mean1:",mean1,"mean2:",mean2)
  106. if mean1==mean2:
  107. im_cut = None
  108. cut_circle = None
  109. im_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
  110. im_thresh = None
  111. if is_bg_white:
  112. _, im_thresh = cv2.threshold(im_gray, 250, 255, cv2.THRESH_BINARY_INV)
  113. else:
  114. _, im_thresh = cv2.threshold(im_gray, 50, 255, cv2.THRESH_BINARY)
  115. im_contour = im_thresh.copy()
  116. if cv2.__version__.find('3.') == 0:
  117. _, cnts, _ = cv2.findContours(im_contour, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
  118. else:
  119. cnts, _ = cv2.findContours(im_contour, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
  120. num = len(cnts)
  121. if num == 0:
  122. logging.info('[get_viewpoint] cannot find any contours')
  123. return {}
  124. cnts = sorted(cnts, key = cv2.contourArea, reverse = True)
  125. logging.info('[get_viewpoint] found {} contours'.format(len(cnts)))
  126. x,y,w,h = cv2.boundingRect(cnts[0])
  127. return {"viewport":{"x":x,"y":y,"w":w,"h":h,"type":"C"}}
  128. else:
  129. return {}
  130. debug_img = False
  131. def _show_img(im_inp, msg=None):
  132. if debug_img:
  133. if msg is not None:
  134. print('[IMAGE] ' + msg)
  135. h,w = im_inp.shape[:2]
  136. cv2.namedWindow("Example", cv2.WINDOW_NORMAL)
  137. cv2.resizeWindow('Example', int(w*0.4), int(h*0.4))
  138. cv2.moveWindow('Example', 600, 20)
  139. cv2.imshow("Example", im_inp)
  140. k = cv2.waitKey()
  141. if k == 27:
  142. cv2.destroyAllWindows()
  143. def ai_progress(index,jpg_file,json_obj,json_obj2,ai,queue,stop_file,img,img_src):
  144. result_json={'index':index}
  145. try:
  146. if os.path.exists(stop_file):
  147. progress_str="AI_log: jpg_file:"+jpg_file+" ,index:"+str(index)+" program is stopped"
  148. logging.info(progress_str)
  149. return ""
  150. progress_str=""
  151. if not os.path.exists(jpg_file):
  152. progress_str="AI_error:jpg_file not found:"+jpg_file
  153. logging.info(progress_str)
  154. else:
  155. vp={}
  156. logging.info("read:"+jpg_file)
  157. progress_str="AI_log: jpg_file:"+jpg_file+" ,index:"+str(index)
  158. logging.info(progress_str)
  159. try:
  160. #try to get viewport from fld file
  161. vp= get_fld_viewport(json_obj,jpg_file)
  162. if vp is None:
  163. try:
  164. #check whether the viewport exists in image or not
  165. vp=get_viewpoint(index,img_src)
  166. if "viewport" in vp:
  167. progress_str="AI_log:[get_viewpoint]: jpg_file:"+jpg_file+" ,index:"+str(index)+", "+str(vp)
  168. logging.info(progress_str)
  169. except Exception as ex:
  170. progress_str="AI_log:[get_viewpoint] jpg_file:"+jpg_file+" ,index:"+str(index)+", "+ex.message
  171. logging.info(progress_str)
  172. else:
  173. progress_str="AI_log:[get_fld_viewport] jpg_file:"+jpg_file+" ,index:"+str(index)+" vp:"+str(vp)
  174. logging.info(progress_str)
  175. if json_obj2!=None:
  176. vp.update(json_obj2)
  177. progress_str="AI_log: jpg_file:"+jpg_file+" ,index:"+str(index)+" vp:"+str(vp)
  178. logging.info(progress_str)
  179. except Exception as e:
  180. progress_str="AI_log: jpg_file:"+jpg_file+" ,index:"+str(index)+", [viewort-error]:"+e.message
  181. logging.info(progress_str)
  182. #time.sleep(6-index)
  183. ai_result=ai.do_pred_by_data(img,vp)
  184. progress_str="AI_result:"+str(index)+","+ai_result
  185. logging.info(progress_str)
  186. temp_json=store_ai_result(json_obj,jpg_file,ai_result)
  187. result_json.update(temp_json)
  188. except Exception as e:
  189. progress_str="AI_log: jpg_file:"+jpg_file+" ,index:"+str(index)+", [AI_log-error]:"+e.message
  190. logging.info(progress_str)
  191. finally:
  192. queue.put(result_json)
  193. logging.info("AI_log:finally jpg_file:"+jpg_file+" ,index:"+str(index))
  194. return progress_str
  195. def check_create_dir(dest_path):
  196. dir_dest=os.path.dirname(dest_path)
  197. if not os.path.exists(dir_dest):
  198. os.makedirs(dir_dest)
  199. def store_ai_result(json_obj,jpg_file,ai_result):
  200. ai_json=json.loads(ai_result)
  201. arr_path=os.path.split(jpg_file)
  202. ai_path=os.path.join(arr_path[0],get_AI_dir(json_obj),arr_path[1])
  203. ai_file=os.path.splitext( ai_path)[0]+".ai"
  204. check_create_dir(ai_file)
  205. #move image_path
  206. try:
  207. image_path=ai_json["content"]["additional"]["image_path"]
  208. if os.path.exists( image_path):
  209. shutil.move(image_path,ai_path)
  210. with open(ai_file,'w') as fw:
  211. json.dump(ai_json,fw)
  212. except:
  213. print("key:image_path not found!")
  214. #write csv
  215. try:
  216. if ai_json["content"]["details"].has_key("csv"):
  217. data=ai_json["content"]["details"]["data"]
  218. ai_file=os.path.splitext( ai_path)[0]+".csv"
  219. with open(ai_file,'w') as csvfile:
  220. cw=csv.writer(csvfile,lineterminator='\n')
  221. for k,v in data.items():
  222. for item in v:
  223. item.insert(0,k)
  224. cw.writerows(v)
  225. except Exception as e:
  226. print("csv error!",e.message)
  227. finally:
  228. pass
  229. result_json={}
  230. try:
  231. result_json['file_results']={arr_path[1]:ai_json['content']['details']['value']}
  232. except:
  233. print("result_json error:",ai_json)
  234. result_json['file_results']={arr_path[1]:""}
  235. pass
  236. return result_json
  237. #reading the json viewport data from /AI/jpg.fld
  238. def get_fld_viewport(json_obj,jpg_file):
  239. try:
  240. dir_path =os.path.join( os.path.dirname(jpg_file),get_AI_dir(json_obj))
  241. file_name=os.path.basename(jpg_file)
  242. fld_file=os.path.join(dir_path,os.path.splitext(file_name)[0]+".fld")
  243. logging.info("fld_file:"+fld_file)
  244. if os.path.exists(fld_file):
  245. with open(fld_file,'r') as load_f:
  246. vp= json.load(load_f)
  247. if vp!=None:
  248. vp["viewport"]["x"]=int(vp["viewport"]["x"])
  249. vp["viewport"]["y"]=int(vp["viewport"]["y"])
  250. vp["viewport"]["w"]=int(vp["viewport"]["w"])
  251. vp["viewport"]["h"]=int(vp["viewport"]["h"])
  252. return vp
  253. else:
  254. return None
  255. except Exception as e:
  256. progress_str="AI_error:[get_fld_viewport]"+e.message
  257. logging.info(progress_str)
  258. return None
  259. def get_AI_dir(json_obj):
  260. if json_obj.has_key("version") and json_obj["version"]>"1.0":
  261. num="00"
  262. if json_obj.has_key("num"):
  263. num=json_obj["num"]
  264. return "AI_%s%s%s" %(json_obj["catagory"].zfill(2),json_obj["items"].zfill(2),num.zfill(2))
  265. else:
  266. return "AI"
  267. def write_ai_end(json_obj,ai_tmp_report,report_json):
  268. #end batch detect report
  269. if json_obj.has_key("end"):
  270. #set end time
  271. report_json['end']=json_obj['end']
  272. #store .ai_ with report_json
  273. if os.path.exists(ai_tmp_report):
  274. with open(ai_tmp_report,'w') as fw:
  275. if report_json.has_key("file_count"):
  276. print("file_count:",report_json['file_count'])
  277. json.dump(report_json,fw)
  278. if json_obj.has_key("end"):
  279. #end rename .ai_ to ai
  280. ai_report=os.path.join(json_obj['path'],get_AI_dir(json_obj),"report.ai")
  281. if os.path.exists(ai_tmp_report):
  282. shutil.move(ai_tmp_report,ai_report)
  283. def ai_detect(THREAD_COUNT,json_obj,json_obj2):
  284. start = time.time()
  285. stop_file=os.path.join(os.path.split(os.path.realpath(__file__))[0] , "stop.ai")
  286. if os.path.exists(stop_file):
  287. os.remove(stop_file)
  288. progress_str="AI_log:"+stop_file+" found,it's removed "
  289. logging.info(progress_str)
  290. #start batch detect
  291. ai_tmp_report=os.path.join( json_obj['path'],get_AI_dir(json_obj),"report.ai_")
  292. report_json={}
  293. if json_obj.has_key("start"):
  294. report_json['start']=json_obj['start']
  295. report_json['path']=json_obj['path_client']
  296. report_json['file_count']=0
  297. report_json['file_results']={}
  298. check_create_dir(ai_tmp_report)
  299. with open(ai_tmp_report,'w') as fw:
  300. json.dump(report_json,fw)
  301. else:
  302. if os.path.exists(ai_tmp_report):
  303. with open(ai_tmp_report,'r') as fr:
  304. report_json=json.load(fr)
  305. num=0
  306. image_names=[]
  307. if len(json_obj["images"])>0:
  308. #images not empty
  309. image_names= json_obj["images"].split(";")
  310. ai = ImageDetect()
  311. initTime=time.time()
  312. ai.init(json.dumps(json_obj))
  313. progress_str="AI_log:init OK! consume:"+str(time.time()-initTime)+", images:"+str(len(image_names))
  314. logging.info(progress_str)
  315. pool = Pool(THREAD_COUNT)
  316. index =0
  317. queue = Manager().Queue()
  318. for image_name in image_names:
  319. jpg_file=os.path.join(json_obj["path"],image_name)
  320. if not os.path.exists(jpg_file):
  321. print("file not found:",jpg_file)
  322. num+=1
  323. continue
  324. img_src=cv2.imdecode(np.fromfile(jpg_file, dtype=np.uint8), -1)
  325. img = cv2.imencode('.jpg', img_src)[1]
  326. img = base64.b64encode(img)
  327. pool.apply_async(func=ai_progress, args=(index,jpg_file,json_obj,json_obj2,ai,queue,stop_file,img,img_src ) )
  328. index+=1
  329. #waiting for these threads to finish
  330. while num<len(image_names):
  331. res= queue.get()
  332. logging.info("AI_progress:"+str(num))
  333. logging.info("AI_queue:"+str(num)+", index:"+str(res['index']))
  334. num+=1
  335. if res.has_key('file_results'):
  336. if report_json.has_key('file_results'):
  337. report_json['file_results'].update(res['file_results'])
  338. else:
  339. print("index:",str(res['index']),"report_json no file_results:",report_json)
  340. else:
  341. print("index:",str(res['index']),"no file_results:",res)
  342. if report_json.has_key("file_count"):
  343. report_json['file_count']+=num
  344. write_ai_end(json_obj,ai_tmp_report,report_json)
  345. else:
  346. print("report_json no file_count:",report_json)
  347. pool.close()
  348. pool.join()
  349. else:
  350. write_ai_end(json_obj,ai_tmp_report,report_json)
  351. progress_str="AI_log:"+str(len(image_names))+" jpg files detected! consume:"+str(time.time()-start)
  352. logging.info(progress_str)
  353. return num
if __name__ == '__main__':
    # Default worker-process count; overridden to 1 for "zzsb" jobs below.
    THREAD_COUNT=3
    if len(sys.argv)<2:
        progress_str="AI_error:missing json parameter"
        logging.info(progress_str)
        sys.exit()
    # NOTE(review): Python 2 byte-string handling — argv is assumed to be
    # GBK-encoded and is round-tripped through unicode back to the default
    # (utf8) encoding set at module top. Confirm before porting to Python 3,
    # where str has no .decode().
    json_str=sys.argv[1].decode('gbk').encode()
    logging.info("sys.argv[1]:"+json_str)
    # Optional second JSON argument: extra key/values merged into each
    # image's viewport dict by ai_progress.
    json_obj2=None
    if len(sys.argv)>2:
        json_str2=sys.argv[2].decode('gbk').encode()
        logging.info("sys.argv[2]:"+json_str2)
        json_obj2=json.loads(json_str2)
    json_obj=json.loads(json_str)
    # Required properties: func, images, path — bail out early when missing.
    if not "func" in json_obj:
        progress_str="AI_error:missing func property"
        logging.info(progress_str)
        sys.exit()
    if not "images" in json_obj:
        progress_str="AI_error:missing images property"
        logging.info(progress_str)
        sys.exit()
    if not "path" in json_obj:
        progress_str="AI_error:missing path property"
        logging.info(progress_str)
        sys.exit()
    # "zzsb" detection runs single-process.
    if json_obj["func"]=="zzsb":
        THREAD_COUNT=1
    logging.info("AI_log: ibm_ai start... PROCESS_COUNT:"+str(THREAD_COUNT))
    ai_detect(THREAD_COUNT,json_obj,json_obj2)