diff --git a/Code/Control/Laptop_Code/ESP32_AT/imageTread_AT.py b/Code/Control/Laptop_Code/ESP32_AT/imageTread_AT.py
new file mode 100644
index 0000000000000000000000000000000000000000..fd39118bde4a5c31f7d1d3f9c4117676820b4c0a
--- /dev/null
+++ b/Code/Control/Laptop_Code/ESP32_AT/imageTread_AT.py
@@ -0,0 +1,166 @@
+import cv2
+from urllib.request import urlopen, Request
+import numpy as np
+import time
+
+# import apriltag
+from pupil_apriltags import Detector
+def nothing(x):
+    pass
+
+
+# detector = apriltag.Detector()
+
+detector = Detector()
+
+def get_AT_6DOF_info(url):
+    header = {
+        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36."}
+    req = Request(url, headers=header)
+    img_resp = urlopen(req, timeout=60)
+    imgnp = np.array(bytearray(img_resp.read()), dtype=np.uint8)
+    frame = cv2.imdecode(imgnp, -1)
+    gray_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+    h, w, _ = frame.shape
+    # put a dot in center of the frame
+    cv2.circle(frame, (w // 2, h // 2), 7, (255, 0, 0), -1)
+    # set camera parameters
+    fx = 800
+    fy = 600
+    cx = 400
+    cy = 300
+    results = detector.detect(gray_image, estimate_tag_pose=True, camera_params=[fx, fy, cx, cy], tag_size=0.16)
+    debug_print = 0
+    for r in results:
+        # extract the bounding box (x, y)-coordinates for the AprilTag
+        # and convert each of the (x, y)-coordinate pairs to integers
+        (ptA, ptB, ptC, ptD) = r.corners
+        ptB = (int(ptB[0]), int(ptB[1]))
+        ptC = (int(ptC[0]), int(ptC[1]))
+        ptD = (int(ptD[0]), int(ptD[1]))
+        ptA = (int(ptA[0]), int(ptA[1]))
+        tx, ty, tz = r.pose_t  # in meters
+        rx,ry,rz = r.pose_R
+
+        # draw the bounding box of the AprilTag detection
+        cv2.line(frame, ptA, ptB, (0, 255, 0), 5)
+        cv2.line(frame, ptB, ptC, (0, 255, 0), 5)
+        cv2.line(frame, ptC, ptD, (0, 255, 0), 5)
+        cv2.line(frame, ptD, ptA, (0, 255, 0), 5)
+        # draw the center (x, y)-coordinates of the AprilTag
+        (cX, cY) = (int(r.center[0]), int(r.center[1]))
+        cv2.circle(frame, (cX, cY), 5, (0, 0, 255), -1)
+
+        # draw the tag family on the image
+
+
+        tagFamily = r.tag_family.decode("utf-8")
+        tid = r.tag_id
+        cv2.putText(frame, tagFamily, (ptA[0], ptA[1] - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
+
+        cv2.putText(frame, "tx: {:.2f} ty: {:.2f} tz:{:.2f}".format(tx[0], ty[0], tz[0]), (ptA[0], ptA[1] + 30),
+                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
+
+        if debug_print == 1:
+            print("tx,ty,tz:{},{},{}".format(tx, ty, tz))
+            print("cX,cY:{},{}".format(cX, cY))
+            print("[INFO] tag id: {}".format(tid))
+
+    # show the output image after AprilTag detection
+    cv2.imshow("Image", frame)
+
+    return tid,tx,ty,tz,rx,ry,rz
+
+if __name__ == "__main__":
+    #change the IP address below according to the
+    #IP shown in the Serial monitor of Arduino code
+    # url='http://192.168.4.1/cam-hi.jpg'
+    # url='http://192.168.1.107/cam-hi.jpg'
+    url='http://192.168.4.1/cam-hi.jpg'
+    # url = 'http://192.168.1.118/cam-hi.jpg'
+
+
+    # cv2.namedWindow("live transmission", cv2.WINDOW_AUTOSIZE)
+
+    # cv2.namedWindow("live transmission", cv2.WINDOW_NORMAL)
+
+    while True:
+        header = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36."}
+        req = Request(url, headers=header)
+        img_resp = urlopen(req, timeout=60)
+        imgnp=np.array(bytearray(img_resp.read()),dtype=np.uint8)
+        frame=cv2.imdecode(imgnp,-1)
+
+        gray_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+        #print(len(gray_image.shape))
+        h,w,_ = frame.shape
+
+        # put a dot in center of the frame
+        cv2.circle(frame, (w//2, h//2), 7, (255, 0, 0), -1)
+
+        """
+        If you also want to extract the tag pose, estimate_tag_pose should be set to True
+        and camera_params ([fx, fy, cx, cy])
+        and tag_size (in meters) should be supplied.
+        The detect method returns a list of Detection objects each having
+        the following attributes
+        (note that the ones with an asterisks are computed only if estimate_tag_pose=True):
+        """
+
+        """
+        So fx and fy are the focal lengths expressed in pixels.
+        Cx and Cy describe the coordinates of the so called principal
+        point that should be in the center of the image.
+        It is e.g. not in the center of the image if you cropped the image,
+        what you should never do when calibrating.
+
+        fx, fy, cx, cy are given in Pixels in Computer Vision ( and openCV)
+        but e.g. in Photogrammetry you often use mm
+        """
+        fx = 800
+        fy = 600
+        cx = 400
+        cy = 300
+        results = detector.detect(gray_image,estimate_tag_pose=True, camera_params=[fx, fy, cx, cy], tag_size=0.16)
+
+        # loop over the AprilTag detection results
+        for r in results:
+            # extract the bounding box (x, y)-coordinates for the AprilTag
+            # and convert each of the (x, y)-coordinate pairs to integers
+            (ptA, ptB, ptC, ptD) = r.corners
+            ptB = (int(ptB[0]), int(ptB[1]))
+            ptC = (int(ptC[0]), int(ptC[1]))
+            ptD = (int(ptD[0]), int(ptD[1]))
+            ptA = (int(ptA[0]), int(ptA[1]))
+            tx,ty,tz = r.pose_t
+            print("tx,ty,tz:{},{},{}".format(tx,ty,tz))
+            # draw the bounding box of the AprilTag detection
+            cv2.line(frame, ptA, ptB, (0, 255, 0), 5)
+            cv2.line(frame, ptB, ptC, (0, 255, 0), 5)
+            cv2.line(frame, ptC, ptD, (0, 255, 0), 5)
+            cv2.line(frame, ptD, ptA, (0, 255, 0), 5)
+            # draw the center (x, y)-coordinates of the AprilTag
+            (cX, cY) = (int(r.center[0]), int(r.center[1]))
+            cv2.circle(frame, (cX, cY), 5, (0, 0, 255), -1)
+            # draw the tag family on the image
+            print("cX,cY:{},{}".format(cX,cY))
+
+
+            tagFamily = r.tag_family.decode("utf-8")
+            tid = r.tag_id
+            cv2.putText(frame, tagFamily, (ptA[0], ptA[1] - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
+
+            cv2.putText(frame, "tx: {:.2f} ty: {:.2f} tz:{:.2f}".format(tx[0],ty[0],tz[0]), (ptA[0], ptA[1] + 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
+
+            print("[INFO] tag id: {}".format(tid))
+
+        # show the output image after AprilTag detection
+        cv2.imshow("Image", frame)
+
+
+        key=cv2.waitKey(5)
+        if key==ord('q'):
+            break
+
+
+    #cv2.destroyAllWindows()
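
Usage note (not part of the patch): get_AT_6DOF_info() only binds tid/tx/ty/tz/rx/ry/rz inside the detection loop, so a frame with no visible AprilTag raises an UnboundLocalError at the return. The sketch below shows one way a caller might guard against that case; it assumes the same cam-hi.jpg endpoint and that imageTread_AT.py is importable from the working directory, and the wrapper name poll_tag is hypothetical.

# Minimal caller sketch (assumption: run from Code/Control/Laptop_Code/ESP32_AT/).
import cv2
from imageTread_AT import get_AT_6DOF_info

URL = 'http://192.168.4.1/cam-hi.jpg'  # same ESP32-CAM endpoint used in the patch

def poll_tag(url=URL):
    """Hypothetical wrapper: return (tid, tx, ty, tz, rx, ry, rz), or None if no tag is seen."""
    try:
        return get_AT_6DOF_info(url)
    except UnboundLocalError:
        # get_AT_6DOF_info() only assigns its return values when a tag was
        # detected in the frame; treat that case as "no tag visible".
        return None

if __name__ == "__main__":
    while True:
        info = poll_tag()
        if info is not None:
            tid, tx, ty, tz, rx, ry, rz = info
            # pose_t unpacks into 1-element arrays, hence the [0] indexing
            print("tag {}: tx={:.2f} ty={:.2f} tz={:.2f} m".format(tid, tx[0], ty[0], tz[0]))
        if cv2.waitKey(5) == ord('q'):  # also services the imshow window opened by get_AT_6DOF_info()
            break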