diff --git a/Code/Ball_Detection/PyTorch_with_ESPCAM/imageTread_AT.py b/Code/Ball_Detection/PyTorch_with_ESPCAM/imageTread_AT.py
new file mode 100644
index 0000000000000000000000000000000000000000..7130fad4dfd7f0f8ea26f3f23f8ccaeeda05d550
--- /dev/null
+++ b/Code/Ball_Detection/PyTorch_with_ESPCAM/imageTread_AT.py
@@ -0,0 +1,102 @@
+import cv2
+from urllib.request import urlopen, Request
+import numpy as np
+import time
+
+# import apriltag
+from pupil_apriltags import Detector
def nothing(x):
    """No-op placeholder callback (e.g. for OpenCV trackbar callbacks)."""
    return None
+
+
+# detector = apriltag.Detector()
+
+detector = Detector()
+
+
if __name__ == "__main__":
    # Change the IP address below according to the IP shown in the
    # Serial monitor of the Arduino code. (The original assigned `url`
    # twice; only the last assignment ever took effect.)
    url = 'http://192.168.1.118/cam-hi.jpg'

    # Spoof a browser User-Agent; some ESP32-CAM firmwares reject bare
    # requests. Constant, so build it once outside the loop.
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36."}

    # Camera intrinsics [fx, fy, cx, cy] in pixels, required by
    # pupil_apriltags when estimate_tag_pose=True:
    #   fx, fy - focal lengths in pixels (from camera calibration);
    #   cx, cy - principal point, which should be the image centre,
    #            so it is derived from the frame size each iteration
    #            (the original hard-coded 0,0, which skews every pose
    #            estimate toward the top-left corner).
    fx = 600
    fy = 800

    # Physical edge length of the tag's black border, in metres.
    TAG_SIZE = 0.16

    while True:
        req = Request(url, headers=headers)
        img_resp = urlopen(req, timeout=60)
        img_buf = np.array(bytearray(img_resp.read()), dtype=np.uint8)
        frame = cv2.imdecode(img_buf, -1)
        if frame is None:
            # Corrupt/partial JPEG from the camera: skip this frame
            # instead of crashing in cvtColor.
            continue

        gray_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        h, w = frame.shape[:2]
        cx = w / 2
        cy = h / 2

        results = detector.detect(
            gray_image,
            estimate_tag_pose=True,
            camera_params=[fx, fy, cx, cy],
            tag_size=TAG_SIZE,
        )

        # Loop over the AprilTag detection results.
        for r in results:
            # Bounding-box corners, converted to integer pixel coordinates.
            (ptA, ptB, ptC, ptD) = r.corners
            ptA = (int(ptA[0]), int(ptA[1]))
            ptB = (int(ptB[0]), int(ptB[1]))
            ptC = (int(ptC[0]), int(ptC[1]))
            ptD = (int(ptD[0]), int(ptD[1]))

            # pose_t is a 3x1 column vector; flatten so tx/ty/tz print as
            # scalars rather than single-element arrays.
            tx, ty, tz = r.pose_t.flatten()
            print("tx,ty,tz:{},{},{}".format(tx, ty, tz))

            # Draw the bounding box of the AprilTag detection.
            cv2.line(frame, ptA, ptB, (0, 255, 0), 5)
            cv2.line(frame, ptB, ptC, (0, 255, 0), 5)
            cv2.line(frame, ptC, ptD, (0, 255, 0), 5)
            cv2.line(frame, ptD, ptA, (0, 255, 0), 5)

            # Draw the centre (x, y)-coordinates of the AprilTag.
            (cX, cY) = (int(r.center[0]), int(r.center[1]))
            cv2.circle(frame, (cX, cY), 5, (0, 0, 255), -1)
            print("cX,cY:{},{}".format(cX, cY))

            # Annotate with the tag family and report the tag id.
            tagFamily = r.tag_family.decode("utf-8")
            cv2.putText(frame, tagFamily, (ptA[0], ptA[1] - 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
            print("[INFO] tag id: {}".format(r.tag_id))

        # Show the output image after AprilTag detection.
        cv2.imshow("Image", frame)

        key = cv2.waitKey(5)
        if key == ord('q'):
            break

    cv2.destroyAllWindows()
diff --git a/Code/Ball_Detection/PyTorch_with_ESPCAM/imageTread_web.py b/Code/Ball_Detection/PyTorch_with_ESPCAM/imageTread_web.py
new file mode 100644
index 0000000000000000000000000000000000000000..82bdc6bfb25423b7a9d44cb7249ccefe970bf329
--- /dev/null
+++ b/Code/Ball_Detection/PyTorch_with_ESPCAM/imageTread_web.py
@@ -0,0 +1,23 @@
+import urllib.request
+import cv2
+import numpy as np
+
if __name__ == '__main__':
    # Snapshot endpoint of the ESP32-CAM; adjust to the IP shown in the
    # Arduino serial monitor.
    url = 'http://192.168.1.117/capture?'
    print('Start')
    while True:
        # timeout prevents the loop from hanging forever if the camera
        # stops responding (the original call had no timeout).
        img_response = urllib.request.urlopen(url, timeout=60)
        img_buf = np.array(bytearray(img_response.read()), dtype=np.uint8)
        frame = cv2.imdecode(img_buf, -1)
        if frame is None:
            # Corrupt/partial JPEG: skip this frame instead of crashing.
            continue
        h, w = frame.shape[:2]
        print("width:{},height:{}".format(w, h))
        cv2.imshow('Window', frame)
        key = cv2.waitKey(5)
        if key == ord('q'):
            break

    cv2.destroyAllWindows()