Commit dd99427f authored by Zhaoliang Zheng

Merge remote-tracking branch 'origin/main' into main

parents 02845070 b5614519
@@ -6,31 +6,31 @@ def nothing(x):
     pass
 
 if __name__ == "__main__":
     #change the IP address below according to the
     #IP shown in the Serial monitor of Arduino code
-    # url='http://192.168.4.1/cam-hi.jpg'
+    url='http://192.168.4.1/cam-hi.jpg'
     # url='http://192.168.1.107/cam-hi.jpg'
-    url='http://192.168.4.1/cam-mid.jpg'
+    # url='http://192.168.1.107/cam-mid.jpg'
 
     # cv2.namedWindow("live transmission", cv2.WINDOW_AUTOSIZE)
     cv2.namedWindow("live transmission", cv2.WINDOW_NORMAL)
 
     while True:
         header = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36."}
         req = Request(url, headers=header)
         img_resp = urlopen(req, timeout=60)
         imgnp=np.array(bytearray(img_resp.read()),dtype=np.uint8)
         frame=cv2.imdecode(imgnp,-1)
 
         cv2.imshow("live transmission", frame)
         h,w,_ = frame.shape
         print("with:{},high:{}".format(w,h))
 
         key=cv2.waitKey(5)
         if key==ord('q'):
             break
 
     cv2.destroyAllWindows()
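The hunk above begins at line 6 of the viewer script, so its imports are collapsed. For orientation, a minimal sketch of the header such a script needs, assuming the standard urllib.request client plus OpenCV and NumPy (the actual file header is not part of this diff):

import cv2                                   # decode and display the received JPEG frames
import numpy as np                           # wrap the HTTP payload as a byte array
from urllib.request import Request, urlopen  # fetch cam-hi.jpg from the ESP32-CAM web server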
@@ -3,8 +3,8 @@
 #include <Wire.h>
 #include <Adafruit_MotorShield.h>
 
-int largeVal = 255;
-int smallVal = 127;
+int largeVal = 100;
+int smallVal = 50;
 
 Adafruit_MotorShield AFMS = Adafruit_MotorShield();
 Adafruit_DCMotor *motorVL = AFMS.getMotor(1);
...
# OpenMV camera script: exposes vision routines (face, color, AprilTag, and QR detection)
# to an external controller over I2C via the OpenMV rpc library.
import image, network, math, rpc, sensor, struct, tf, ucollections, pyb
import ubinascii

# Camera setup: QVGA RGB565 with auto gain/white balance disabled for stable color tracking
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_auto_gain(False)
sensor.set_auto_whitebal(False)
sensor.skip_frames(time = 2000)

# Status LEDs toggled by the handlers below to show activity
red_led = pyb.LED(1)
green_led = pyb.LED(2)
blue_led = pyb.LED(3)
red_led.off()
green_led.off()
blue_led.off()
red_led.on()

# Act as an RPC slave on the I2C bus at address 0x12
interface = rpc.rpc_i2c_slave(slave_addr=0x12)
# Detection limits and AprilTag geometry constants
MAX_BLOBS = 4        # maximum number of blobs recorded per frame in color_detection()
TAG_SIZE = 138       # reference tag size used by dist_conversion(); updated by apriltags()
MAX_TAGS = 2         # currently unused
XRES = 320           # QVGA frame width used for the intrinsics below
YRES = 240           # QVGA frame height
SIZE = 16.3          # physical tag size passed to ATDistance() by goalfinder()
# Approximate camera intrinsics for the stock 2.8 mm lens
f_x = (2.8 / 3.673) * XRES
f_y = (2.8 / 2.738) * YRES
c_x = XRES * 0.5
c_y = YRES * 0.5
def draw_detections(img, dects):
    # Outline each detection: connect consecutive corners in green and draw the bounding box in red
    for d in dects:
        c = d.corners()
        l = len(c)
        for i in range(l): img.draw_line(c[(i+0)%l] + c[(i+1)%l], color = (0, 255, 0))
        img.draw_rectangle(d.rect(), color = (255, 0, 0))

def face_detection(data):
    # Haar-cascade frontal-face detection; returns the largest face as four uint16 (x, y, w, h), or zeros
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)
    faces = sensor.snapshot().gamma_corr(contrast=1.5).find_features(image.HaarCascade("frontalface"))
    if not faces: return struct.pack("<HHHH", 0, 0, 0, 0)
    for f in faces: sensor.get_fb().draw_rectangle(f, color = (255, 255, 255))
    out_face = max(faces, key = lambda f: f[2] * f[3])
    return struct.pack("<HHHH", out_face[0], out_face[1], out_face[2], out_face[3])
def find_position(box):
x = box[0]
y = box[1]
w = box[2]
h = box[3]
cx = x + w/2
cy = y + h/2
frame_cx = sensor.width()/2
frame_cy = sensor.height()/2
if cx < frame_cx:
if cy < frame_cy:
pos = "UL"
else:
pos = "LL"
else:
if cy < frame_cy:
pos = "UR"
else:
pos = "LR"
    # Offsets from the frame centre and the blob area are computed here but not yet returned
    x_diff = cx - frame_cx
    y_diff = cy - frame_cy
    area = w*h
    return pos
def find_dist(box, w_actual, l_actual):
w_dist = w_actual*2.8/box[2]
l_dist = l_actual*2.8/box[3]
dist = (w_dist+l_dist)/2
return dist*100
# Region reported by recalibration(); starts as the full frame (not yet consumed elsewhere)
ignore_blue = (0, 0, sensor.width(), sensor.height())
def color_detection(thresholds):
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_auto_gain(False)
sensor.set_auto_whitebal(False)
thresholdss = struct.unpack("<bbbbbb", thresholds)
img = sensor.snapshot()
    blobs = [0,0,0,0,0,0,0,0,0,0,0,0]   # up to MAX_BLOBS slots of (cx, cy, area)
    n_blobs = 0
    for blob in img.find_blobs([thresholdss], pixels_threshold=200, area_threshold=200, merge=True):
        img.draw_edges(blob.min_corners(), color=(0,0,255))
        item = blob.rect()
        area = item[2] * item[3]
        if n_blobs < MAX_BLOBS:
            blobs[0 + (n_blobs*3): 3 + (n_blobs*3)] = [int(blob.cx()), int(blob.cy()), int(area)]
            n_blobs += 1
    print(blobs)
    # Only the first three blobs fit in the nine-short reply
    return struct.pack("<hhhhhhhhh", blobs[0], blobs[1], blobs[2], blobs[3], blobs[4], blobs[5], blobs[6],
                       blobs[7], blobs[8])
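# Note (example, not part of the original file): a caller that receives this reply can
# recover up to three (cx, cy, area) triples with the matching struct layout, e.g.:
#   values = struct.unpack("<hhhhhhhhh", reply)
#   triples = [values[i:i+3] for i in range(0, 9, 3)]   # an all-zero triple means "no blob"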
def color_detection_single(data):
red_led.off()
green_led.on()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_auto_gain(False)
sensor.set_auto_whitebal(False)
thresholds = struct.unpack("<bbbbbb", data)
print(thresholds)
blobs = sensor.snapshot().find_blobs([thresholds],
pixels_threshold=500,
area_threshold=500,
merge=True,
margin=20)
if not blobs:
red_led.on()
green_led.off()
return struct.pack("<HH", 0, 0)
for b in blobs:
sensor.get_fb().draw_rectangle(b.rect(), color = (255, 0, 0))
sensor.get_fb().draw_cross(b.cx(), b.cy(), color = (0, 255, 0))
out_blob = max(blobs, key = lambda b: b.density())
red_led.on()
green_led.off()
return struct.pack("<HH", out_blob.cx() , out_blob.cy())
# Recalibrate the ignored region by finding non-matching (inverted) blobs.
# NOTE: `thresholds` is not defined in this file; it must be supplied globally before this handler runs.
def recalibration(data):
    global ignore_blue
    img = sensor.snapshot()
    # find_blobs() expects a list of thresholds
    for blob in img.find_blobs([thresholds[2]], invert=True, pixels_threshold=200, area_threshold=200, merge=True):
        img.draw_edges(blob.min_corners(), color=(0,0,255))
        ignore_blue = (blob.x(), blob.y(), blob.w(), blob.h())
    return bytes()   # empty reply

def change_tag_size(size):
    global TAG_SIZE  # without the global declaration this assignment would only create a local
    TAG_SIZE = size
def degrees(radians):
return (180 * radians) / math.pi
def dist_conversion(z):
    # Scale the raw tag z-translation by tag size and frame resolution
    z = z*100*2
    scale = TAG_SIZE/138
    res_scale = (240/XRES + 240/YRES)/2
    return z*scale*res_scale
def xy_to_mm(tag1_cx, tag1_cy, tag2_cx, tag2_cy, tag3_cx, tag3_cy):
mm_distx = 590
mm_disty = 1
pixel_distx = tag1_cx - tag2_cx
pixel_disty = tag1_cy - tag2_cy
target_distx = tag1_cx - tag3_cx
target_disty = tag1_cy - tag3_cy
distx = target_distx*mm_distx/pixel_distx/2.8
disty = target_disty*mm_disty/pixel_disty/2.8
return (distx, disty)
def apriltags(data):
    # Find the AprilTag with the requested ID; returns (cx, cy, z-rotation in degrees) or zeros
    red_led.off()
    green_led.on()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QQVGA)
    sensor.set_auto_gain(False)
    sensor.set_auto_whitebal(False)
    global TAG_SIZE  # so the assignment below updates the value used by dist_conversion()
    LENS_MM = 2.8
    LENS_TO_CAM_MM = 22
    datas = struct.unpack("<bb", data)
    id1 = datas[0]
    tagsize1 = 138
    if id1 == 0:
        tagsize1 = 138
    img = sensor.snapshot()
    f_x = (LENS_MM / 3.984) * XRES
    f_y = (LENS_MM / 2.952) * YRES
    c_x = XRES * 0.5
    c_y = YRES * 0.5
    tags = [0, 0, 0]
    for tag in img.find_apriltags(fx=f_x, fy=f_y, cx=c_x, cy=c_y):
        img.draw_rectangle(tag.rect(), color = (255, 0, 0))
        img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0))
        dist = dist_conversion(tag.z_translation())
        img.draw_string(tag.cx(), tag.cy(), str(dist))
        if tag.id() == id1:
            print(tag.id())
            TAG_SIZE = tagsize1
            tags[0:3] = [int(tag.cx()), int(tag.cy()), int(degrees(tag.z_rotation()))]
    red_led.on()
    green_led.off()
    return struct.pack("<hhh", tags[0], tags[1], tags[2])
def qrcode_detection(data):
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.VGA)
sensor.set_windowing((320, 240))
codes = sensor.snapshot().find_qrcodes()
if not codes: return bytes()
draw_detections(sensor.get_fb(), codes)
return str(codes).encode()
def ATDistance(val,size):
    # Scale the tag z-translation into a distance value proportional to the physical tag size
    new_val = -10*val*size/16.3
    return new_val
def goalfinder(data):
green_led.off()
red_led.off()
blue_led.on()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_auto_gain(False)
sensor.set_auto_whitebal(False)
    goal_id = struct.unpack("<bbb", data)    # up to three candidate goal tag IDs
    nearest_tag = [0,0,0,10000,0,0,0]        # (id, cx, cy, dist, x_rot, y_rot, z_rot); dist starts large
img = sensor.snapshot()
for tag in img.find_apriltags(fx=f_x, fy=f_y, cx=c_x, cy=c_y):
img.draw_rectangle(tag.rect(), color = (255, 0, 0))
img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0))
for i in goal_id:
if tag.id() == i and ATDistance(tag.z_translation(),SIZE) < nearest_tag[3]:
nearest_tag[0] = int(tag.id())
nearest_tag[1] = int(tag.cx())
nearest_tag[2] = int(tag.cy())
nearest_tag[3] = int(ATDistance(tag.z_translation(),SIZE))
nearest_tag[4] = int(tag.x_rotation())
nearest_tag[5] = int(tag.y_rotation())
nearest_tag[6] = int(tag.z_rotation())
red_led.on()
green_led.off()
blue_led.off()
return struct.pack("<hhhhhhh", nearest_tag[0],nearest_tag[1],nearest_tag[2],nearest_tag[3],nearest_tag[4],nearest_tag[5],nearest_tag[6])
# Register the RPC handlers (called by name from the I2C master), then serve forever
interface.register_callback(face_detection)
interface.register_callback(color_detection)
interface.register_callback(apriltags)
interface.register_callback(color_detection_single)
interface.register_callback(recalibration)
interface.register_callback(goalfinder)
interface.loop()
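For reference, a master-side sketch of how these handlers can be driven over I2C. This assumes a second MicroPython board running the OpenMV rpc library as the bus master at the same 0x12 address; the threshold values and bus rate below are illustrative, not taken from this repository:

import rpc, struct

# Talk to the camera board registered above as rpc_i2c_slave(slave_addr=0x12)
interface = rpc.rpc_i2c_master(slave_addr=0x12, rate=100000)

# Pack an LAB threshold exactly as color_detection_single() unpacks it ("<bbbbbb")
threshold = struct.pack("<bbbbbb", 30, 100, 15, 127, 15, 127)   # example values only

result = interface.call("color_detection_single", threshold)
if result is not None and len(result) == struct.calcsize("<HH"):
    cx, cy = struct.unpack("<HH", result)
    print("blob centroid:", cx, cy)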