diff --git a/Code/OpenMV Code/Test_code/new_main.py b/Code/OpenMV Code/Test_code/new_main.py
new file mode 100644
index 0000000000000000000000000000000000000000..034ee9bf6c16c7e5679b4fdb532218543a20d083
--- /dev/null
+++ b/Code/OpenMV Code/Test_code/new_main.py
@@ -0,0 +1,263 @@
+import image, network, math, rpc, sensor, struct, tf, ucollections, pyb
+import ubinascii
+
+# Camera setup: RGB565 QVGA with auto gain/white balance disabled so that
+# color-tracking thresholds stay stable.
+sensor.reset()
+sensor.set_pixformat(sensor.RGB565)
+sensor.set_framesize(sensor.QVGA)
+sensor.set_auto_gain(False)
+sensor.set_auto_whitebal(False)
+sensor.skip_frames(time = 2000)
+
+# Status LEDs: red = idle, green = servicing an RPC call.
+red_led = pyb.LED(1)
+green_led = pyb.LED(2)
+blue_led = pyb.LED(3)
+red_led.off()
+green_led.off()
+blue_led.off()
+red_led.on()
+
+# This board acts as an RPC slave on the I2C bus at address 0x12.
+interface = rpc.rpc_i2c_slave(slave_addr=0x12)
+
+MAX_BLOBS = 4   # maximum number of blobs tracked per frame
+TAG_SIZE = 138  # AprilTag edge size used by dist_conversion()
+MAX_TAGS = 2
+XRES = 320      # QVGA resolution
+YRES = 240
+SIZE = 16.3     # tag size constant passed to ATDistance()
+
+# Default camera intrinsics for the QVGA frame (2.8 mm lens).
+f_x = (2.8 / 3.673) * XRES
+f_y = (2.8 / 2.738) * YRES
+c_x = XRES * 0.5
+c_y = YRES * 0.5
+
+def draw_detections(img, dects):
+    # Outline each detection's corners and bounding box on the frame buffer.
+    for d in dects:
+        c = d.corners()
+        l = len(c)
+        for i in range(l): img.draw_line(c[(i+0)%l] + c[(i+1)%l], color = (0, 255, 0))
+        img.draw_rectangle(d.rect(), color = (255, 0, 0))
+
+def face_detection(data):
+    # Return the bounding box (x, y, w, h) of the largest face, or zeros.
+    sensor.set_pixformat(sensor.GRAYSCALE)
+    sensor.set_framesize(sensor.QVGA)
+    faces = sensor.snapshot().gamma_corr(contrast=1.5).find_features(image.HaarCascade("frontalface"))
+    if not faces: return struct.pack("<HHHH", 0, 0, 0, 0)
+    for f in faces: sensor.get_fb().draw_rectangle(f, color = (255, 255, 255))
+    out_face = max(faces, key = lambda f: f[2] * f[3])
+    return struct.pack("<HHHH", out_face[0], out_face[1], out_face[2], out_face[3])
+
+def find_position(box):
+    # Classify which quadrant of the frame a bounding box is centered in
+    # (UL/UR/LL/LR = upper/lower, left/right).
+    x = box[0]
+    y = box[1]
+    w = box[2]
+    h = box[3]
+    cx = x + w/2
+    cy = y + h/2
+    frame_cx = sensor.width()/2
+    frame_cy = sensor.height()/2
+    if cx < frame_cx:
+        if cy < frame_cy:
+            pos = "UL"
+        else:
+            pos = "LL"
+    else:
+        if cy < frame_cy:
+            pos = "UR"
+        else:
+            pos = "LR"
+    x_diff = cx - frame_cx  # offset from frame center (currently unused)
+    y_diff = cy - frame_cy
+    area = w*h
+    return pos
+
+def find_dist(box, w_actual, l_actual):
+    # Rough pinhole distance estimate from the known object size and its
+    # apparent width/height in pixels, averaged over both axes.
+    w_dist = w_actual*2.8/box[2]
+    l_dist = l_actual*2.8/box[3]
+    dist = (w_dist+l_dist)/2
+    return dist*100
+
+ignore_blue = (0, 0, sensor.width(), sensor.height())
+
+def color_detection(thresholds):
+    # Find up to MAX_BLOBS color blobs matching the packed (L, A, B) min/max
+    # thresholds and return (cx, cy, area) for the first three of them.
+    sensor.set_pixformat(sensor.RGB565)
+    sensor.set_framesize(sensor.QVGA)
+    sensor.set_auto_gain(False)
+    sensor.set_auto_whitebal(False)
+    lab_thresholds = struct.unpack("<bbbbbb", thresholds)
+    img = sensor.snapshot()
+    blobs = [0] * (MAX_BLOBS * 3)
+    n_blobs = 0
+    for blob in img.find_blobs([lab_thresholds], pixels_threshold=200, area_threshold=200, merge=True):
+        img.draw_edges(blob.min_corners(), color=(0,0,255))
+        item = blob.rect()
+        area = item[2] * item[3]
+        if n_blobs < MAX_BLOBS:
+            blobs[n_blobs*3 : n_blobs*3 + 3] = [int(blob.cx()), int(blob.cy()), int(area)]
+            n_blobs += 1
+    print(blobs)
+    # Only the first three blobs fit in the 9-short reply packet.
+    return struct.pack("<hhhhhhhhh", blobs[0], blobs[1], blobs[2], blobs[3], blobs[4], blobs[5], blobs[6],
+                       blobs[7], blobs[8])
+
+def color_detection_single(data):
+    # Return the centroid (cx, cy) of the densest blob matching the packed
+    # (L, A, B) min/max thresholds, or (0, 0) if nothing is found.
+    red_led.off()
+    green_led.on()
+    sensor.set_pixformat(sensor.RGB565)
+    sensor.set_framesize(sensor.QVGA)
+    thresholds = struct.unpack("<bbbbbb", data)
+    print(thresholds)
+    blobs = sensor.snapshot().find_blobs([thresholds],
+                                         pixels_threshold=500,
+                                         area_threshold=500,
+                                         merge=True,
+                                         margin=20)
+    if not blobs: return struct.pack("<HH", 0, 0)
+    for b in blobs:
+        sensor.get_fb().draw_rectangle(b.rect(), color = (255, 0, 0))
+        sensor.get_fb().draw_cross(b.cx(), b.cy(), color = (0, 255, 0))
+    out_blob = max(blobs, key = lambda b: b.density())
+    red_led.on()
+    green_led.off()
+    return struct.pack("<HH", out_blob.cx(), out_blob.cy())
+
+def recalibration(data):
+    # Update the region to ignore during blob tracking. Expects a module-level
+    # 'thresholds' list of LAB tuples to be defined elsewhere.
+    global ignore_blue
+    img = sensor.snapshot()
+    for blob in img.find_blobs([thresholds[2]], invert=True, pixels_threshold=200, area_threshold=200, merge=True):
+        img.draw_edges(blob.min_corners(), color=(0,0,255))
+        ignore_blue = (blob.x(), blob.y(), blob.w(), blob.h())
+
+def change_tag_size(size):
+    global TAG_SIZE
+    TAG_SIZE = size
+
+def degrees(radians):
+    return (180 * radians) / math.pi
+
+def dist_conversion(z):
+    # Convert an AprilTag z-translation into a distance scaled by the
+    # configured tag size and resolution.
+    z = z*100*2
+    scale = TAG_SIZE/138
+    res_scale = (240/XRES + 240/YRES)/2
+    return z*scale*res_scale
+
+def xy_to_mm(tag1_cx, tag1_cy, tag2_cx, tag2_cy, tag3_cx, tag3_cy):
+    # Convert the pixel offset of a target tag (tag3) into millimetres using
+    # two reference tags (tag1, tag2) with a known physical separation.
+    mm_distx = 590
+    mm_disty = 1
+    pixel_distx = tag1_cx - tag2_cx
+    pixel_disty = tag1_cy - tag2_cy
+    target_distx = tag1_cx - tag3_cx
+    target_disty = tag1_cy - tag3_cy
+    distx = target_distx*mm_distx/pixel_distx/2.8
+    disty = target_disty*mm_disty/pixel_disty/2.8
+    return (distx, disty)
+
+def apriltags(data):
+    # Look for the AprilTag whose id matches the first byte of 'data' and
+    # return its centroid and z-rotation as (cx, cy, rot_deg).
+    global TAG_SIZE
+    red_led.off()
+    green_led.on()
+    sensor.set_pixformat(sensor.RGB565)
+    sensor.set_framesize(sensor.QQVGA)
+    sensor.set_auto_gain(False)
+    sensor.set_auto_whitebal(False)
+    LENS_MM = 2.8
+    LENS_TO_CAM_MM = 22
+    datas = struct.unpack("<bb", data)
+    id1 = datas[0]
+    tagsize1 = 138
+    if id1 == 0:
+        tagsize1 = 138
+    img = sensor.snapshot()
+    # Intrinsics for the QQVGA frame configured above.
+    f_x = (LENS_MM / 3.984) * sensor.width()
+    f_y = (LENS_MM / 2.952) * sensor.height()
+    c_x = sensor.width() * 0.5
+    c_y = sensor.height() * 0.5
+    tags = [0, 0, 0]
+    for tag in img.find_apriltags(fx=f_x, fy=f_y, cx=c_x, cy=c_y):
+        img.draw_rectangle(tag.rect(), color = (255, 0, 0))
+        img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0))
+        dist = dist_conversion(tag.z_translation())
+        img.draw_string(tag.cx(), tag.cy(), str(dist))
+        if tag.id() == id1:
+            print(tag.id())
+            TAG_SIZE = tagsize1
+            tags[0:3] = [int(tag.cx()), int(tag.cy()), int(degrees(tag.z_rotation()))]
+    red_led.on()
+    green_led.off()
+    return struct.pack("<hhh", tags[0], tags[1], tags[2])
+
+def qrcode_detection(data):
+    # Return the string form of any QR codes found in a 320x240 window of a
+    # VGA frame, or an empty payload if none are found.
+    sensor.set_pixformat(sensor.RGB565)
+    sensor.set_framesize(sensor.VGA)
+    sensor.set_windowing((320, 240))
+    codes = sensor.snapshot().find_qrcodes()
+    if not codes: return bytes()
+    draw_detections(sensor.get_fb(), codes)
+    return str(codes).encode()
+
+def ATDistance(val, size):
+    # Scale a raw AprilTag z-translation by the physical tag size.
+    new_val = -10*val*size/16.3
+    return new_val
+
+def goalfinder(data):
+    # Among the tag ids packed in 'data', return the nearest matching tag as
+    # (id, cx, cy, distance, x_rot, y_rot, z_rot).
+    green_led.on()
+    sensor.set_pixformat(sensor.RGB565)
+    sensor.set_framesize(sensor.QVGA)
+    sensor.set_auto_gain(False)
+    sensor.set_auto_whitebal(False)
+    goal_id = struct.unpack("<bbb", data)
+    nearest_tag = [0, 0, 0, 10000, 0, 0, 0]
+    img = sensor.snapshot()
+    for tag in img.find_apriltags(fx=f_x, fy=f_y, cx=c_x, cy=c_y):
+        img.draw_rectangle(tag.rect(), color = (255, 0, 0))
+        img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0))
+        for i in goal_id:
+            if tag.id() == i and ATDistance(tag.z_translation(), SIZE) < nearest_tag[3]:
+                nearest_tag[0] = int(tag.id())
+                nearest_tag[1] = int(tag.cx())
+                nearest_tag[2] = int(tag.cy())
+                nearest_tag[3] = int(ATDistance(tag.z_translation(), SIZE))
+                nearest_tag[4] = int(tag.x_rotation())
+                nearest_tag[5] = int(tag.y_rotation())
+                nearest_tag[6] = int(tag.z_rotation())
+    red_led.on()
+    green_led.off()
+    return struct.pack("<hhhhhhh", nearest_tag[0], nearest_tag[1], nearest_tag[2], nearest_tag[3],
+                       nearest_tag[4], nearest_tag[5], nearest_tag[6])
+
+# Expose the vision routines to the RPC master.
+interface.register_callback(face_detection)
+interface.register_callback(color_detection)
+interface.register_callback(apriltags)
+interface.register_callback(color_detection_single)
+interface.register_callback(recalibration)
+interface.register_callback(goalfinder)
+interface.loop()
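Controller-side usage, for reference: a minimal sketch (not part of the patch) of how another board could drive the callbacks registered above over I2C, assuming the stock OpenMV rpc library is used as the master. The slave address matches rpc_i2c_slave(slave_addr=0x12) in new_main.py; the LAB thresholds and goal tag ids below are placeholder values.

import rpc, struct

interface = rpc.rpc_i2c_master(slave_addr=0x12, rate=100000)

# Request the densest blob matching a placeholder (L, A, B) min/max threshold.
thresholds = struct.pack("<bbbbbb", 30, 100, 15, 127, 15, 127)
result = interface.call("color_detection_single", thresholds)
if result is not None and len(result) == struct.calcsize("<HH"):
    cx, cy = struct.unpack("<HH", result)
    print("blob centroid:", cx, cy)

# Request the nearest of three placeholder goal tag ids.
goal_ids = struct.pack("<bbb", 1, 2, 3)
result = interface.call("goalfinder", goal_ids)
if result is not None and len(result) == struct.calcsize("<hhhhhhh"):
    tag_id, cx, cy, dist, xr, yr, zr = struct.unpack("<hhhhhhh", result)
    print("nearest goal tag:", tag_id, "at", (cx, cy), "distance", dist)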