import image, network, math, rpc, sensor, struct, tf, ucollections, pyb
#from pyb import UART,
import ubinascii
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_auto_gain(False) # must turn this off to prevent image washout...
sensor.set_auto_whitebal(False) # must turn this off to prevent image washout...
sensor.skip_frames(time = 2000)
red_led = pyb.LED(1)
green_led = pyb.LED(2)
blue_led = pyb.LED(3)
red_led.off()
green_led.off()
blue_led.off() #put the LEDs into a known state
red_led.on() #turn the LED to red so that we know it is on
interface = rpc.rpc_i2c_slave(slave_addr=0x12)
#Constants needed
MAX_BLOBS = 4
TAG_SIZE = 138 #length of inner black border of tag in mm (if printing to fill normal paper, = 138mm)
MAX_TAGS = 2
XRES = 320
YRES = 240
SIZE = 16.3 # apriltag square edge length in cm (used by ATDistance)
f_x = (2.8 / 3.673) * XRES # find_apriltags defaults to this if not set
f_y = (2.8 / 2.738) * YRES # find_apriltags defaults to this if not set
c_x = XRES * 0.5 # find_apriltags defaults to this if not set (the image.w * 0.5)
c_y = YRES * 0.5 # find_apriltags defaults to this if not set (the image.h * 0.5)
################################################################
# Call Backs
################################################################
def draw_detections(img, dects):
    for d in dects:
        c = d.corners()
        l = len(c)
        for i in range(l): img.draw_line(c[(i+0)%l] + c[(i+1)%l], color = (0, 255, 0))
        img.draw_rectangle(d.rect(), color = (255, 0, 0))
# Remote control works via call back methods that the controller
# device calls via the rpc module on this device. Call backs
# are functions which take a bytes() object as their argument
# and return a bytes() object as their result. The rpc module
# takes care of moving the bytes() objects across the link.
# bytes() objects may be up to the MicroPython int max in size.
# When called returns x, y, w, and h of the largest face within view.
#
# data is unused
def face_detection(data):
    sensor.set_pixformat(sensor.GRAYSCALE)
    sensor.set_framesize(sensor.QVGA)
    faces = sensor.snapshot().gamma_corr(contrast=1.5).find_features(image.HaarCascade("frontalface"))
    if not faces: return struct.pack("<HHHH", 0, 0, 0, 0) # No detections.
    for f in faces: sensor.get_fb().draw_rectangle(f, color = (255, 255, 255))
    out_face = max(faces, key = lambda f: f[2] * f[3])
    return struct.pack("<HHHH", out_face[0], out_face[1], out_face[2], out_face[3])
#################### helper functions for blob detection ####################
def find_position(box):
    x = box[0] # x, y correspond to the top left corner
    y = box[1]
    w = box[2]
    h = box[3]
    cx = x + w/2
    cy = y + h/2
    frame_cx = sensor.width()/2
    frame_cy = sensor.height()/2
    if cx < frame_cx:
        if cy < frame_cy:
            pos = "UL"
        else:
            pos = "LL"
    else:
        if cy < frame_cy:
            pos = "UR"
        else:
            pos = "LR"
    # if frame size is 320x240, absolute center is 160, 120
    x_diff = cx - frame_cx # offset from frame center (currently unused)
    y_diff = cy - frame_cy # offset from frame center (currently unused)
    area = w*h # blob area (currently unused)
    return pos
def find_dist(box, w_actual, l_actual):
    # find the distance between a detected object and the camera
    # must know the actual dimensions in mm
    w_dist = w_actual*2.8/box[2]
    l_dist = l_actual*2.8/box[3]
    dist = (w_dist+l_dist)/2 # average the two to get the most accurate distance
    return dist*100 # convert to mm
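# A minimal usage sketch for the two helpers above, assuming `blob` came from
# img.find_blobs() and the tracked object is known to be 100 mm x 50 mm
# (illustrative numbers, not measured values):
#
#   pos = find_position(blob.rect())      # "UL", "UR", "LL", or "LR"
#   d = find_dist(blob.rect(), 100, 50)   # estimated distance per the scaling above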
#################### blob detection ####################
ignore_blue = (0, 0, sensor.width(), sensor.height()) #set in recalibration function
#just single color detection for now, will add multicolor blobs in a bit
def color_detection(thresholds):
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    sensor.set_auto_gain(False) # must be turned off for color tracking
    sensor.set_auto_whitebal(False) # must be turned off for color tracking
    lab_threshold = struct.unpack("<bbbbbb", thresholds)
    img = sensor.snapshot()
    # take calibration into account here
    blobs = [0,0,0,0,0,0,0,0,0,0,0,0]
    n_blobs = 0
    for blob in img.find_blobs([lab_threshold], pixels_threshold=200, area_threshold=200, merge=True):
        img.draw_edges(blob.min_corners(), color=(0,0,255))
        item = blob.rect()
        # distance could also be computed here if the actual object dimensions were passed in
        area = item[2] * item[3] # 2 is width, 3 is height
        if n_blobs < MAX_BLOBS:
            blobs[(n_blobs*3):(n_blobs*3) + 3] = [int(blob.cx()), int(blob.cy()), int(area)]
            n_blobs += 1
    print(blobs)
    # note: only the first three blobs (9 values) fit in this reply format
    return struct.pack("<hhhhhhhhh", blobs[0], blobs[1], blobs[2], blobs[3], blobs[4], blobs[5], blobs[6],
                       blobs[7], blobs[8])
# When called returns the x/y centroid of the largest blob
# within the OpenMV Cam's field-of-view.
#
# data is the 6-byte color tracking threshold tuple of L_MIN, L_MAX, A_MIN, A_MAX, B_MIN, B_MAX.
def color_detection_single(data):
    red_led.off()
    green_led.on()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA)
    thresholds = struct.unpack("<bbbbbb", data)
    print(thresholds)
    blobs = sensor.snapshot().find_blobs([thresholds],
                                         pixels_threshold=500,
                                         area_threshold=500,
                                         merge=True,
                                         margin=20)
    if not blobs: return struct.pack("<HH", 0, 0) # No detections.
    for b in blobs:
        sensor.get_fb().draw_rectangle(b.rect(), color = (255, 0, 0))
        sensor.get_fb().draw_cross(b.cx(), b.cy(), color = (0, 255, 0))
    out_blob = max(blobs, key = lambda b: b.density())
    red_led.on()
    green_led.off()
    return struct.pack("<HH", out_blob.cx(), out_blob.cy())
def recalibration(data):
    # data is unused
    # cam facing a white wall: scan to get img, identify which parts are blue + white,
    # and exclude the blue part
    global ignore_blue
    img = sensor.snapshot()
    # NOTE: this expects a global `thresholds` list with a blue LAB threshold at
    # index 2, which is not defined anywhere in this file
    for blob in img.find_blobs([thresholds[2]], invert=True, pixels_threshold=200, area_threshold=200, merge=True):
        # with invert=True this detects the non-blue regions
        img.draw_edges(blob.min_corners(), color=(0,0,255))
        ignore_blue = (blob.x(), blob.y(), blob.w(), blob.h())
    return bytes() # rpc callbacks must return a bytes() object
#################### constants and functions for AprilTags ####################
def change_tag_size(size):
    global TAG_SIZE
    TAG_SIZE = size
    #return size
def degrees(radians):
    return (180 * radians) / math.pi
def dist_conversion(z):
    # returns the distance in mm
    z = z*100*2
    scale = TAG_SIZE/138
    res_scale = (240/XRES + 240/YRES)/2
    return z*scale*res_scale
def xy_to_mm(tag1_cx, tag1_cy, tag2_cx, tag2_cy, tag3_cx, tag3_cy):
    # find the actual distance between tag1 and tag3
    mm_distx = 590 # x dist in mm between the two reference tags (premeasured)
    mm_disty = 1 # y dist in mm (kept nonzero so the y equation stays usable)
    pixel_distx = tag1_cx - tag2_cx # dist between the two reference tags' cx (in pixels)
    pixel_disty = tag1_cy - tag2_cy # same but cy
    target_distx = tag1_cx - tag3_cx # dist between tag1 cx and object cx
    target_disty = tag1_cy - tag3_cy # same but cy
    # the reference separation is assumed constant unless it is measured and passed in
    # the 2.8 is a scaling factor based on the lens focal length of the cam
    distx = target_distx*mm_distx/pixel_distx/2.8
    disty = target_disty*mm_disty/pixel_disty/2.8
    return (distx, disty) # in mm
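# Worked example with made-up numbers: if the two reference tags are 590 mm
# apart and their centroids land 200 px apart in the image, each pixel spans
# about 2.95 mm, so an object centroid 100 px from tag1 maps to roughly
# 100 * 590 / 200 = 295 mm before the 2.8 lens factor is divided out
# (295 / 2.8 ≈ 105 mm, which is what this function returns for x).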
#returns the x,y centroid and z rotation (in degrees) of a detected tag with the requested ID
def apriltags(data):
    red_led.off()
    green_led.on()
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QQVGA)
    sensor.set_auto_gain(False) # must turn this off to prevent image washout...
    sensor.set_auto_whitebal(False) # must turn this off to prevent image washout...
    # need to hardcode the tag size for each ID
    global TAG_SIZE
    LENS_MM = 2.8
    LENS_TO_CAM_MM = 22
    datas = struct.unpack("<bb", data)
    id1 = datas[0]
    # add more options based on what the tag size will actually be;
    # this is the default size for now
    tagsize1 = 138
    if id1 == 0:
        tagsize1 = 138
    #sensor.set_pixformat(sensor.GRAYSCALE)
    #sensor.set_framesize(sensor.QVGA) # we run out of memory if the resolution is much bigger...
    #sensor.set_windowing((XRES, YRES)) # Look at the center pixels of the VGA resolution.
    img = sensor.snapshot()
    f_x = (LENS_MM / 3.984) * img.width() # find_apriltags defaults to this if not set
    f_y = (LENS_MM / 2.952) * img.height() # find_apriltags defaults to this if not set
    c_x = img.width() * 0.5 # find_apriltags defaults to this if not set (the image.w * 0.5)
    c_y = img.height() * 0.5 # find_apriltags defaults to this if not set (the image.h * 0.5)
    tags = [0, 0, 0]
    for tag in img.find_apriltags(fx=f_x, fy=f_y, cx=c_x, cy=c_y): # defaults to TAG36H11
        img.draw_rectangle(tag.rect(), color = (255, 0, 0))
        img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0))
        dist = dist_conversion(tag.z_translation())
        img.draw_string(tag.cx(), tag.cy(), str(dist))
        if tag.id() == id1:
            print(tag.id())
            TAG_SIZE = tagsize1
            tags[0:3] = [int(tag.cx()), int(tag.cy()), int(degrees(tag.z_rotation()))]
    red_led.on()
    green_led.off()
    return struct.pack("<hhh", tags[0], tags[1], tags[2])
def qrcode_detection(data):
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.VGA)
    sensor.set_windowing((320, 240))
    codes = sensor.snapshot().find_qrcodes()
    if not codes: return bytes() # No detections.
    draw_detections(sensor.get_fb(), codes)
    return str(codes).encode()
#val = z translation value from an apriltag reading
#size = apriltag square edge length in cm
def ATDistance(val, size):
    new_val = -10*val*size/16.3
    return new_val
def goalfinder(data):
    sensor.set_pixformat(sensor.RGB565)
    sensor.set_framesize(sensor.QVGA) # we run out of memory if the resolution is much bigger...
    sensor.set_auto_gain(False) # must turn this off to prevent image washout...
    sensor.set_auto_whitebal(False) # must turn this off to prevent image washout...
    goal_id = struct.unpack("<bbb", data)
    nearest_tag = [0, 0, 0, float('inf'), 0, 0, 0]
    img = sensor.snapshot()
    for tag in img.find_apriltags(fx=f_x, fy=f_y, cx=c_x, cy=c_y): # defaults to TAG36H11
        img.draw_rectangle(tag.rect(), color = (255, 0, 0))
        img.draw_cross(tag.cx(), tag.cy(), color = (0, 255, 0))
        for i in goal_id:
            if tag.id() == i and ATDistance(tag.z_translation(), SIZE) < nearest_tag[3]:
                nearest_tag[0] = tag.id()
                nearest_tag[1] = tag.x_translation()
                nearest_tag[2] = tag.y_translation()
                nearest_tag[3] = ATDistance(tag.z_translation(), SIZE)
                nearest_tag[4] = tag.x_rotation()
                nearest_tag[5] = tag.y_rotation()
                nearest_tag[6] = tag.z_rotation()
    # check to see if there's a tag in frame
    if nearest_tag[3] == float('inf'):
        return struct.pack("<hhhhhhh", 0, 0, 0, 0, 0, 0, 0) # no matching tag in view
    print("nearest tag id is ", nearest_tag[0], " with the distance", nearest_tag[3], " cm.")
    # "<h" packs 16-bit ints, so truncate the floats; rotations are converted to
    # degrees first (as in apriltags above) so they survive the integer packing
    return struct.pack("<hhhhhhh", nearest_tag[0], int(nearest_tag[1]), int(nearest_tag[2]),
                       int(nearest_tag[3]), int(degrees(nearest_tag[4])), int(degrees(nearest_tag[5])),
                       int(degrees(nearest_tag[6])))
# Register call backs.
interface.register_callback(face_detection)
interface.register_callback(color_detection)
interface.register_callback(apriltags)
interface.register_callback(color_detection_single)
interface.register_callback(recalibration)
interface.register_callback(goalfinder)
# Once all call backs have been registered we can start
# processing remote events. interface.loop() does not return.
interface.loop()