From efe199e2a6242df0156b53494295d639598f8673 Mon Sep 17 00:00:00 2001 From: William Chen <billyskc@ucla.edu> Date: Tue, 4 Sep 2018 14:37:07 -0700 Subject: [PATCH] finalized the CoLo-AT 1.0 --- functions/ani.gif | Bin 2048 -> 0 bytes functions/basic_example.py | 38 --- .../__pycache__/realtime_plot.cpython-36.pyc | Bin 5833 -> 5905 bytes functions/data_analysis/realtime_plot.py | 2 +- functions/dataset_manager/random_dataset.py | 319 ------------------ .../runtime_data_generation.py | 78 ----- functions/demo_template.py | 28 -- functions/full_test9.gif | Bin 1820 -> 0 bytes functions/localization_algos/ekf_gs_sci.py | 211 ------------ functions/localization_algos/ekf_gs_sci2.py | 13 +- .../simple_ekf_meas_only.py | 100 ------ .../centralized_ekf2.py | 0 .../ekf_gs_ci2.py | 0 .../ekf_ls_bda2.py | 0 .../ekf_ls_ci2.py | 0 functions/random_walk.py | 71 ---- functions/ros_compatibility.py | 21 +- functions/ros_demo.py | 8 +- .../simulation_process/state_recorder.py | 7 +- functions/test_random.py | 72 ---- 20 files changed, 27 insertions(+), 941 deletions(-) delete mode 100644 functions/ani.gif delete mode 100644 functions/basic_example.py delete mode 100644 functions/dataset_manager/random_dataset.py delete mode 100644 functions/dataset_manager/runtime_data_generation.py delete mode 100644 functions/demo_template.py delete mode 100644 functions/full_test9.gif delete mode 100644 functions/localization_algos/ekf_gs_sci.py delete mode 100644 functions/localization_algos/simple_ekf_meas_only.py rename functions/localization_algos/{ => under_developing_algos}/centralized_ekf2.py (100%) rename functions/localization_algos/{ => under_developing_algos}/ekf_gs_ci2.py (100%) rename functions/localization_algos/{ => under_developing_algos}/ekf_ls_bda2.py (100%) rename functions/localization_algos/{ => under_developing_algos}/ekf_ls_ci2.py (100%) delete mode 100644 functions/random_walk.py delete mode 100644 functions/test_random.py diff --git a/functions/ani.gif b/functions/ani.gif deleted file mode 100644 index 5ee2136c92999e43edc2a159c3e89635722fb732..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2048 zcmXYmX;_no0f3ohUGgO)UB@~r6xxn;HU#j%iz{2_EQFA3o$CP<1Q8+RzPS<))a3he zc;E#{xR0EGh{};5B1aMuMMMNl6cIIqpq6?7CS9Lr@2~fL-mepOZQqg6;+yRI+~@y% z_SV<uz5hA(*Z%eRtnrj6n#~L7+KTo`=%T%Q1$}fT{>+@D!D;GnBL-g*?!J;eS+>6R zppn3_{+btJ0_Lox;U~BZ-#DiF85|*Bp4PoaprFCowzpEE`mU>E#ZO^=vefxbZkvC- zYVe}huHPR2eU3nUuIb9q#M3vhkH6fJ@I!9C<b3nh;d{^C!q;J<@X6!?uuRoynshD$ z0in@}DS3rZxw^|dHNS!g#KsU(DMiu>O}Ay*wTk>CEH>#!YO(BswrAx2!aKj9@Hk=` zO(?I_8LW1<2NfKFBc;<t3Z>p?b1Zt%8>)H_`XbiFh&NRC9YF>}6~Q*v^h^9hle@(m zYX>~C!03Wi*rvLn6CsIZF_KO7rqkGz&La5c2Fv-#yvc6K=0@v<n8M{%cx2Pf>YdQK zSTM4Aw4P8NRSaxVjWzF9C-;C`)Z^OJu7Y>KR?S3fmbokz+NzzrM49R=Mr_kfU**hC z_CVY8_MW1Z<#&kfEf21hAOhl~+goRDNCTpU$Q^C7!=<4q2I-EAb0Zbeg&t&7`_o%> ziRE#!s7uZ}nv^b~Uv!7-UVGk@K^A@4eZRYK#p4%q<@v*YC;%sqx%%?4r94`MitSu_ zHl|K7%454;ElhV6dQoxRZ(hur%W;ahp5>oiQ(Yo7&anD+X@1J6z!^R7Ry`{#Ui6*= zYf$lf4!$2G-g5{DAKG&m6$i^G`H&c&apa>kamLYgw4sb+K|)xj_;Y!DrsRuCaVCh- z4P`>%2H0NdCTsj&*;c!FuRO{<v{w=5fn^>43YCy`VpotP>m)vWIO`NK4!*DS8)Cw~ z)8D2^_MJ(l4evXfCWL37%akW%pU<w8WS5b3!`bCD1AKo4%bKwN0?#hlUs>QD-mesT z;5k)?Q23ncqd{O!4H$09sg=h8xpgOr_}uz4X<%+c8O@a2SSbX^O*L{nxw)YdB&$?9 z6Irb@0C}1=D?U%#VF&Ycoo-W}-rxZ!Eq$m&O6x!n<e{{g!p)S6);I*UeUz9;y)>Q% zQ9CASX6j|T5J9^#BTuAVovVasolc#Z*5x)J=-n@^iS(XVc8G3RcAM!&j|ai%U4tet zuDu^DW%L0N7RGfH4$18QkVIhK_$Xb<99T!UFb9J~NY>Ej3Ic2R3#F80!ssn5bGQ-7 zwrsKy*dtpVQnoc}(ZaUHd6Ar(U!jvYw{``~IHUN85zcKQ&W}6x4JnCx=i77{cRZOs 
z!o8a&^5acpDw25jvXwI4Bw0Vgo1z*0_|q&~68}EWA>-Q%7DxCFq1R9F;1HT9cz85e zE|>u$tb#{!94deIB*{a}e|#ogo<CPcx8^^o6rl>9)+mSt&l;5S0;fuEEtuCCQH8EH z8?kVq!yzwpcP?5B7Y$xi(epkusp!Q(u%hUtDZ*CtlNE<9UK%BlihmwYR}{aRq}z&L z+eK*Mn;8X3_;yaI5H36QHsOlfh!(BBw2?&bUO5yZ&+?*8<n?&by*>crfG@V{H)wIM zFNSdtNvV1-7<LVYXB_g^RQ)zWd<{uv91e6<{SF7~`v7E=gkY=TBypd=ig5%>sYay3 zuCFyQjz(&#{pjNBflkJ;7*{n~1nd74V2XEQYksc~_lIDZ5(1^>4@%gL^>`+@TT|ot zqh5RiOJ+i;u9`m?VFMdLrZfv%`>{<t5UFCyD3sa&2W)V=i7Dr3YCl;N55_o|iXvBS zuopJ;H-L4#1Y7qRS~9c~!#W|Q)crXaJ{*r{oh;SV{Ut&&OdzvPRk-T@ii4Z}1+q%( zu=QV(B&OXe)@co;J~SO}{@%no)2^uxqf5-GPS)9OSAB#CZrKB{&-G&)HYy~REDZa+ zh0?HD2_MP9v&+Ub4O{e*5ek`IKJ9AQW`tWAAiH7~+qlCfv2s-G3oc4yv;%GvnAnv| zn#R~giLJ=VR<61labEb%0|2MWH>~OJXz*qUhEt8CHvPjB4BQgqIW_*;rhi6&x1?lF zZQw%F*EnGGILN6B32ROufup4=PCb^|oR|*WK4;=IL~5Ip=-}-NC#NxHp_wEC#;O2r z)6Ou}zZKwE9fsRXpsN0(1nxB9xvJe-)qnNi9SxbQPF+xaX9UJuK&~b$O#Oom9B)@~ zwG^s4#R1&CV&dvJTJ?{M;N5N~S6{TCPWJ*6y#TMJButZmh9>$kyjCeyvo{!VZwSw8 zE7fZDML_o~WZuP!1<igOV)7=)Yp)B_=8~YvF%|EUhN{g=M@&tacpdFpEtL*UO*?s) zyBD-{5n}oQz`xQTreiAX(DW>Zf7L?Ov6YDXPx1WDF|CfPhwi(`{I2N*9p8wsKL`2U zvtjyt8)RQn@q1iUeW3&4cw^!lmbCifMaZ%0<QrEP^dc|ffe%8^>l@y35G{S+8!EVl zq_rFlMm~fk3i|wYEk`1x50QC->w)f;V>sl@2aupYB)nBZlFs<61vjv?Rwy0$XsuZ= z5UFdG(WQ?9=LLf??pB2eIr}Lhe`sfT+X;nqHY7BEm_Tbgr9?hnpO|mjt!q22mp;bk b<(pI8ZD)<hxeZXhB`f^m`A6UR_(cB#r5G#^ diff --git a/functions/basic_example.py b/functions/basic_example.py deleted file mode 100644 index 66b0fda..0000000 --- a/functions/basic_example.py +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Wed Aug 15 14:30:24 2018 - -@author: william -""" - -""" -========================= -Simple animation examples -========================= - -This example contains two animations. The first is a random walk plot. The -second is an image animation. 
-""" - -import numpy as np -import matplotlib.pyplot as plt -import matplotlib.animation as animation - - -def update_line(num, data, line): - line.set_data(data[..., :num]) - return line, - -fig1 = plt.figure() - -data = np.random.rand(2, 25) -l, = plt.plot([], [], 'r-') -plt.xlim(0, 1) -plt.ylim(0, 1) -plt.xlabel('x') -plt.title('test') -line_ani = animation.FuncAnimation(fig1, update_line, 25, fargs=(data, l), - interval=50, blit=True) - -# To save the animation, use the command: line_ani.save('lines.mp4') \ No newline at end of file diff --git a/functions/data_analysis/__pycache__/realtime_plot.cpython-36.pyc b/functions/data_analysis/__pycache__/realtime_plot.cpython-36.pyc index 4be9c010e9eea6e9cce18a77d44314491f648123..366ecb8d007190f9aa9ccc357bae5995364e12c0 100644 GIT binary patch delta 980 zcmYk4%TE(g6vpSyv|~HW^hH~3Y)cUc6a{&-ib&)Y9ww$S8p8sm4865ZX{UH+j76s* z*r;()qq!Sb#-%O^aqkir{sXgOM_Bj|7#5y8D6p7c&i9>@duPskKL<VzN~4M*{rLWF zSx)Ups)%e_5tmVBcTZeGXq2Wri{dSg;|e@YdBgOT=M$Qu7rpyvVfV<Z^INvNk}n#j z?W*KUrtL8bs_E!+P6d`+sez}}bjNNxQ6-bi!MWrHz?Q3;o;S5>F|QIr#+iSU;X1<@ z!#uzi%(@AvZV(@n?xH~t<z6&FgGvb%=%z9!PP5?|TKD#|Gs!H-v_R(QA7vaBY1W6) zD&6;-kCPm9-oj(Z01mm3ggihQksSTyi=j1o;LD&v8uw@YS7TfgHQG$EEL##DqO1NR z$r3QPE_^^_k3I}!Bkz|uODRXK3swv#@i0!|&X${g4$Q7x!d<w#B{n+DM3ZZztW-IP zdzuKb`3@_EM=*~S+}Cn}BRkIMsRLK0njCaG%AL3$U&dGPKubUz=I(RpOpYwl$>128 zruASix=p_XzYf*mYYyE2+jB?NaaASjuz+D^h)fg-39yk7z<k<e)4QSV_ymYFZ;Q`~ zW)_JpJl-(UJXr*nl<4nJGHI|9Ia9=9b>q0+c50evFQHlby8Y>7h8Zj{wi}MB8YIq) zE`}+F1qK0NEA!9v!nCecopg`_Yrr20PfXb!RaYyUFW^o1O4UionIy3PMG_1WJBBYA zB+Jw_h9PFrop5-|4w2eg&CLG~oB{bHJ*@55bj{3f)HK~RoKX7C8W36KSgYm}r!AjK zX!2U=G@uasYQFAp({8K$+jm~WBo<_^6Jl^;{(#*qsJdp;N0C0XLf=GYI(Z2a=R#4) uUI2v!NT4vri=aX%#Cv%1jUGhqp-DO(O?00u_RJoScgF5uX@>Ea(cE7dt_L0f delta 890 zcmYk4O-~b16o%){lyO>STH4Y=wYEb-fTDl_s)!$8g~A5n$AT~rO6`<7O*<`jhDf!I z(P~Uch%sm5-i=F@pWsHKF4%U(&iEG$3(p-CSj?0AzUSUEGxxmjhWCd&V~XNQ9lT!I zkAL=LMWR7TTp?q7-^3+CMnRS4#akT5<#<rto$yk6Pp064`;esfj@%~yTx%<3@|LZ+ zN|}PKNmedvJNlfnLQ5`H&@(EgW4E8Ef=y?zFP%WN<g#IAY@?jdWGSWBnLo*JmEju0 zETSdY+cu(HrCui6@)rD-`^gRPD+RI&HDy+uW}EN8wtJBEq$!jMfzH4mWs<Bx(yNnY zIF!1gbO9@G@UdhBM_j~1B&1B}0{rww$UXSw9Vdej^(B3mB3zR++HJ8Y*FE}WSoa;l zMn_V4wZzqxVz}O2k2HDs)Nyw$rpI-4S8T-Xi%qVf*41K6?`aak_QmU}KBV)yqWA8) z(2*Ty_|(DvYLmlCyx61n>HYeEp4b%#r*jXv#K<C@gZusoG7UTaelibV{2!%J{Pc9f zrNK_%MKp#Y!MDUG)UflkC2Ul!1W)JCrR&fij0LJ}x#{epTh1DC!2(Ia_u%H#7&F+U zTWxr}Y|$t)h8d<9(hLGZQ_@>z?v81cow3qYR#<~)oj1>GQr66tYESX)e5ve=_TLEM zptGxYob609Tw#5JwDPE8XZ|O`asmQ!sC!xS8Kz-psujbuk8ST+15>3O>#Y6QIVGP; zEb>O-G((uoa%S6MO@l_L%jd*yk$r)XH4P0uga$A?KSHTE&fi7Cn1l`@^Oy%=f)`N* uNPw4k`W8mQ4^p@BU`w!S77epMi7==PeGDhJc;?cMFNjrH=CIAz@Zw)VXXVrY diff --git a/functions/data_analysis/realtime_plot.py b/functions/data_analysis/realtime_plot.py index 2491930..436e6fc 100644 --- a/functions/data_analysis/realtime_plot.py +++ b/functions/data_analysis/realtime_plot.py @@ -214,4 +214,4 @@ def animate_plot(dataset_labels, data_recorder, analyzer, lm = None): # Show graph plt.show() # (optional) save as GIF, comment the following line if you don't want this - ani.save('full_test9.gif', writer="imagemagick", fps=30) + ani.save('full_test9.gif', writer="imagemagick", fps=60) diff --git a/functions/dataset_manager/random_dataset.py b/functions/dataset_manager/random_dataset.py deleted file mode 100644 index 5ada9a1..0000000 --- a/functions/dataset_manager/random_dataset.py +++ /dev/null @@ -1,319 +0,0 @@ -# -*- coding: utf-8 -*- - -import numpy as np -import sys -import random -from math import cos -from math import sin -import math -from existing_dataset import Dataset 
-''' -sys.path.insert(0, '/Users/william/Documents/SLAM_Simulation/Simulation-Environment-for-Cooperative-Localization/functions/requests') -import request_response -''' - -def find_nearest_time_idx(l, value): - array = np.asarray(l) - idx = (np.abs(array-value)).argmin() - return idx - - - -def find_next_time_idx(array, start_idx, value): - i = start_idx - try: - array[i] - except IndexError: - i = -1 - return i - while array[i] < value: - i = i+1 - try: - array[i] - except IndexError: - i = -1 - break - return i - -def get_pos_within_rad(rad): - rad = abs(rad) - while True: - x = random.uniform(-1*rad, rad) - y = random.uniform(-1*rad, rad) - if (((x**2) + (y**2))**.5 > rad): - continue - else: - return x, y - -def sanitize_angle(angle): - if -3.1415 > angle: - temp = abs(angle) - 3.1415 - angle = 3.1415 - temp - if angle > 3.1415: - temp = abs(angle) - 3.1415 - angle = -3.1415 + temp - return angle - -class RandomDataset(Dataset): - - def generate_random_landmark_map(self): - """num_landmarks = random.randint(8, 20) - - self.landmark_map = {} - for i in range(num_landmarks): - x_coord, y_coord = get_pos_within_rad(5) - self.landmark_map[i+1] = [x_coord, y_coord]""" - - self.landmark_map = {1: [0.0, 0.50]} - print "map", self.landmark_map - return self.landmark_map - - # take an x and y from the robot and the first two params, - # and a list of coords for the landmark as the second param - # return the distance between them - def landmark_distance(self, old_x, old_y, new_coords): - new_x, new_y = new_coords[0], new_coords[1] - return math.sqrt((old_x - new_x)**2 + (old_y - new_y)**2) - - def getBearing(self, x, y): - return math.atan2(y, x) - - def getLineAngle(self, my_x, my_y, lm_x, lm_y, orientation): - theta = self.getBearing(lm_x - my_x, lm_y - my_y) - #need to account that you don't yet know which direction the angle is, - #eg robot to lm or lmm to robot - if (theta > 0) and (my_y > lm_y): - theta = (math.pi / -2) + theta - elif (theta < 0) and (my_y < lm_y): - theta = (math.pi / 2) + theta - - angle_from_robot = sanitize_angle(theta - orientation) - - return angle_from_robot - - def get_closest_landmark(self, x, y): - closest_distance = 10 - closest_lm = 0 - for key, coords in self.landmark_map.items(): - distance = self.landmark_distance(x, y, coords) - if (distance < closest_distance): - closest_distance = distance - closest_lm = key - - #print closest_lm, closest_distance, x, y, coords[0], coords[1] - return closest_lm, closest_distance - - def get_second_closest_landmark(self, x, y): - closest_lm, closest_distance = self.get_closest_landmark(x, y) - second_closest_distance = 10 - second_closest_lm = 0 - for key, coords in self.landmark_map.items(): - distance = self.landmark_distance(x, y, coords) - if (distance < second_closest_distance and not(key == closest_lm)): - second_closest_distance = distance - second_closest_lm = key - - return second_closest_lm, second_closest_distance - - def initialization_Random_Dataset(self, duration): - print ('******** Random Initialization Started ********') - self.generate_random_landmark_map()# - - self.num_robots = len(self.dataset_labels) - self.measurement_data = [[] for i in range(self.num_robots)] - self.odometry_data = [[] for i in range(self.num_robots)] - self.groundtruth_data = [[] for i in range(self.num_robots)] - self.time_arr = {'odometry': [[] for i in range(self.num_robots)], 'measurement': [[] for i in range(self.num_robots)], 'groundtruth': [[] for i in range(self.num_robots)]} - - self.duration = duration # some more time 
to avoid index error - self.start_time_arr = [] - - - #finding the starting time: - self.start_time = (1e9) - self.end_time = self.start_time + self.duration - print('Staring time: ', self.start_time) - - #finding starting states: - self.starting_states = {} - robot_movement_dict = {} - for i, label in enumerate(self.dataset_labels): - robot_num = str(label) - - #generate the position starts here - #x_pos, y_pos = get_pos_within_rad(5) - x_pos, y_pos = 0.0, 0.0 - orientation = 0#random.uniform(-3.1415, 3.1415) - - self.starting_states[label] = [self.start_time, x_pos, y_pos, orientation] - print('Starting states: ') - print(self.starting_states) - - # - for i, label in enumerate(self.dataset_labels): - robot_num = str(label) - - #give the robot a random starting position - velocity = random.uniform(0.0, 1.0) - inits = self.starting_states[label] - x_pos = inits[1] - y_pos = inits[2] - orientation = inits[3] - this_robot = Robot_Movement_Generator(x_pos, y_pos, orientation, robot_num) - robot_movement_dict[robot_num] = this_robot - - - measurement_file = open("random" + robot_num + "_measurement_x.dat", 'w') - - time = self.start_time - ticks = 0 - while (time < self.end_time):# this is a fixed number of iterations right now - #time moves at a constant step right now - time_delta = 0.0195 - time = time + time_delta - ticks = ticks + 1 - - velocity, orientation, x_pos, y_pos = this_robot.run_tick(time, time_delta) - - #these are biased values that are approximated by the robot from the groundtruth - odo_info = {'time':time, 'velocity':velocity, 'orientation':orientation} - self.odometry_data[i].append(odo_info) - self.time_arr['odometry'][i].append(time)# I think there may be a mistake here. This value isn't meant to be orientation, it should be angular velocity - - groundtruth_info = {'time':time, 'x_pos':x_pos, 'y_pos':y_pos, 'orientation':orientation} # groundtruth - self.groundtruth_data[i].append(groundtruth_info) - self.time_arr['groundtruth'][i].append(time) - - #choose a random landmark and calculate the distance from it - if ticks % 23 == 0: - closest_landmark, closest_distance = self.get_closest_landmark(x_pos, y_pos) - if closest_distance < 5: - measurement_range = closest_distance #+ random.gauss(0.0, .001) #TODO: find a legitimate way to choose this sigma - bearing = self.getLineAngle(x_pos, y_pos, self.landmark_map[closest_landmark][0], self.landmark_map[closest_landmark][1], orientation) - ##### - #experimental - #calculate where I think I am based on the measurment and then move to that - #experimental_meas_range = self.landmark_distance(x_pos, y_pos, self.landmark_map[closest_landmark]) - - ##### - meas_info = {'time':time, 'subject_ID':closest_landmark, 'measurment_range': measurement_range, 'bearing':bearing} - #meas_info = {'time':time, 'subject_ID':closest_landmark, 'measurment_range': 0, 'bearing':0} - print meas_info - #print x_pos, y_pos, self.landmark_map[closest_landmark][0], self.landmark_map[closest_landmark][1], bearing - measurement_file.write(str(time) + "\t" + str(closest_landmark) + "\t" + str(measurement_range) + "\t" + str(bearing) + "\n") - self.measurement_data[i].append(meas_info) - self.time_arr['measurement'][i].append(time) - """second_closest_landmark, second_closest_distance = self.get_second_closest_landmark(x_pos, y_pos) - if second_closest_distance < 3: - measurement_range = second_closest_distance #+ random.gauss(0.0, .001) #TODO: find a legitimate way to choose this sigma - bearing = self.getLineAngle(x_pos, y_pos, 
self.landmark_map[second_closest_landmark][0], self.landmark_map[second_closest_landmark][1], orientation) - print meas_info - meas_info = {'time':time, 'subject_ID':second_closest_landmark, 'measurment_range': measurement_range, 'bearing':bearing} - measurement_file.write(str(time) + "\t" + str(second_closest_landmark) + "\t" + str(measurement_range) + "\t" + str(bearing) + "\n") - self.measurement_data[i].append(meas_info) - self.time_arr['measurement'][i].append(time)""" - - - measurement_file.close() - this_robot.close_file() - self.data_trackers = {'odometry': np.ones((self.num_robots,), dtype=np.int),'measurement':np.ones((self.num_robots,), dtype=np.int), 'groundtruth':np.ones((self.num_robots,), dtype=np.int)} - # tracker for each robot for both each type of data to keep track of their location in the dataset - self.odo_data_trackers = np.ones((self.num_robots,), dtype=np.int) - self.dataset_data = {'odometry': self.odometry_data, 'measurement': self.measurement_data, 'groundtruth': self.groundtruth_data} - print ('******** Initialization Completed ********') - - return self.start_time, self.starting_states, self.dataset_data, self.time_arr - - - -class Robot_Movement_Generator: - def __init__(self, x, y, orientation, num): - self.robot_num = num - self.velocity = .1 - self.max_velocity = .1 - self.orientation = orientation - self.angular_velocity = .15 - self.max_angular_velocity = .15 - self.x = x - self.y = y - self.sensor_noise = 0#.25 - self.odometry_file = open("random" + self.robot_num + "_odometry.dat", 'w') - self.groundtruth_file = open("random"+ self.robot_num + "_groundtruth.dat", 'w') - - def close_file(self): - self.odometry_file.close() - self.groundtruth_file.close() - - def update_xy(self, time_delta): - #check if the update is in bounds and if so do it - temp_x = self.x + cos(self.orientation) * self.velocity * (time_delta) - temp_y = self.y + sin(self.orientation) * self.velocity * (time_delta) - if ((temp_x**2 + self.y**2)**.5 < 5) and ((self.y**2 + temp_y**2)**.5 < 5): - self.x = temp_x - self.y = temp_y - else: - self.velocity = 0 - return self.x, self.y - - - def update_angular_velocity(self): - self.angular_velocity = self.max_angular_velocity - return self.angular_velocity - - def update_velocity(self): - self.velocity = self.max_velocity - return self.velocity - - def update_orientation(self, time_delta): - self.orientation += self.update_angular_velocity() * time_delta - self.orientation = sanitize_angle(self.orientation) - return self.orientation - - - def run_tick(self, time, time_delta): - #add biasing here, after we've already computed an accurate groundtruth - v = self.update_velocity()+ random.gauss(0.0, self.sensor_noise) - o = self.update_orientation(time_delta)+ random.gauss(0.0, self.sensor_noise) - x, y = self.update_xy(time_delta) - - - self.odometry_file.write(str(time) + "\t" + str(v) + "\t\t" + str(self.angular_velocity) + "\n") - - self.groundtruth_file.write(str(time) + "\t" + str(self.x) + "\t" + str(self.y) + "\t" + str(self.orientation) + "\n") - return (v, o, x, y) - - - - -# test script -""" -rgf = open('random1_groundtruth.dat', 'r') -rof = open('random1_odometry.dat', 'r') -end_time=1000000100 -start_time = 1000000000 - -times = [] -x_poss = [] -y_poss = [] -orientations = [] - -times2 = [] -velocities = [] -orientations2 = [] -for line in rgf: - if line[0] != '#' and (end_time+2 >= float(line.split()[0]) >= start_time): - times.append(float(line.split()[0])) - x_poss.append(float(line.split()[1])) - 
y_poss.append(float(line.split()[2])) - orientations.append(float(line.split()[3])) - -for line in rof: - if line[0] != '#' and (end_time+2 >= float(line.split()[0]) >= start_time): - times2.append(float(line.split()[0])) - velocities.append(float(line.split()[1])) - orientations2.append(float(line.split()[2])) - -for i in range(len(rof)): - - -print orientations, orientations2""" \ No newline at end of file diff --git a/functions/dataset_manager/runtime_data_generation.py b/functions/dataset_manager/runtime_data_generation.py deleted file mode 100644 index 59010ef..0000000 --- a/functions/dataset_manager/runtime_data_generation.py +++ /dev/null @@ -1,78 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Tue Feb 13 16:11:36 2018 - -@author: william -""" -from robot_parameters import * - -class RuntimeDataGenerator(): - '''For runtime self_generated data: propgation data and measurement data ''' - def init_(self): - pass - - def within_map(self, loc): - ''' - Parameters: - loc: array, shape = (2,1) - robot's location for [x, y] - Return: Boolean - True if within the map, False if not - ''' - center_point = [0,0] - if sqrt(pow(loc[0]-center_point[0], 2)+pow(loc[1]-center_point[1], 2)) > d_max: - return False - else: - return True - - - def generate_data(self, datatype, ): - - - def generate_propgation_data(self, current_loc, current_orientation): - ''' - To generate propagtion data randomly so that it will run within predefined map *assuming orientation is perfect - Parameters: - current_loc: array, shape = (2,1) - robot's current location for [x, y] - current_orientation: float - robot's current orientation in radian - Return: tuple, shape = (4,1) - it contain actual_v: float, actual velocity of the robot - meas_v: float, measured velocity of the robot which is actual veloctiy with measurement noises - orientation: float, updated robot's orientation in radians - actual_loc: array, shape = (2,1), actual location of the robot for [x, y] - ''' - [meas_v, meas_a_v] = [random.uniform(-max_v,max_v), random.uniform(-max_a_v, max_a_v)] - actual_v = meas_v + random.normal(0, sqrt(var_u_v)) - pre_update_position = [current_loc[0] + cos(current_orientation)*actual_v*delta_t, current_loc[1] + sin(current_orientation)*actual_v*delta_t] - - while(not within_map(pre_update_position)): - [meas_v, meas_a_v] = [random.uniform(-max_v,max_v), random.uniform(-max_a_v, max_a_v)] - actual_v = meas_v + random.normal(0, sqrt(var_u_v)) - pre_update_position = [current_loc[0] + cos(current_orientation)*actual_v*delta_t, current_loc[1] + sin(current_orientation)*actual_v*delta_t] - - orientation = current_orientation + meas_a_v*delta_t - actual_loc = pre_update_position - return [actual_v, meas_v, orientation, actual_loc] - - def generate_measurement_data(self, observer_loc, observee_loc, observer_orientation): - ''' - To generate propagtion data randomly so that it will run within predefined map *assuming orientation is perfect - Parameters: - observer_loc: array, shape = (2,1) - observer's current location for [x, y] - observee_loc: array, shape = (2,1) - observee's current location for [x, y] - observer_orientation: float - observer's current orientation in radian - Return: tuple, shape = (2,1) - it contain dis: float, actual velocity of the robot - beaing: float, observer's bearing toward observee - ''' - delta_x = observer_loc[0] - observee_loc[0] - delta_y = observer_loc[1] - observee_loc[1] - dis = sqrt(delta_y*delta_y + delta_x*delta_x) + random.normal(0, sqrt(var_dis)) - bearing = 
atan2(delta_y, delta_x) + random.normal(0, sqrt(var_angle)) - observer_orientation - return [dis, bearing] diff --git a/functions/demo_template.py b/functions/demo_template.py deleted file mode 100644 index 619c0bd..0000000 --- a/functions/demo_template.py +++ /dev/null @@ -1,28 +0,0 @@ -from matplotlib import pyplot as plt -import numpy as np -import mpl_toolkits.mplot3d.axes3d as p3 -from matplotlib import animation - -fig = plt.figure() - -# create the parametric curve -t=np.arange(0, 2*np.pi, 2*np.pi/100) -x=np.cos(t) -y=np.sin(t) -z=t/(2.*np.pi) - -# create the first plot -point, = ax.plot([x[0]], [y[0]], 'o') -ax.legend() -ax.set_xlim([-1.5, 1.5]) -ax.set_ylim([-1.5, 1.5]) -ax.set_zlim([-1.5, 1.5]) - -# second option - move the point position at every frame -def update_point(n): - point.set_data(np.array([n+1, n-1])) - return point - -ani=animation.FuncAnimation(fig, update_point, 99) - -plt.show() \ No newline at end of file diff --git a/functions/full_test9.gif b/functions/full_test9.gif deleted file mode 100644 index dc854071fda15ffbdb89b2c04c7aa465ef5af696..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1820 zcmV+%2jlohNk%w1VXy>*1MmO<00000001HR1ONj7001li000261cU<s0{(=LsmtvT zqnxzbi?iOm`wxcVNS5Y_rs~SJ?hD8AOxN~}=lag~{tpZahs2`sh)gP%%%<}RjY_A~ zs`ZM^YPa03_X`e-$K<m4j83c9?6&(2kIU!uy8VvN>-YS={|^`_I7nD%c!-#&xX9S( z_y`#(IZ0V*d5M{+xyjk-`3V{-I!an<dWxE=y2{$>`U)E>J4;(@dyAW^yUW|_`wJW_ zJWO0{e2ko|yv*F}{0to}JxyJ0eT|*1z0KY2{S6*2K2Bb4evY25zRuq6{th26KTlt8 ze~+K9zt7+A{|_*rz<~q{8a#+Fp~8g>8#;UlF`~qY6f0W1h%uwa{*4?vdi)47q{xvZ zOPV~1GNsCuEL*yK2{We5nKWzKyoocX&Ye7a`uqtrsL-KAiyA$OG^x_1Oq)7=3N@<K zsZ^_4y^1xf)~#H-di@GEtk|(+%bGolHm%yVY}>kh3pcLZxpeE=y^A-m-o1SL`uz(y zu;9Uj3mZO+II-fzj2k<C3^}so$&@QwzKl7u=FOZtd;SbMwCK^KOPfB8I<@N6tXsQ& z4Li2%*|clhzKuJ#?%lk5`~D3)xbWe`iyJ?VJh}4a%$qxZ4n4Z`>C~%Rzm7e-_U+ue zd;bnTy!i3t%bP!sKE3+&?AyD44?n*A`Sk1CzmGq^{{8&^`}_Y7V1NP+NML~m9*AIq z3NFZCgAP6jVT2M+NMVH*UWj3a8g9s8haP?iVu&J+NMea5o`_<KDz3<4i!QzhV~jG+ zNMnsQ-iTw4I_}70k3RkgWROA*No0{m9*Ja<N-oJ{lTJPfWt37*NoAE*UWsLvT5ic@ zmtKAeW|(4*NoJX5o{46fYOcv<n{K`dXPk1*NoSpQ-ic?PdhW?*pML%cXrO`)N@$^m z9*St9iZ04%qmDibX{3@)N@=B*UW#d^nr_Nzr=ETaYN(=)N@}U5o{DO!s;<gvtFFEZ zYpk-)N^7mQ-im9ky6(zrufF~YY_P%(OKh>m9*h2LvdS*YY_rZj3vIO0PD^dI)?SNk zw%TsXZMWWj3vRgLj!SO2=AMggy6UdWZoBTj3vayg&P#8-_TGzczWVOVZ@>Qj3vj>! 
z4@_{u1|N)Y!U`|UaKjEi3~|H~PfT&e7GI2U#u{(TamOBi406aKk4$pOCZCLQ$||qS za?38i40Fsf&rEa8Hs6eM&N}bRbI(5i40O;!4^4E@MjwrI(n>GQbkj~h4RzE~Pfc~z zR$q;E)>?1Pb=O{h4R+XKk4<*jW}l6A+G?-OcH3^h4R_pf&rNsTcHfP6-g@uNci(>h z4S3*!4^DXDh98c2;)*ZMc;k*g4teB~PySAM<(6NLdFGmL&Uxpae-3)+qK{5`>878K zdg`jL&U)*vzYcrsvd>O??Y7^Jd+xgL&U^2^{|<cc!Vgb;@x~vIeDcaK&wTUFKM#HM z(oau)_10gHefHXK&wcmae-D26;*U>$`R1RGe){UK&wl&vzYl->^3P9y{r2CFfByRK z&wu~^{|~?b3UGh~ET91oh`<CYaDfbLpaUNW!3aulf)uQv1uuxf3~F$L9PFS6KM2AQ zig1J^ETIWch{6=AaD^;vp$lIK!x+kNhBU094R46U9O`g~JnW$le+a}N3UP=;ETR#Q zh{Pl+afwW9q7$D8#VAT~id3wk{uQr?#Vl%Zi(KrY7rzL`Fp6=EWGtf@&xpn}s&S2M zY@-|B2*)_eagKDXqaE*v$2{tBk9_Q-AO8r*Knik@ge;^X4~fV`DsqvGY@{O}3CT!G za*~v+q$Mwj$xLc;lbr0NCqD_wP>OPtq%5T=Pl?J@s&bXAY^5t-3Cmc@a+b8Lr7drX z%UtSmm%QwyFMkQlU<z}X#4M&UkBQ7=Ds!34Y^F1x3C(CqbDGqwrZumL&1`COo80WC zH@^waaEf!B<SeH-&xy`-s&k#}Y^OWl3D0=SbDs39r#<h9&wT20pZx5nKmQ5PfC_Y= z1TCmR4~o!)Ds-U?ZKy-}9}3ZkN_3(Wt*AvWiqVW}bfX;Ys7F5v(vXUDq$Dk=Nl%K> zl&W;4EN!VvUkcNh%5<hQt*K3Kiqo9xbf-M+sZW0j)SwD=s6;KQQICq$q$+i(Ol_)D zp9<BeN_DDKt*TY8iq))Yb*o(Ms#m`X*073otYj^#S<i~rw5oNjY;CJs-wM~b%5|=E zt*c${ir2jAb+3Hwt6%>L*uV;Qu!JqFVGoPg#42{NjBTuA9}C&YN_Mi8t*m7)i`mR- zcC(!AtY<$9+R%!2w4^PqX-|vV)T(y1tZl7pUklsV%67K2t*vcui`(4lcDKCkt#5w| K+~9_!0028Y-LLZi diff --git a/functions/localization_algos/ekf_gs_sci.py b/functions/localization_algos/ekf_gs_sci.py deleted file mode 100644 index 373cf9b..0000000 --- a/functions/localization_algos/ekf_gs_sci.py +++ /dev/null @@ -1,211 +0,0 @@ -import numpy as np -from numpy import matrix -from math import cos, sin, atan2, sqrt - -def rot_mtx(theta): - return matrix([[cos(theta), -sin(theta)], [sin(theta), cos(theta)]]) - -class EKF_gs_sci(): - def __init__(self, algo_name): - self.algo_name = algo_name - - def state_veriance_init(self, num_robots): - sigma_d = 0.01*np.matrix(np.identity(2*num_robots), dtype = float) - sigma_i = 0.01*np.matrix(np.identity(2*num_robots), dtype = float) - state_variance = [sigma_d, sigma_i] - return state_variance - - def algo_update(self, robot_data, update_type, sensor_data): - #return [s, orinetations, sigma_s] - [s, orinetations, sigma_s, index] = robot_data - [measurement_data, sensor_covariance] = sensor_data - if update_type == 'propagation update': - return self.propagation_update(s, orinetations, sigma_s, measurement_data, sensor_covariance, index) - elif update_type == 'landmark observation update': - return self.absolute_obser_update(s, orinetations, sigma_s, measurement_data, sensor_covariance, index) - elif update_type == 'relative observation update': - return self.relative_obser_update(s, orinetations, sigma_s, measurement_data, sensor_covariance, index) - elif update_type == 'communication': - return self.communication(s, orinetations, sigma_s, measurement_data, index) - else: - print "invalid update" - - def calculate_trace_state_variance(self, state_variance, robot_index): - [sigma_d, sigma_i] = state_variance - total_sigma = sigma_d + sigma_i - #j = robot_index*2 - #return np.trace(total_sigma[j:j+1, j:j+1]) - return np.trace(total_sigma) - - - def propagation_update(self, s, orinetations, sigma_s, input_data, sigma_odo, index): - num_robots = len(s)/2 - delta_t = input_data[0] - v = input_data[1] - orinetations[index] = input_data[2] - self_theta = orinetations[index] - - i = 2*index - - s[i,0] = s[i,0] + cos(self_theta)*v*delta_t #x - s[i+1,0] = s[i+1,0] + sin(self_theta)*v*delta_t #y - - # covariance update - [sigma_d, sigma_i] = sigma_s - - G = rot_mtx(self_theta) - W = sigma_odo - W[0,0] = sigma_odo[0,0]*delta_t*delta_t #var_v - W[1,1] = 
sigma_odo[1,1]*delta_t*delta_t*v*v #var_theta - Q_ii = G * W * G.getT() - - for j in range(num_robots): - jj = 2*j - - if j ==index: - total_sigma = sigma_i[jj:jj+2, jj:jj+2] + sigma_d[jj:jj+2, jj:jj+2] - sigma_i[jj:jj+2, jj:jj+2] += Q_ii - total_sigma += Q_ii - sigma_d[jj:jj+2, jj:jj+2] = total_sigma - sigma_i[jj:jj+2, jj:jj+2] - - else: - total_sigma = sigma_i[jj:jj+2, jj:jj+2] + sigma_d[jj:jj+2, jj:jj+2] #repeated - sigma_i[jj:jj+2, jj:jj+2] += delta_t*delta_t*W[1,1]*np.identity(2) #why only consider var_theta?? - total_sigma += delta_t*delta_t*W[1,1]*np.identity(2) - sigma_d[jj:jj+2, jj:jj+2] = total_sigma - sigma_i[jj:jj+2, jj:jj+2] - - sigma_s = [sigma_d, sigma_i] - - return [s, orinetations, sigma_s] - - - - def absolute_obser_update(self, s, orinetations, sigma_s, input_data, sigma_ob, index): - - num_robots = len(s)/2 - self_theta = orinetations[index] - - i = 2*index - landmark_loc = input_data[0] - meas_range = input_data[1] - bearing = input_data[2] - - z = np.matrix([meas_range*cos(bearing), meas_range*sin(bearing)]).getT() # actual measurement - - # for estimated measurement - delta_x = landmark_loc[0] - s.item(i,0) - delta_y = landmark_loc[1] - s.item(i+1,0) - #rot_mtx = np.matrix([[cos(self_theta), -sin(self_theta)], [sin(self_theta), cos(self_theta)]]) - - z_hat = rot_mtx(self_theta).getT()*(np.matrix([delta_x, delta_y]).getT()) - - H_i = np.matrix(np.zeros((2,2*num_robots))) - H_i[0, i] = -1 - H_i[1, i+1] = -1 - H = rot_mtx(self_theta).getT()*H_i - - sigma_ob[1,1] = sigma_ob[1,1]*meas_range*meas_range - - V = rot_mtx(bearing) - R = V*sigma_ob*V.getT() #sigma_z - - - [sigma_d, sigma_i] = sigma_s - total_sigma = sigma_i + sigma_d - sigma_invention = H * total_sigma * H.getT() + R - kalman_gain = total_sigma*H.getT()*sigma_invention.getI() - - s = s + kalman_gain*(z - z_hat) - - total_sigma = total_sigma - kalman_gain*H*total_sigma - sigma_i = (np.identity(num_robots*2) - kalman_gain*H) * sigma_i * (np.identity(num_robots*2) - kalman_gain*H).getT() + kalman_gain * R * kalman_gain.getT() - sigma_d = total_sigma - sigma_i - - sigma_s = [sigma_d, sigma_i] - - - - return [s, orinetations, sigma_s] - - - def relative_obser_update(self, s, orinetations, sigma_s, input_data, sigma_ob, index): - #i obser j - - num_robots = len(s)/2 - self_theta = orinetations[index] - - i = index * 2 - obser_index = input_data[0] - meas_range = input_data[1] - bearing = input_data[2] - j = obser_index * 2 - - z = np.matrix([meas_range*cos(bearing), meas_range*sin(bearing)]).getT() # actual measurement - - H_ij = np.zeros((2,2*num_robots)) - H_ij[0, i] = -1 - H_ij[1, i+1] = -1 - H_ij[0, j] = 1 - H_ij[1, j+1] = 1 - - H = rot_mtx(self_theta).getT()*H_ij - - z_hat = H * s - - sigma_ob[1,1] = sigma_ob[1,1]*meas_range*meas_range - - V = rot_mtx(bearing) - R = V*sigma_ob*V.getT() #sigma_z - - - [sigma_d, sigma_i] = sigma_s - total_sigma = sigma_i + sigma_d - - e = 0.83# (iii+1)*0.01 - - p_1 = (1/e) * sigma_d + sigma_i - p_2 = (1/(1-e)) * H * (sigma_i + sigma_d) * H.getT() + R - sigma_invention = H * p_1 * H.getT() + p_2 - kalman_gain = p_1 *H.getT()*sigma_invention.getI() - - s = s + kalman_gain*(z - z_hat) - - total_sigma = (np.identity(num_robots*2)-kalman_gain*H) * p_1 - sigma_i = (np.identity(num_robots*2) - kalman_gain*H) * sigma_i * (np.identity(num_robots*2) - kalman_gain*H).getT() + kalman_gain * R * kalman_gain.getT() - sigma_d = total_sigma - sigma_i - - - sigma_s = [sigma_d, sigma_i] - - return [s, orinetations, sigma_s] - - - - - def communication(self, s, orinetations, sigma_s, input_data, 
index): - num_robots = len(s)/2 - - [sigma_d, sigma_i] = sigma_s - - comm_robot_s = input_data[0] - comm_robot_sigma_s = input_data[1] - [comm_robot_sigma_d, comm_robot_sigma_i] = comm_robot_sigma_s - - e = 0.5# (iii+1)*0.01 - - p_1 = (1/e) * sigma_d + sigma_i - p_2 = (1/(1-e)) * comm_robot_sigma_d + comm_robot_sigma_i - - kalman_gain = p_1 * (p_1+p_2).getI() - s = s + kalman_gain * (comm_robot_s - s) - - - i_mtx_tot = np.matrix(np.identity(num_robots*2)) - total_sigma = (i_mtx_tot - kalman_gain) * p_1 - sigma_i = (i_mtx_tot - kalman_gain) * sigma_i * (i_mtx_tot - kalman_gain).getT() + kalman_gain * comm_robot_sigma_i * kalman_gain.getT() - sigma_d = total_sigma - sigma_i - - - sigma_s = [sigma_d, sigma_i] - - return [s, orinetations, sigma_s] diff --git a/functions/localization_algos/ekf_gs_sci2.py b/functions/localization_algos/ekf_gs_sci2.py index 7adfca5..482547d 100644 --- a/functions/localization_algos/ekf_gs_sci2.py +++ b/functions/localization_algos/ekf_gs_sci2.py @@ -124,7 +124,7 @@ class EKF_GS_SCI2(ekf_algo_framework): [s, orinetations, sigma_s, index] = robot_data [measurement_data, sensor_covariance] = sensor_data sigma_ob = sensor_covariance - '''s + num_robots = int(len(s)/2) self_theta = orinetations[index] @@ -173,15 +173,16 @@ class EKF_GS_SCI2(ekf_algo_framework): sigma_d = total_sigma - sigma_i sigma_s = [sigma_d, sigma_i] - ''' + return [s, orinetations, sigma_s] def communication(self, robot_data, sensor_data): - + [s, orinetations, sigma_s, index] = robot_data [comm_data, comm_variance] = sensor_data [comm_robot_s, orinetations, comm_robot_sigma_s]=comm_data - ''' + num_robots = int(len(s)/2) + e = 0.9 # (iii+1)*0.01 [sigma_d, sigma_i] = sigma_s @@ -195,10 +196,10 @@ class EKF_GS_SCI2(ekf_algo_framework): i_mtx = np.identity(num_robots*2) total_sigma = (i_mtx.copy() - kalman_gain) * p_1 - sigma_i = (i_mtx - kalman_gain*H) * sigma_i * (i_mtx.copy() - kalman_gain*H).getT() + kalman_gain * sigma_z * kalman_gain.getT() + sigma_i = (i_mtx - kalman_gain) * sigma_i * (i_mtx.copy() - kalman_gain).getT() + kalman_gain * comm_robot_sigma_i * kalman_gain.getT() sigma_d = total_sigma - sigma_i sigma_s = [sigma_d, sigma_i] - ''' + return [s, orinetations, sigma_s] diff --git a/functions/localization_algos/simple_ekf_meas_only.py b/functions/localization_algos/simple_ekf_meas_only.py deleted file mode 100644 index 0196aa6..0000000 --- a/functions/localization_algos/simple_ekf_meas_only.py +++ /dev/null @@ -1,100 +0,0 @@ -import numpy as np -import math -from math import cos, sin, atan2, sqrt, pi -from numpy import dot, arctan -from numpy.linalg import inv -import scipy.linalg as linalg -#import parameters -from localization_algo_framework import ekf_algo_framework -#from sympy import * -from collections import deque - -def rot_mtx(theta): - return np.matrix([[cos(theta), -sin(theta)], [sin(theta), cos(theta)]]) - - -class Trilateration(ekf_algo_framework): - - def __init__(self, algo_name): - ekf_algo_framework.__init__(self, algo_name) - self.queue_meas_objs = deque([]) - self.dict_meas = {} - - def state_init(self): - return 0.01*np.matrix(np.identity(2), dtype = float) - - def state_variance_init(self, num_robots): - return 0.01*np.matrix(np.identity(2), dtype = float) - - def calculate_trace_state_variance(self, robot_data): - [s, orinetations, sigma_s, index] = robot_data - trace_state_var = np.trace(sigma_s) - return trace_state_var - - def propagation_update(self, robot_data, sensor_data): - [s, orinetations, sigma_s, index] = robot_data - sigma_s = sigma_s+1 - return [s, 
orinetations, sigma_s] - - - - def absolute_obser_update(self, robot_data, sensor_data): - [s, orinetations, sigma_s, index] = robot_data - [measurement_data, sensor_covariance] = sensor_data - sigma_ob = sensor_covariance - num_robots = int(len(s)/2) - self_theta = orinetations[index] - - landmark_loc = measurement_data[0] - meas_range = measurement_data[1] - bearing = measurement_data[2] - meas_time = measurement_data[3] - obj_id = measurement_data[4] - i = 2*index - - self.dict_meas[obj_id] = [landmark_loc, meas_range] - if obj_id not in self.queue_meas_objs: - self.queue_meas_objs.append(obj_id) - - if len(self.queue_meas_objs) > 3: - self.queue_meas_objs.popleft() - - print("#Current queue of objects: ", self.queue_meas_objs) - - if len(self.queue_meas_objs) == 3: - - obj1 = self.queue_meas_objs[0] - obj2 = self.queue_meas_objs[1] - obj3 = self.queue_meas_objs[2] - - - [p1, r1] = self.dict_meas[obj1] - [p2, r2] = self.dict_meas[obj2] - [p3, r3] = self.dict_meas[obj3] - print(self.dict_meas) - - x1 = p1[0] - y1 = p1[1] - - x2 = p2[0] - y2 = p2[1] - - x3 = p3[0] - y3 = p3[1] - - A = (-2*x1+2*x2) - B = (-2*y1+2*y2) - C = (r1*r1-r2*r2-x1*x1+x2*x2-y1*y1+y2*y2) - - D = (-2*x2-2*x3) - E = (-2*y2+2*y3) - F = (r2*r2-r3*r3-x2*x2+x3*x3-y2*y2+y3*y3) - - x = (C*E-F*B)/(E*A-B*D) - y = (C*D-A*F)/(B*D-A*E) - - s[i, 0] = x - s[i+1, 0] = y - print("#Est. Loc: ", [x, y]) - - return [s, orinetations, sigma_s] \ No newline at end of file diff --git a/functions/localization_algos/centralized_ekf2.py b/functions/localization_algos/under_developing_algos/centralized_ekf2.py similarity index 100% rename from functions/localization_algos/centralized_ekf2.py rename to functions/localization_algos/under_developing_algos/centralized_ekf2.py diff --git a/functions/localization_algos/ekf_gs_ci2.py b/functions/localization_algos/under_developing_algos/ekf_gs_ci2.py similarity index 100% rename from functions/localization_algos/ekf_gs_ci2.py rename to functions/localization_algos/under_developing_algos/ekf_gs_ci2.py diff --git a/functions/localization_algos/ekf_ls_bda2.py b/functions/localization_algos/under_developing_algos/ekf_ls_bda2.py similarity index 100% rename from functions/localization_algos/ekf_ls_bda2.py rename to functions/localization_algos/under_developing_algos/ekf_ls_bda2.py diff --git a/functions/localization_algos/ekf_ls_ci2.py b/functions/localization_algos/under_developing_algos/ekf_ls_ci2.py similarity index 100% rename from functions/localization_algos/ekf_ls_ci2.py rename to functions/localization_algos/under_developing_algos/ekf_ls_ci2.py diff --git a/functions/random_walk.py b/functions/random_walk.py deleted file mode 100644 index b4bf26b..0000000 --- a/functions/random_walk.py +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Wed Aug 15 13:31:34 2018 - -@author: william -""" - -import numpy as np -import matplotlib.pyplot as plt -import mpl_toolkits.mplot3d.axes3d as p3 -import matplotlib.animation as animation - -# Fixing random state for reproducibility -np.random.seed(19680801) - - -def Gen_RandLine(length, dims=2): - """ - Create a line using a random walk algorithm - - length is the number of points for the line. - dims is the number of dimensions the line has. - """ - lineData = np.empty((dims, length)) - lineData[:, 0] = np.random.rand(dims) - for index in range(1, length): - # scaling the random numbers by 0.1 so - # movement is small compared to position. 
- # subtraction by 0.5 is to change the range to [-0.5, 0.5] - # to allow a line to move backwards. - step = ((np.random.rand(dims) - 0.5) * 0.1) - lineData[:, index] = lineData[:, index - 1] + step - - return lineData - - -def update_lines(num, dataLines, lines): - for line, data in zip(lines, dataLines): - # NOTE: there is no .set_data() for 3 dim data... - line.set_data(data[0:2, :num]) - line.set_3d_properties(data[2, :num]) - return lines - -# Attaching 3D axis to the figure -fig = plt.figure() -ax = p3.Axes3D(fig) - -# Fifty lines of random 3-D lines -data = [Gen_RandLine(25, 3) for index in range(50)] - -# Creating fifty line objects. -# NOTE: Can't pass empty arrays into 3d version of plot() -lines = [ax.plot(dat[0, 0:1], dat[1, 0:1], dat[2, 0:1])[0] for dat in data] - -# Setting the axes properties -ax.set_xlim3d([0.0, 1.0]) -ax.set_xlabel('X') - -ax.set_ylim3d([0.0, 1.0]) -ax.set_ylabel('Y') - -ax.set_zlim3d([0.0, 1.0]) -ax.set_zlabel('Z') - -ax.set_title('3D Test') - -# Creating the Animation object -line_ani = animation.FuncAnimation(fig, update_lines, 25, fargs=(data, lines), - interval=50, blit=False) -plt.show() \ No newline at end of file diff --git a/functions/ros_compatibility.py b/functions/ros_compatibility.py index 0f31e38..d32cc35 100644 --- a/functions/ros_compatibility.py +++ b/functions/ros_compatibility.py @@ -20,35 +20,36 @@ from pprint import pprint # load algorithms sys.path.append(os.path.join(os.path.dirname(__file__), "localization_algos")) -from centralized_ekf2 import Centralized_EKF2 -from simple_ekf_meas_only import Trilateration +from centralized_ekf import Centralized_EKF from simple_ekf import Simple_EKF from ekf_ls_bda import EKF_LS_BDA from ekf_gs_bound import EKF_GS_BOUND -from ekf_gs_ci2 import EKF_GS_CI2 +from ekf_gs_ci import EKF_GS_CI +from ekf_gs_sci2 import EKF_GS_SCI2 #dataset_path = '/home/william/catkin_ws/ros_colo_dataset/' #dataset_path = '/home/william/UTIAS-dataset/MRCLAM_Dataset3/' #dataset_path = '/home/william/full_tests/full_test9/' compname = getpass.getuser() -dataset_path = "/home/"+ compname +"/full_tests/full_test_v2_5/" +dataset_path = "/home/"+ compname +"/full_tests/full_test_v3_6/" -dataset_labels = [1,2] -duration = 240 # duration for the simulation in sec +dataset_labels = [1,2,3] +duration = 300 # duration for the simulation in sec testing_dataset = Dataset('testing') start_time, starting_states, dataset_data, time_arr = testing_dataset.load_MRCLAMDatasets(dataset_path, dataset_labels, duration, delay_start = 0) -loc_algo = Simple_EKF('algo') +loc_algo = EKF_GS_SCI2('algo') robot = RobotSystem('robot', dataset_labels, loc_algo, distr_sys = True) -sim = SimulationManager('PE-CoLo Simple_EKF') +sim = SimulationManager('PE-CoLo EKF_GS_SCI2') state_recorder = StatesRecorder('recorder',dataset_labels) sim.sim_process_naive(dataset_labels, testing_dataset, robot, state_recorder, simple_plot = True) -''' + analyzer = Analyzer('analyzer', dataset_labels) loc_err_per_run, trace_per_run, t_arr = analyzer.calculate_loc_err_and_trace_state_variance_per_run(state_recorder, plot_graphs = True) robot_loc_time_unit = analyzer.robot_location_at_unit_time_interval(state_recorder) data_in_time_order = state_recorder.get_data_in_time_order() update_in_time_order = state_recorder.get_updata_type_in_time_order() -''' +update_type_arr = state_recorder.get_update_type_arr() +recorded_data = state_recorder.get_recorded_data() #recorded_dataline = [time, robot_label, est_x_pos, est_y_pos, trace_state_var, gt_x_pos, gt_y_pos, loc_err] diff --git 
a/functions/ros_demo.py b/functions/ros_demo.py index 407cea5..7f43535 100644 --- a/functions/ros_demo.py +++ b/functions/ros_demo.py @@ -28,16 +28,16 @@ from ekf_gs_ci2 import EKF_GS_CI2 #dataset_path = '/home/william/catkin_ws/ros_colo_dataset/' #dataset_path = '/home/william/UTIAS-dataset/MRCLAM_Dataset3/' compname = getpass.getuser() -dataset_path = "/home/"+ compname +"/full_tests/full_test_v2_5/" +dataset_path = "/home/"+ compname +"/full_tests/full_test_v3_2/" dataset_labels = [1, 2] duration = 240 # duration for the simulation in sec testing_dataset = Dataset('testing') start_time, starting_states, dataset_data, time_arr = testing_dataset.load_MRCLAMDatasets(dataset_path, dataset_labels, duration, delay_start = 0) -loc_algo = Centralized_EKF('algo') -robot = RobotSystem('robot', dataset_labels, loc_algo, distr_sys = False) +loc_algo = Simple_EKF('algo') +robot = RobotSystem('robot', dataset_labels, loc_algo, distr_sys = True) -sim = SimulationManager('Centralized_EKF2') +sim = SimulationManager('Simple_EKF') state_recorder = StatesRecorder('recorder',dataset_labels) sim.sim_process_naive(dataset_labels, testing_dataset, robot, state_recorder, simple_plot = False) diff --git a/functions/simulation_process/state_recorder.py b/functions/simulation_process/state_recorder.py index d428f52..559fb1d 100644 --- a/functions/simulation_process/state_recorder.py +++ b/functions/simulation_process/state_recorder.py @@ -67,10 +67,11 @@ class StatesRecorder(): print(updata_type) print('neg trace: ', recorded_dataline) - if(loc_err >=1 and updata_type == 'landmark observation'): + if(loc_err >= 1): print(updata_type) - print('>1 loc err: ',recorded_dataline) - + print('>1 m loc err: ',recorded_dataline) + print(req.get_message()) + #print(recorded_dataline) self.data_in_time_order.append(recorded_dataline) self.updata_type_in_time_order.append(updata_type) diff --git a/functions/test_random.py b/functions/test_random.py deleted file mode 100644 index d25eae6..0000000 --- a/functions/test_random.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -author: Cade -""" - -import os, sys -# sys.path.insert(0, 'Simulation-Environment-for-Cooperative-Localization/functions') -sys.path.append(os.path.join(os.path.dirname(__file__), ".")) -os.chdir(os.path.join(os.path.dirname(__file__), ".")) -print os.getcwd() - -from dataset_manager.existing_dataset import Dataset -from simulation_process.sim_manager import SimulationManager -from robots.robot_system import RobotSystem -from simulation_process.state_recorder import StatesRecorder -from data_analysis.data_analyzer import Analyzer -from data_analysis.realtime_plot import animate_plot - -# load algorithms -#sys.path.insert(0, '/Users/william/Documents/SLAM_Simulation/Simulation-Environment-for-Cooperative-Localization/functions/localization_algos') -sys.path.append(os.path.join(os.path.dirname(__file__), "localization_algos")) -from centralized_ekf import Centralized_EKF ## this is guaranteed working. 
always converges -from ekf_ls_bda import EKF_LS_BDA - -#need to verify these -from ekf_ls_ci import EKF_LS_CI -from ekf_gs_ci import EKF_GS_CI - - -def tf(i): - return i + 0.0195 - - -dataset_path = '' - -dataset_labels = [1]#[1,2,3,4,5] -duration = 100 # duration for the simulation in sec -testing_dataset = Dataset('rand', dataset_path, dataset_labels) -start_time, starting_states, dataset_data, time_arr = testing_dataset.initialization_Random_Dataset(duration) - -''' -loc_algo = EKF_LS_CI('ls ci') -robot = RobotSystem('r', dataset_labels, loc_algo, distr_sys = False) - -''' -loc_algo = Centralized_EKF('cent_ekf') -robot = RobotSystem('robot me', dataset_labels, loc_algo, distr_sys = True) - - -sim = SimulationManager('sim') -state_recorder = StatesRecorder('sr',dataset_labels) -analyzer = Analyzer('analyzer', dataset_labels) - -print "robot ", robot -print "dataset labels ", dataset_labels -print "testing dataset", testing_dataset - -sim.sim_process_naive(dataset_labels, testing_dataset, robot, state_recorder, simple_plot = True) - -loc_err_per_run, trace_per_run, t_arr = analyzer.calculate_loc_err_and_trace_state_variance_per_run(state_recorder) - -#robot_location_at_unit_time_interval -robot_loc_time_unit = analyzer.robot_location_at_unit_time_interval(state_recorder) - -get_robot_time_func = state_recorder.get_time_arr -le = state_recorder.get_loc_err_arr() -t_S = state_recorder.get_trace_sigma_s_arr() -animate_plot(dataset_labels, state_recorder, analyzer, get_robot_time_func, le, t_S) - - -# time label estimate-x-pos estimate-y-pos trace groundtruth_x groundtruth_y error \ No newline at end of file -- GitLab
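
Note on the substantive algorithmic change in this patch, in ekf_gs_sci2.py: the previously commented-out absolute observation and communication updates are re-enabled, and the communication step now fuses the received estimate with the sender's independent covariance (comm_robot_sigma_i) instead of a measurement covariance, using the plain gain (i_mtx - kalman_gain) rather than (i_mtx - kalman_gain*H). A minimal numpy sketch of that split-covariance-intersection fusion step, written against plain arrays with the fixed weight e = 0.9 used in the diff; the function name and signature are illustrative, not the CoLo-AT API:

    import numpy as np

    def sci_communication_update(s, sigma_d, sigma_i,
                                 comm_s, comm_sigma_d, comm_sigma_i, e=0.9):
        # s, comm_s: stacked (2N, 1) position estimates of receiver and sender
        # sigma_d / sigma_i: correlated / independent parts of each covariance
        n = s.shape[0]
        p_1 = sigma_d / e + sigma_i                    # receiver, inflated correlated part
        p_2 = comm_sigma_d / (1.0 - e) + comm_sigma_i  # sender, inflated correlated part

        gain = p_1 @ np.linalg.inv(p_1 + p_2)          # covariance-intersection gain
        s_new = s + gain @ (comm_s - s)

        ident = np.eye(n)
        total = (ident - gain) @ p_1
        sigma_i_new = (ident - gain) @ sigma_i @ (ident - gain).T \
                      + gain @ comm_sigma_i @ gain.T
        sigma_d_new = total - sigma_i_new              # keep the d/i split consistent
        return s_new, sigma_d_new, sigma_i_new

Keeping the covariance split into a correlated part (sigma_d) and an independent part (sigma_i) is what lets each robot fuse a neighbor's estimate without tracking the cross-correlations between robots, which is the point of the split form this patch finalizes.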