diff --git a/CoLo-AT/README.md b/CoLo-AT/README.md new file mode 100755 index 0000000000000000000000000000000000000000..b2cb09ac33f70880c1d25a700cb5924eaf1d17b8 --- /dev/null +++ b/CoLo-AT/README.md @@ -0,0 +1,133 @@ +# LEMUR-CoLo-Sim + + + +## Synopsis + +Cooperative localization is still a challenging task for cooperative robot control. CoLo is a robotic simulation environment for cooperative localization or cooperative simultaneous localization and mapping (SLAM) using real world dataset[1] or simulated data. The goal of this project to let users to create and test their own algorithms with some existing algorithms. Compared with other popular robotic simulation environments[2], it has fewer dependencies and more convenient to add localization algorithms without worry much about the robot settings. Moreover, it is able to use both real world data and simulated data to test the robustness and liabilities of these algorithms, whereas other simulation environments use only simulated data. +https://drive.google.com/file/d/1NlUM2QXT_KfZkVOJu3Xsm8_QL8OSy6hz/view?usp=sharing +## Features + +Real world dataset +Flexible control on the real world dataset +Easy to add and modify algorithms +Preloaded with several existing algorithms +Modularized simulation environment +Basic statistical analytic tools for algorithms + + + +## Installation + +1. Clone this repository: + +``` +git clone git@git.uclalemur.com:billyskc/Simulation-Environment-for-Cooperative-Localization.git +``` + +## How to run the simulation environment + +1. Install all dependencies + +2. Create a python script for the environment(ex: test_simulation.py) +a. Create a dataset_manager with proper setting +b. Load a localization algorithm in to robots +c. Create simulation manager, recorder and analyzer in CoLo +d. Put all these part in simulation manager + +3. Run CoLo +In MAC terminal: +``` +python Localization_envir.py +``` + + + +## Available Aglorithm: +1. LS-Cen +2. LS-CI +3. LS-BDA +4. GS-SCI +5. 
GS-CI + + + +## Dataset: +UTIAS dataset: +- MRCLAM_Dataset1 +- MRCLAM_Dataset2 +- MRCLAM_Dataset3 +- MRCLAM_Dataset4 +- MRCLAM_Dataset5 +- MRCLAM_Dataset6 +- MRCLAM_Dataset7 +- MRCLAM_Dataset8 +- MRCLAM_Dataset9 +details info: [UTIAS Dataset](http://asrl.utias.utoronto.ca/datasets/mrclam/). + + + +## Authors + +Shengkang Chen +Cade Mallett +Kyle Wong + + + + +## Analytical Tool: + +Plots: +1. Estimation deviation error vs. time +2. Trace of state variance vs. time +3. Landmark observation vs. time +4. Relative observation vs .time + +Values: +1. Avg. location deviation errors +2. Avg. trace of state variance + +## How to add new localization algorithm to the environment +1. Create the algorithm in the predefined algorithm framework +``` +def propagation_update(self, robot_data, sensor_data): +[s, orinetations, sigma_s, index] = robot_data +... +return [s, orinetations, sigma_s] + +def absolute_obser_update(self, robot_data, sensor_data): +[s, orinetations, sigma_s, index] = robot_data +... +return [s, orinetations, sigma_s] + +def relative_obser_update(self, robot_data, sensor_data): +[s, orinetations, sigma_s, index] = robot_data +... +return [s, orinetations, sigma_s] + +def communication(self, robot_data, sensor_data): +[s, orinetations, sigma_s, index] = robot_data +... +return [s, orinetations, sigma_s] +``` +4. 
Add it to the algorithm library + + +## Files and folders includes within the project + +### Datasets Folder: +Store the UTIAS datasets which has odometry, measurement and groundtruth data for each robot + +### Functions Folder: +Contains all the python scripts +Folders: +simulation process +robots +requests +localization algos +dataset_manager +data_analysis + + + diff --git a/CoLo-AT/RunningCoLo.png b/CoLo-AT/RunningCoLo.png new file mode 100644 index 0000000000000000000000000000000000000000..dee957ebfe608ef64baef13cc76268f2d87845bd Binary files /dev/null and b/CoLo-AT/RunningCoLo.png differ diff --git a/CoLo-AT/__init__.py b/CoLo-AT/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..b6e690fd59145ce8900fd9ab8d8a996ee7d33834 --- /dev/null +++ b/CoLo-AT/__init__.py @@ -0,0 +1 @@ +from . import * diff --git a/CoLo-AT/data_analysis/.DS_Store b/CoLo-AT/data_analysis/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 Binary files /dev/null and b/CoLo-AT/data_analysis/.DS_Store differ diff --git a/CoLo-AT/data_analysis/__init__.py b/CoLo-AT/data_analysis/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b6e690fd59145ce8900fd9ab8d8a996ee7d33834 --- /dev/null +++ b/CoLo-AT/data_analysis/__init__.py @@ -0,0 +1 @@ +from . 
import * diff --git a/CoLo-AT/data_analysis/__init__.pyc b/CoLo-AT/data_analysis/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26489d0bba1beeb8dc793427dcb6ae25076e944f Binary files /dev/null and b/CoLo-AT/data_analysis/__init__.pyc differ diff --git a/CoLo-AT/data_analysis/__pycache__/__init__.cpython-35.pyc b/CoLo-AT/data_analysis/__pycache__/__init__.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b50e6bc72aa57d159806ba314e68b284a5e34db3 Binary files /dev/null and b/CoLo-AT/data_analysis/__pycache__/__init__.cpython-35.pyc differ diff --git a/CoLo-AT/data_analysis/__pycache__/__init__.cpython-36.pyc b/CoLo-AT/data_analysis/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c63692804c06816026ad58cdf8da4db3ee6706e3 Binary files /dev/null and b/CoLo-AT/data_analysis/__pycache__/__init__.cpython-36.pyc differ diff --git a/CoLo-AT/data_analysis/__pycache__/data_analyzer.cpython-35.pyc b/CoLo-AT/data_analysis/__pycache__/data_analyzer.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce6de0632fa7a1a0d7e09c19fa612bd312bd0984 Binary files /dev/null and b/CoLo-AT/data_analysis/__pycache__/data_analyzer.cpython-35.pyc differ diff --git a/CoLo-AT/data_analysis/__pycache__/data_analyzer.cpython-36.pyc b/CoLo-AT/data_analysis/__pycache__/data_analyzer.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56fb8249090c574284edf5a20dfca7d00694b64c Binary files /dev/null and b/CoLo-AT/data_analysis/__pycache__/data_analyzer.cpython-36.pyc differ diff --git a/CoLo-AT/data_analysis/__pycache__/realtime_plot.cpython-35.pyc b/CoLo-AT/data_analysis/__pycache__/realtime_plot.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d9551659bc133328d33f6bc01e68622869d0d57 Binary files /dev/null and b/CoLo-AT/data_analysis/__pycache__/realtime_plot.cpython-35.pyc differ diff 
--git a/CoLo-AT/data_analysis/__pycache__/realtime_plot.cpython-36.pyc b/CoLo-AT/data_analysis/__pycache__/realtime_plot.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..366ecb8d007190f9aa9ccc357bae5995364e12c0 Binary files /dev/null and b/CoLo-AT/data_analysis/__pycache__/realtime_plot.cpython-36.pyc differ diff --git a/CoLo-AT/data_analysis/analysis.py b/CoLo-AT/data_analysis/analysis.py new file mode 100755 index 0000000000000000000000000000000000000000..786273deafb41835eeb456a10555dfed617cd6cc --- /dev/null +++ b/CoLo-AT/data_analysis/analysis.py @@ -0,0 +1,330 @@ + +import numpy as np +import math +import os + +def find_nearest_time_idx(array, start_idx, value): + #array[i][0] is the time + i = start_idx + if array[i]['time'] < value: + while array[i]['time'] < value: + i = i+1 + if array[i]['time']-value < value-array[i-1]['time']: + return i + else: + return i-1 + else: + return i + +class DataAnalyzer(): + def __init__(self, name): + self.name = name + + + def estimation_time_analysis_single(est_data): + #calcaulate the root-mean-square deviation (RMSD) between estimation and ground truth and the trace of state variance at each time step. 
+ + robot_num = str(robot_num) + file="/Datasets/"+dataset_name+"/Robot"+robot_num+"_Groundtruth.dat" + path=os.getcwd()+file + groundtruth_file=open(path,'r+'); + + file="/Results_files/estimation_results/"+estimation_file_name+".txt" + path=os.getcwd()+file + est_result_file = open(path, "r+") + + file="/Results_files/analysis_results/"+analysis_result_name+".txt" + path=os.getcwd()+file + analysis_file = open(path, "w") + + + s_ground = groundtruth_file.readline() + s_ground = groundtruth_file.readline() + s_ground = groundtruth_file.readline() + s_ground = groundtruth_file.readline() + + s_ground = groundtruth_file.readline() + t_ground = float(s_ground.split()[0]) + + s_est = est_result_file.readline() + t_est = float(s_est.split()[0]) + + while(s_est): + s = [float(s_est.split()[1]), float(s_est.split()[2]), float(s_est.split()[3])] + + while t_ground <= t_est: + s_ground = groundtruth_file.readline() + t_ground = float(s_ground.split()[0]) + if t_ground >= t_est: + s_g = [float(s_ground.split()[1]), float(s_ground.split()[2])] + break + + err_sq =[(s_g[0]-s[0])*(s_g[0]-s[0]), (s_g[1]-s[1])*(s_g[1]-s[1])] + #err_sq[2] = min((s_g[2]-s[2])*(s_g[2]-s[2]), (s_g[2]+2*math.pi-s[2])*(s_g[2]+2*math.pi-s[2]), (s_g[2]-2*math.pi-s[2])*(s_g[2]-2*math.pi-s[2])) + err_sqrt = math.sqrt(sum(err_sq)) + err_x = s_g[1]-s[1] + analysis_file.write(str(t_est)+ '\t' + str(err_sqrt) + '\t' + str(s_est.split()[3]) + '\t' + str(err_x) +'\n') # s_est.split()[3] is the trace of sigma_S + + s_est = est_result_file.readline() + if(s_est): + t_est = float(s_est.split()[0]) + + groundtruth_file.close() + est_result_file.close() + analysis_file.close() + + return 1 + + + def estimation_time_analysis_multirobot(dataset_name, general_estimation_file_name, general_analysis_result_name, robot_list): + for i, robot in enumerate(robot_list): + robot_num = robot.get_label() + estimation_file_name = general_estimation_file_name +'%d' %robot_num + analysis_result_name = general_analysis_result_name + 
'%d' %robot_num + estimation_time_analysis_single(dataset_name, robot_num, estimation_file_name, analysis_result_name) + + + def avg_MSE_and_trace(analysis_result_name): + + #initialization for all the lists for plotting + dev_l_t = [] + dev_l_RMSE = [] + dev_l_trace = [] + + file="/Results_files/analysis_results/"+analysis_result_name+".txt" + path=os.getcwd()+file + with open(path,'r') as analysis_file: + s_dev = analysis_file.readline() + while (s_dev): + dev_l_t.append(float(s_dev.split( )[0])) + dev_l_RMSE.append(float(s_dev.split( )[1])) + dev_l_trace.append(float(s_dev.split( )[2])) + s_dev = analysis_file.readline() + + avg_RMSE = float(sum(dev_l_RMSE)) / max(len(dev_l_RMSE), 1) + avg_trace = float(sum(dev_l_trace)) / max(len(dev_l_trace), 1) + print '******Analysis Results******' + print 'Avg. RMSE: ' + str(avg_RMSE) + print 'Avg. trace of state covariance: ' + str(avg_trace) + print '*************\n' + + return [avg_RMSE, avg_trace] + + + + def average_correctness_analysis_per_run(dataset_name, dataset_labels, groundtruth_data, est_result_array): + # #calcaulate the avg. root-mean-square deviation (RMSD) between estimation and groundtruth, and the avg. trace of state variance in each time interval(1s) for all robots. 
+ + num_robot = len(dataset_labels) + analysis_result_array = [[] for i in range(num_robot)] + + time_arr = [] + avg_RMSE_arr = [] + avg_trace_arr = [] + + analysis_file_arr = [] + + for i, robot_label in enumerate(dataset_labels): #analysis for each robot + j = 0 + g_idx = 0 + while j < len(est_result_array[i]): + est_t = est_result_array[i][j]['time'] + g_idx = find_nearest_time_idx(groundtruth_data[i], g_idx, est_t) + + gt_x_pos = groundtruth_data[i][g_idx]['x_pos'] + gt_y_pos = groundtruth_data[i][g_idx]['y_pos'] + est_x_pos = est_result_array[i][j]['x_pos'] + est_y_pos = est_result_array[i][j]['y_pos'] + sigma_s_trace = est_result_array[i][j]['sigma_s_trace'] + + err_sq =[(est_x_pos-gt_x_pos)*(est_x_pos-gt_x_pos), (est_y_pos-gt_y_pos)*(est_y_pos-gt_y_pos)] + err_sqrt = math.sqrt(sum(err_sq)) + + analysis_result = {'time':est_t, 'err_sqrt':err_sqrt, 'sigma_s_trace':sigma_s_trace} + analysis_result_array[i].append(analysis_result) + + #analysis_file_arr[i].write(str(est_t)+ '\t' + str(err_sqrt) + '\t' + str(sigma_s_trace) +'\n') + + j = j+1 + + start_time = est_result_array[0][0]['time'] + 1/2 + + + mean_err = [] + mean_time = [] + mean_STD = [] + + loc_dic = {} + for i, robot_label in enumerate(dataset_labels): + start_loc = 0 + loc_dic[i] = start_loc + + avg_trace = np.zeros(num_robot) + avg_err = np.zeros(num_robot) + + while(len(loc_dic) != 0): + for i, robot_label in enumerate(dataset_labels): + try: + loc = loc_dic[i] + except KeyError: + continue + try: + time_interval_start = analysis_result_array[i][loc]['time'] + except IndexError: + del loc_dic[i] + continue + + time_interval = 0.5 # in [s] for each time interval + num_analyized_result = 0 + sum_trace = 0 + sum_err = 0 + while analysis_result_array[i][loc]['time'] < time_interval_start + time_interval: + sum_trace += analysis_result_array[i][loc]['sigma_s_trace'] + sum_err += analysis_result_array[i][loc]['err_sqrt'] + num_analyized_result += 1 + + loc +=1 + + try: + analysis_result_array[i][loc] + 
except IndexError: + del loc_dic[i] + break + + loc_dic[i] = loc + avg_trace[i] = sum_trace/num_analyized_result + avg_err[i] = sum_err/num_analyized_result + + + all_robot_avg_err = sum(avg_err)/num_robot + all_robot_avg_trace = sum(avg_trace)/num_robot + avg_STD = math.sqrt(all_robot_avg_trace) + + mean_time.append(time_interval_start+1/2 - start_time) + mean_err.append(all_robot_avg_err) + mean_STD.append(avg_STD) + + return [mean_time, mean_err, mean_STD] + + def recording_data(algos_set, all_mean_time, all_mean_err, all_mean_STD, dataset_name): + + + for i, algo_name in enumerate(algos_set): + algo_analysis_file_name = dataset_name +'_algo_'+algo_name + file="/Results_files/analysis_results/"+algo_analysis_file_name+".txt" + path=os.getcwd()+file + algo_analysis_file = open(path, "w") + algo_analysis_file.write('#time[s] \t\t actual error[m] \t state STD[m] \n') + + for j in range(len(all_mean_time[i])): + algo_analysis_file.write(str(all_mean_time[i][j])+ '\t' + str(all_mean_err[i][j]) + '\t' + str(all_mean_STD[i][j]) +'\n') + + algo_analysis_file.close() + + + return 1 + + def average_correctness_analysis_per_run_w_bound(dataset_name, dataset_labels, groundtruth_data, est_result_array): + # #calcaulate the avg. root-mean-square deviation (RMSD) between estimation and groundtruth, and the avg. trace of state variance in each time interval(1s) for all robots. 
+ + num_robot = len(dataset_labels) + analysis_result_array = [[] for i in range(num_robot)] + + time_arr = [] + avg_RMSE_arr = [] + avg_trace_arr = [] + + analysis_file_arr = [] + + for i, robot_label in enumerate(dataset_labels): #analysis for each robot + j = 0 + g_idx = 0 + while j < len(est_result_array[i]): + est_t = est_result_array[i][j]['time'] + g_idx = find_nearest_time_idx(groundtruth_data[i], g_idx, est_t) + + gt_x_pos = groundtruth_data[i][g_idx]['x_pos'] + gt_y_pos = groundtruth_data[i][g_idx]['y_pos'] + est_x_pos = est_result_array[i][j]['x_pos'] + est_y_pos = est_result_array[i][j]['y_pos'] + sigma_s_trace = est_result_array[i][j]['sigma_s_trace'] + bound = est_result_array[i][j]['bound'] + + + err_sq =[(est_x_pos-gt_x_pos)*(est_x_pos-gt_x_pos), (est_y_pos-gt_y_pos)*(est_y_pos-gt_y_pos)] + err_sqrt = math.sqrt(sum(err_sq)) + + analysis_result = {'time':est_t, 'err_sqrt':err_sqrt, 'sigma_s_trace':sigma_s_trace, 'bound': bound} + analysis_result_array[i].append(analysis_result) + #analysis_file_arr[i].write(str(est_t)+ '\t' + str(err_sqrt) + '\t' + str(sigma_s_trace) +'\n') + + j = j+1 + + start_time = est_result_array[0][0]['time'] + 1/2 + + + mean_err = [] + mean_time = [] + mean_STD = [] + mean_bound_sq = [] + + loc_dic = {} + for i, robot_label in enumerate(dataset_labels): + start_loc = 0 + loc_dic[i] = start_loc + + avg_trace = np.array(np.zeros(num_robot)) + avg_bound = np.array(np.zeros(num_robot)) + avg_err = np.array(np.zeros(num_robot)) + + + while(len(loc_dic) != 0): + for i, robot_label in enumerate(dataset_labels): + try: + loc = loc_dic[i] + except KeyError: + continue + try: + time_interval_start = analysis_result_array[i][loc]['time'] + except IndexError: + del loc_dic[i] + continue + + time_interval = 0.5 # in [s] for each time interval + num_analyized_result = 0 + sum_trace = 0 + sum_bound = 0 + sum_err = 0 + while analysis_result_array[i][loc]['time'] < time_interval_start + time_interval: + sum_trace += 
analysis_result_array[i][loc]['sigma_s_trace'] + sum_bound += analysis_result_array[i][loc]['bound'] + sum_err += analysis_result_array[i][loc]['err_sqrt'] + num_analyized_result += 1 + + loc +=1 + try: + analysis_result_array[i][loc] + except IndexError: + del loc_dic[i] + break + + loc_dic[i] = loc + avg_trace[i] = sum_trace/num_analyized_result + avg_bound[i] = sum_bound/num_analyized_result + avg_err[i] = sum_err/num_analyized_result + + + all_robot_avg_err = sum(avg_err)/num_robot + all_robot_avg_trace = sum(avg_trace)/num_robot + all_robot_avg_bound = sum(avg_bound)/num_robot + avg_STD = math.sqrt(all_robot_avg_trace) + avg_bound_sq = math.sqrt(all_robot_avg_bound) + + mean_time.append(time_interval_start+1/2 - start_time) + mean_err.append(all_robot_avg_err) + mean_STD.append(avg_STD) + mean_bound_sq.append(avg_bound_sq) + + return [mean_time, mean_err, mean_STD, mean_bound_sq] + + diff --git a/CoLo-AT/data_analysis/animate_plot b/CoLo-AT/data_analysis/animate_plot new file mode 100644 index 0000000000000000000000000000000000000000..9429dd1f730f4ca219bef960a68de224ffac8932 --- /dev/null +++ b/CoLo-AT/data_analysis/animate_plot @@ -0,0 +1,2 @@ +import matplotlib.pyplot as plt +import matplotlib.animation as animation \ No newline at end of file diff --git a/CoLo-AT/data_analysis/animate_plot.py b/CoLo-AT/data_analysis/animate_plot.py new file mode 100644 index 0000000000000000000000000000000000000000..23653ca6cbf6b251d13681d81acf03ff2d75aab8 --- /dev/null +++ b/CoLo-AT/data_analysis/animate_plot.py @@ -0,0 +1,32 @@ +import matplotlib.pyplot as plt +import matplotlib.animation as animation + + + + + + +class Animator(): + """docstring for animate_plot""" + def __init__(self, dataset_labels, data_recorder, analyzer): + self.name = "Animator" + self.dataset_labels = dataset_labels + self.robot_loc_time_unit = analyzer.robot_location_at_unit_time_interval(state_recorder) + + + def get_robot_locations(self, robot_loc_time_unit, mode): + arr = [] + # robots in 
dataset are labeled from 1 to num_robots + for robot_label in self.dataset_labels: + # declare a mapping of x, y coordinate lists + m = {'x': robot_loc_time_unit[i][mode + '_x'], 'y': robot_loc_time_unit[i][mode + '_y']} + arr.append(m) + return arr + + def update(self, ): + pass + + def animate_plot(dataset_labels, data_recorder, analyzer, lm = None): + self.robot_locations_gt = get_robot_locations(num_robots, self.robot_loc_time_unit, 'gt') + self.robot_locations_est = get_robot_locations(num_robots, self.robot_loc_time_unit, 'est') + ani = animation.FuncAnimation(fig, update, fargs = (num_robots, fig, init_path_est, init_path_gt, init_points_est, init_points_gt, robot_locations_est, robot_locations_gt, times, time_func, min_time_arr, arr_err, arr_var, loc_err, trace_sigma), frames=min_length(times), interval=50, blit=False) \ No newline at end of file diff --git a/CoLo-AT/data_analysis/data_analyzer.py b/CoLo-AT/data_analysis/data_analyzer.py new file mode 100644 index 0000000000000000000000000000000000000000..2eb2c313aa4cf5e5c32c5581e542feff4caac20b --- /dev/null +++ b/CoLo-AT/data_analysis/data_analyzer.py @@ -0,0 +1,243 @@ +import numpy as np +import math +import os +import matplotlib.pyplot as plt + + +def find_nearest_time_idx(array, start_idx, value): + #array[i][0] is the time + i = start_idx + if array[i]['time'] < value: + while array[i]['time'] < value: + i = i+1 + if array[i]['time']-value < value-array[i-1]['time']: + return i + else: + return i-1 + else: + return i + +class Analyzer(): + def __init__(self, name, dataset_labels): + self.name = name + self.dataset_labels = dataset_labels + + def set_dataset_label(self, dataset_labels): + self.dataset_labels = dataset_labels + + def plot_loc_err_and_trace(self, loc_err, trace, time_arr, measurement_count): + fig = plt.figure() + plt.suptitle('Correctness analysis') + fig1 = fig.add_subplot(311) + fig2 = fig.add_subplot(312) + fig3 = fig.add_subplot(313) + + fig1.plot(time_arr, loc_err) + 
fig2.plot(time_arr, trace) + fig3.bar(time_arr, measurement_count) + + + fig1.set_title('Estimation deviation error') + fig1.set_xlabel('Time[s]') + fig1.set_ylabel('RMS[m]') + #fig1.set_ylim(0, 6) + fig1.legend(loc='center left', bbox_to_anchor=(1, 0.5)) + + fig2.set_title('Trace of state variance') + fig2.set_xlabel('Time [s]') + fig2.set_ylabel('Sigma_s [m^2]') + #fig2.set_ylim(0, 0.08) + fig2.legend(loc='center left', bbox_to_anchor=(1, 0.5)) + + + fig3.set_title('Observation counts') + fig3.set_xlabel('Time[s]') + fig3.set_ylabel('Num of obser') + #fig1.set_ylim(0, 6) + fig3.legend(loc='center left', bbox_to_anchor=(1, 0.5)) + + fig.subplots_adjust(hspace = 1.2) + plt.show() + + + + def calculate_loc_err_and_trace_state_variance_per_run(self, data_recorder, unit_time_interval = 0.5, plot_graphs = True): + #recorded_dataline = [time, robot_label, est_x_pos, est_y_pos, trace_state_var, gt_x_pos, gt_y_pos, loc_err, update type] + data = data_recorder.get_recorded_data() + update_in_time_order = data_recorder.get_updata_type_in_time_order() + data_in_time_order = data_recorder.get_data_in_time_order() + + time_index = 0 + time = data_in_time_order[time_index][0] + interval_start_time = time + finish_flag = False + + loc_err_per_run = [] + measurement_num = [] + trace_per_run = [] + time_arr = [] + + while True: + loc_err_per_time_iterval = 0 + trace_per_time_iterval = 0 + num_dataline_per_time_iterval = 0 + measurement_count = 0 + + while interval_start_time <= time < interval_start_time+unit_time_interval: + try: + data_in_time_order[time_index] + except IndexError: + finish_flag = True + break + time = data_in_time_order[time_index][0] + + if update_in_time_order[time_index] == 'landmark observation' or update_in_time_order[time_index] == 'relative observation': + measurement_count +=1 + + loc_err_per_time_iterval+= data_in_time_order[time_index][7] + trace_per_time_iterval += data_in_time_order[time_index][4] + num_dataline_per_time_iterval+=1 + + time_index += 
1 + + if finish_flag: + break + + if num_dataline_per_time_iterval != 0: + loc_err_per_run.append(loc_err_per_time_iterval/num_dataline_per_time_iterval) + trace_per_run.append(trace_per_time_iterval/num_dataline_per_time_iterval) + + else: + loc_err_per_run.append(0) + trace_per_run.append(0) + + measurement_num.append(measurement_count) + time_arr.append((interval_start_time+unit_time_interval+interval_start_time)/2) + interval_start_time = interval_start_time+unit_time_interval + + + print(data_recorder.name,': ') + print('Avg location deviation errors per run: ', sum(loc_err_per_run)/len(loc_err_per_run)) + print('Avg trace of state variances per run: ', sum(trace_per_run)/len(trace_per_run)) + + if plot_graphs: + self.plot_loc_err_and_trace(loc_err_per_run, trace_per_run, time_arr, measurement_count = measurement_num) + + return loc_err_per_run, trace_per_run, time_arr + + def robot_location_at_unit_time_interval(self, data_recorder, unit_time_interval = 0.2): + #recorded_dataline = [time, robot_label, est_x_pos, est_y_pos, trace_state_var, gt_x_pos, gt_y_pos, loc_err] + data = data_recorder.get_recorded_data() + update_in_time_order = data_recorder.get_updata_type_in_time_order() + data_in_time_order = data_recorder.get_data_in_time_order() + + time_index = 0 + time = data_in_time_order[time_index][0] + interval_start_time = time + finish_flag = False + + robot_loc_time_unit = {} + for i, label in enumerate(self.dataset_labels): + robot_loc_time_unit[label] = {'time': [], 'est_x': [], 'est_y': [], 'gt_x': [], 'gt_y': []} + + while True: + robot_sum_loc_per_time_unit = {} + measurement_update = False + for i, label in enumerate(self.dataset_labels): + robot_sum_loc_per_time_unit[label] = {'est_x': 0, 'est_y': 0, 'gt_x': 0, 'gt_y': 0, 'num': 0} + + while interval_start_time <= time < interval_start_time+unit_time_interval: + + try: + data_in_time_order[time_index] + except IndexError: + finish_flag = True + break + + time = data_in_time_order[time_index][0] + + 
if update_in_time_order[time_index] == 'measurement': + measurement_update = True + + robot_label = data_in_time_order[time_index][1] + #get sum for est_x, est_y, gt_x, gt_y and num for each robor per time interval + robot_sum_loc_per_time_unit[robot_label]['est_x'] += data_in_time_order[time_index][2] + robot_sum_loc_per_time_unit[robot_label]['est_y'] += data_in_time_order[time_index][3] + robot_sum_loc_per_time_unit[robot_label]['gt_x'] += data_in_time_order[time_index][5] + robot_sum_loc_per_time_unit[robot_label]['gt_y'] += data_in_time_order[time_index][6] + robot_sum_loc_per_time_unit[robot_label]['num'] += 1 + + time_index += 1 + + if finish_flag: + break + + for i, robot_label in enumerate(self.dataset_labels): + for j in ['est_x', 'est_y', 'gt_x', 'gt_y']: + if robot_sum_loc_per_time_unit[robot_label]['num'] != 0: + robot_loc_time_unit[robot_label][j].append(robot_sum_loc_per_time_unit[robot_label][j]/robot_sum_loc_per_time_unit[robot_label]['num']) + else: + robot_loc_time_unit[robot_label][j].append(robot_loc_time_unit[robot_label][j][-1]) # use the previous value + + robot_loc_time_unit[robot_label]['time'].append((interval_start_time+unit_time_interval+interval_start_time)/2) + + interval_start_time = interval_start_time+unit_time_interval + + return robot_loc_time_unit + + + def algos_comparison_graph(self, arr_loc_err, arr_trace): + fig = plt.figure() + fig1 = fig.add_subplot(211) + fig2 = fig.add_subplot(212) + + for loc_err in arr_loc_err: + fig1.plot(loc_err[0], loc_err[1], label = loc_err[2]) + + for trace in arr_trace: + fig2.plot(trace[0], trace[1], label = trace[2]) + + fig1.set_title('Estimation deviation error') + fig1.set_xlabel('Time[s]') + fig1.set_ylabel('RMS[m]') + #fig1.set_ylim(0, 6) + fig1.legend(loc='center left', bbox_to_anchor=(1, 0.5)) + + fig2.set_title('Trace of state variance') + fig2.set_xlabel('Time [s]') + fig2.set_ylabel('Sigma_s [m^2]') + #fig2.set_ylim(0, 0.08) + fig2.legend(loc='center left', bbox_to_anchor=(1, 0.5)) + 
plt.subplots_adjust(hspace=.6) + plt.show() + + + + def algos_comparison(self, arr_data_recorder, only_trace=None): + arr_loc_err = [] + arr_trace = [] + for data_recorder in arr_data_recorder: + loc_err_per_run, trace_per_run, time_stamps = self.calculate_loc_err_and_trace_state_variance_per_run(data_recorder, plot_graphs = True) + if data_recorder.name not in only_trace: + arr_loc_err.append([time_stamps, loc_err_per_run, data_recorder.name]) + arr_trace.append([time_stamps, trace_per_run, data_recorder.name] ) + + print('Plotting Comparison Graphs') + self.algos_comparison_graph(arr_loc_err, arr_trace) + return arr_loc_err, arr_trace + + + + + + + + + + + + + + + + diff --git a/CoLo-AT/data_analysis/data_analyzer.pyc b/CoLo-AT/data_analysis/data_analyzer.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44b59bceea581694a219af7ee3b8c4aa2ee15c96 Binary files /dev/null and b/CoLo-AT/data_analysis/data_analyzer.pyc differ diff --git a/CoLo-AT/data_analysis/dataset_analyzer.py b/CoLo-AT/data_analysis/dataset_analyzer.py new file mode 100644 index 0000000000000000000000000000000000000000..a9e77f89bba62c029ab475cd927806c253af18af --- /dev/null +++ b/CoLo-AT/data_analysis/dataset_analyzer.py @@ -0,0 +1,35 @@ +import numpy as np +import os + +import initialization_datasets + +def dataset_analysis(dataset_name, duration, sampling_period): + # this function is to find the characteristic of the dataset. 
Given name of the dataset and period of sampling (in second), + # it will output v_max, r_max of for each robot during each period, also the mean and the STD of observation time interval + dataset_labels = [1,2,3,4,5] #all robots + [landmark_map, start_time, start_state_arr, measurement_data, odometry_data, groundtruth_data] = initialization_datasets.initialization_MRCLAMDatasets_multirobots(dataset_name, dataset_labels, duration) + + print 'robot\t\tv_max\t\tr_mean\t\tr_std\tnum_obser\ttime' + + for i, label in enumerate(dataset_labels): + t = start_time + while t <= start_time+duration-sampling_period: + v_seq = [obs['velocity'] for obs in odometry_data[i] if t+sampling_period > obs['time'] >= t] # create a list of velocity data for each sampling period + v_max = max(v_seq) + + r_seq = [obs['measurment_range'] for obs in measurement_data[i] if t+sampling_period > obs['time'] >= t] # create a list of measurement_range data for each sampling period + try: + r_max = max(r_seq) + r_mean = np.mean(r_seq) + r_std = np.std(r_seq) + + if r_std < 0.8: + print str(label)+'\t'+ str(v_max) + '\t\t'+ str(r_max) +'\t' +str(r_std) + '\t'+ str(len(r_seq)) + '\t' + str(t-start_time) + 's~' + str(t+sampling_period-start_time) +'s' + + except ValueError: + print '\tempty array' + + t = t + sampling_period + + return 1 + diff --git a/CoLo-AT/data_analysis/realtime_plot.py b/CoLo-AT/data_analysis/realtime_plot.py new file mode 100644 index 0000000000000000000000000000000000000000..436e6fc2e0821c756869e0bf6c5b1dca9a390aca --- /dev/null +++ b/CoLo-AT/data_analysis/realtime_plot.py @@ -0,0 +1,217 @@ +from matplotlib import pyplot as plt +from matplotlib import animation +from matplotlib.font_manager import FontProperties +from matplotlib import gridspec +import numpy as np +import time +import sys +""" +author: Kyle +""" +# update point: function handling the logic of animating the dot, denoting the current +# position of the robot +# 2 modes: est for estimate, gt for groundtruth +# not 
used in the implementation +def update_point(i, fig, scat, robot_loc_time_unit, dataset_labels, robotNum, mode): + label = robotNum + x = robot_loc_time_unit[label][mode + '_x'][i] + y = robot_loc_time_unit[label][mode + '_y'][i] + loc = [x, y] + scat.set_offsets(loc) + return scat, +# update path: function handling the logic of animating a path trail left by the robot +# 2 modes: est for estimate, gt for groundtruth +# not used in the implementation +def update_path(i, fig, l, robot_loc_time_unit, dataset_labels, robotNum, mode): + label = robotNum + x = robot_loc_time_unit[label][mode + '_x'] + y = robot_loc_time_unit[label][mode + '_y'] + l.set_data(x[:i], y[:i]) + return l, +# get the list of x, y locations of each robot and return those as a list +# 2 modes: est for estimate, gt for groundtruth +def get_robot_locations(num_robots, robot_loc_time_unit, mode): + arr = [] + # robots in dataset are labeled from 1 to num_robots + for i in range(1, num_robots + 1): + # declare a mapping of x, y coordinate lists + m = {'x': robot_loc_time_unit[i][mode + '_x'], 'y': robot_loc_time_unit[i][mode + '_y']} + arr.append(m) + return arr +# initialize initial positions of each robot for dots +def initialize_robots(num_robots, locations, colors): + arr = [] + for i in range(0, num_robots): + scat = plt.scatter(locations[i]['x'], locations[i]['y'], c=colors[i]) + arr.append(scat) + return arr +# initialize starting point of the robot path trial +# 2 modes: est for estimate, gt for groundtruth +def initialize_path(num_robots, ax, locations, colors, mode): + s = "" + mode_str = "" + # define path marker, based on mode + # define label representation, based on mode + if mode == 'est': + s = ':' + mode_str = 'estimate' + else: + s = '-' + mode_str = 'groundtruth' + arr = [] + for i in range(0, num_robots): + line, = ax.plot(locations[i]['x'], locations[i]['y'], colors[i]+s, label='Robot ' + str(i+1) + ' ' + mode_str) + arr.append(line) + return arr +# initialize graphs to be 
incrementally constructed based on animation over time +def initialize_graphs(num_robots, ax_err, ax_var, loc_err, trace_sigma, time_func, colors): + arr_err = [] + arr_var = [] + for i in range(0, num_robots): + time_arr = time_func(i + 1) + line, = ax_err.plot(time_arr, loc_err[i+1], colors[i]+'-', label='Robot ' + str(i+1)) + line2, = ax_var.plot(time_arr, trace_sigma[i+1], colors[i]+'-', label='Robot ' + str(i+1)) + arr_err.append(line) + arr_var.append(line2) + return [arr_err, arr_var] +# get the time array for each robot +def get_robot_times(num_robots, robot_loc_time_unit): + arr = [] + for i in range(1, num_robots + 1): + arr.append(robot_loc_time_unit[i]['time']) + return arr +# return the time array of minimum time duration +def min_robot_times(times): + res = [] + m = 2**63 - 1 + for arr in times: + if m > len(arr): + res = list(arr) + m = len(arr) + return res +# update function for aggregate animation +def update(i, num_robots, fig, l_est, l_gt, scat_est, scat_gt, location_est, location_gt, times, time_func, min_time_arr, arr_err, arr_var, loc_err, trace_sigma): + # aggregate of both scatterplot point and path trails + res = [] + for txt in fig.texts: + txt.set_visible(False) + tmp = fig.text(0.8, 0.03, "Time: " + str(round(times[0][i], 1)), fontsize=12) + for robotNum in range(0, num_robots): + # factor out common variables + est_x = location_est[robotNum]['x'] + est_y = location_est[robotNum]['y'] + gt_x = location_gt[robotNum]['x'] + gt_y = location_gt[robotNum]['y'] + # scatter + s1 = scat_est[robotNum] + s2 = scat_gt[robotNum] + s1.set_offsets([est_x[i], est_y[i]]) + s2.set_offsets([gt_x[i], gt_y[i]]) + res.append(s1,) + res.append(s2,) + # path + l1 = l_est[robotNum] + l2 = l_gt[robotNum] + l1.set_data(est_x[:i], est_y[:i]) + l2.set_data(gt_x[:i], gt_y[:i]) + res.append(l1,) + res.append(l2,) + # graph + time_arr = time_func(robotNum + 1) + ind = find_index_time(time_arr, min_time_arr[i]) + l3 = arr_err[robotNum] + l4 = arr_var[robotNum] + 
l3.set_data(time_arr[:ind], loc_err[robotNum+1][:ind]) + l4.set_data(time_arr[:ind], trace_sigma[robotNum+1][:ind]) + #return res +# to prevent subscript out of bounds, determine minimum length timestamp array to be used +# for animation +def min_length(times): + m = 2**63 - 1 # 64-bit max int upper bound + for arr in times: + m = min(m, len(arr)) + return m +# return the index of the array that is less than or equal to t, the current time +def find_index_time(arr, t): + ind = 0 + for i in arr: + if i >= t: + return ind + ind += 1 + return ind +# precondition: dataset should have sufficient information for N robots +# postcondition: dataset is not manipulated; the function simply mutates the data for +# simpler data processing internally +# This function handles the plot animation graph functionalities +def animate_plot(dataset_labels, data_recorder, analyzer, lm = None): + + time_func = data_recorder.get_time_arr + loc_err = data_recorder.get_loc_err_arr() + trace_sigma = data_recorder.get_trace_sigma_s_arr() + + # obtain data + robot_loc_time_unit = analyzer.robot_location_at_unit_time_interval(data_recorder) + # determine number of robots under consideration for this animation + num_robots = int(len(dataset_labels)) + # initialize graphs: error, demo, state variance + fig = plt.figure(figsize=(12, 9), tight_layout=True) + fig.subplots_adjust(top=0.88) + gs = gridspec.GridSpec(3, 2) + fig.suptitle('CoLo Robot Demo Simulation', fontsize=14) + ax_err = plt.subplot(gs[0, 0]) + ax_var = plt.subplot(gs[0, 1]) + ax = plt.subplot(gs[1:, :]) + #ax.set_xlim([-2, 2]) + #ax.set_ylim([-2, 2]) + # configure graph + # set bounds + #fig.set_size_inches((18,18)) + #ax = plt.axes(xlim=(-6, 6), ylim=(-6, 8)) + #ax_err = plt.axes(xlim=(0, 100), ylim=(0, 0.4)) + # assign labels + ax.set_title('CoLo Demo') + ax.set_xlabel('x-axis[m]') + ax.set_ylabel('y-axis[m]') + ax_err.set_title('Estimation deviation error') + ax_err.set_xlabel('Time[s]') + ax_err.set_ylabel('RMS[m]') + 
ax_var.set_title('Trace of state variance') + ax_var.set_xlabel('Time [s]') + ax_var.set_ylabel('Sigma_s [m^2]') + # obtain robot locations from dataset + robot_locations_est = get_robot_locations(num_robots, robot_loc_time_unit, 'est') + robot_locations_gt = get_robot_locations(num_robots, robot_loc_time_unit, 'gt') + # hardcoded for now, to be passed as parameter in test_simulation call + colors = ['b', 'g', 'r', 'c', 'k'] + # initialize graph initial positions + init_points_est = initialize_robots(num_robots, robot_locations_est, colors) + init_points_gt = initialize_robots(num_robots, robot_locations_gt, colors) + init_path_est = initialize_path(num_robots, ax, robot_locations_est, colors, 'est') + init_path_gt = initialize_path(num_robots, ax, robot_locations_gt, colors, 'gt') + ig = initialize_graphs(num_robots, ax_err, ax_var, loc_err, trace_sigma, time_func, colors) + arr_err = ig[0] + arr_var = ig[1] + + if lm != None: + lm_x = [] + lm_y = [] + for landmark_id, [x, y] in lm.items(): + lm_x.append(x) + lm_y.append(y) + ax.scatter(lm_x, lm_y, s=120, marker=(5,0), label='Landmark') + + # obtain robot timestamps + times = get_robot_times(num_robots, robot_loc_time_unit) + min_time_arr = min_robot_times(times) + # initialize animation, passing in frame update function + # robot animation + ani = animation.FuncAnimation(fig, update, fargs = (num_robots, fig, init_path_est, init_path_gt, init_points_est, init_points_gt, robot_locations_est, robot_locations_gt, times, time_func, min_time_arr, arr_err, arr_var, loc_err, trace_sigma), frames=min_length(times), interval=50, blit=False) + # Show legend + fontP = FontProperties() + fontP.set_size('small') + ax.legend(prop=fontP, bbox_to_anchor=(1.1, 1.0), loc=9, ncol=1) + ax_var.legend(prop=fontP, bbox_to_anchor=(1.2, 1.0), loc=9, ncol=1) + # Show graph + plt.show() + # (optional) save as GIF, comment the following line if you don't want this + ani.save('full_test9.gif', writer="imagemagick", fps=60) diff --git 
a/CoLo-AT/data_analysis/realtime_plot.pyc b/CoLo-AT/data_analysis/realtime_plot.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0873796d526d99aa6a6f0b56e08abaa3fe4fc654 Binary files /dev/null and b/CoLo-AT/data_analysis/realtime_plot.pyc differ diff --git a/CoLo-AT/data_analysis/results_graphs.py b/CoLo-AT/data_analysis/results_graphs.py new file mode 100755 index 0000000000000000000000000000000000000000..7ecd4ab558b11e3eb4aecf10896132ae8f8e958c --- /dev/null +++ b/CoLo-AT/data_analysis/results_graphs.py @@ -0,0 +1,300 @@ +import numpy as np +import os +import matplotlib.pyplot as plt +import math +import sys +def find_nearest_time_idx(array, start_idx, value): + #array[i][0] is the time + i = start_idx + if array[i]['time'] < value: + while array[i]['time'] < value: + i = i+1 + if array[i]['time']-value < value-array[i-1]['time']: + return i + else: + return i-1 + else: + return i +def plots_MSE_and_trace(dataset_labels, analysis_result_name, dataset_name, algo_name): + + fig = plt.figure() + plt.suptitle('Correctness analysis Algo: ' + algo_name + ' Dataset: ' + dataset_name ) + fig1 = fig.add_subplot(211) + fig2 = fig.add_subplot(212) + for i, label in enumerate(dataset_labels): + #initialization for all the lists for plotting + dev_l_t = [] + dev_l_RMSE = [] + dev_l_trace = [] + file="/Results_files/analysis_results/"+analysis_result_name+str(label)+".txt" + path=os.getcwd()+file + with open(path,'r') as analysis_file: + s_dev = analysis_file.readline() + starting_time = float(s_dev.split( )[0]) + print starting_time + while (s_dev): + dev_l_t.append(float(s_dev.split( )[0]) - starting_time) + dev_l_RMSE.append(float(s_dev.split( )[1])) + dev_l_trace.append(float(s_dev.split( )[2])) + s_dev = analysis_file.readline() + fig1.plot(dev_l_t, dev_l_RMSE, label= 'Robot %d' %label) + fig2.plot(dev_l_t, dev_l_trace, label= 'Robot %d' %label) + + fig1.set_title('Root mean square error') + fig1.set_xlabel('Time[s]') + fig1.set_ylabel('RMS[m]') + 
#fig1.set_ylim(0, 6) + fig1.legend(loc=2) + + fig2.set_title('Trace of state variance') + fig2.set_xlabel('Time [s]') + fig2.set_ylabel('Sigma_s [m^2]') + #fig2.set_ylim(0, 0.08) + fig2.legend(loc=2) + + fig.subplots_adjust(hspace = 0.8) + plt.show() + + return 1 + + + +def plots_mean_MSE_and_trace(mean_time, mean_err, mean_trace, dataset_name, algo_name): + + fig = plt.figure() + plt.suptitle('Correctness analysis Algo: ' + algo_name + ' Dataset: ' + dataset_name ) + fig1 = fig.add_subplot(211) + fig2 = fig.add_subplot(212) + + + fig1.plot(mean_time, mean_err) + fig2.plot(mean_time, mean_trace) + + fig1.set_title('Avg. Root mean square error') + fig1.set_xlabel('Time[s]') + fig1.set_ylabel('RMS[m]') + #fig1.set_ylim(0, 6) + + + fig2.set_title('Avg. Trace of state variance') + fig2.set_xlabel('Time [s]') + fig2.set_ylabel('Sigma_s [m^2]') + #fig2.set_ylim(0, 0.08) + + fig.subplots_adjust(hspace = 0.8) + plt.show() + + return 1 + + +def plots_mean_MSE_and_trace_for_multiple_algos(figure_name, algos_set, all_mean_time, all_mean_err, all_mean_STD, dataset_name, comm_fail_rate): + + fig = plt.figure() + plt.suptitle('Correctness analysis' + ' Dataset: ' + dataset_name + ' comm failure rate: ' + str(comm_fail_rate)) + fig1 = fig.add_subplot(211) + fig2 = fig.add_subplot(212) + + for i, algo_name in enumerate(algos_set): + print algo_name + if algo_name == 'GS-CI': + fig1.plot(all_mean_time[i], all_mean_err[i], label = algo_name, linewidth=3.0) + fig2.plot(all_mean_time[i], all_mean_STD[i], label = algo_name, linewidth=3.0) + else: + fig1.plot(all_mean_time[i], all_mean_err[i], label = algo_name) + fig2.plot(all_mean_time[i], all_mean_STD[i], label = algo_name) + + + fig1.set_title('Actual location error') + fig1.set_xlabel('Time [s]') + fig1.set_ylabel('RMS [m]') + #fig1.set_ylim(0, 3) + fig1.legend(loc=2) + #plt.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=1) + + + + fig2.set_title('Trace of state deviation') + fig2.set_xlabel('Time [s]') + fig2.set_ylabel('STD 
[m]') + #fig2.set_ylim(0, 1) + fig2.legend(loc=2) + + fig.subplots_adjust(hspace = 0.8) + figure_name = figure_name +'.png' + print figure_name + plt.savefig(figure_name) + + plt.show() + + return 1 + + +def plots_mean_MSE_and_trace_for_multiple_algos_w_bound(figure_name, algos_set, all_mean_time, all_mean_err, all_mean_STD, all_mean_bound, dataset_name, comm_fail_rate): + + fig = plt.figure() + plt.suptitle('Correctness analysis' + ' Dataset: ' + dataset_name + ' comm failure rate: ' + str(comm_fail_rate)) + fig1 = fig.add_subplot(211) + fig2 = fig.add_subplot(212) + + for i, algo_name in enumerate(algos_set): + print algo_name + if algo_name == 'GS-CI': + fig1.plot(all_mean_time[i], all_mean_err[i], label = algo_name, linewidth=3.0) + fig2.plot(all_mean_time[i], all_mean_STD[i], label = algo_name, linewidth=3.0) + fig2.plot(all_mean_time[i], all_mean_bound[i], label = 'bound', linewidth=3.0) + else: + fig1.plot(all_mean_time[i], all_mean_err[i], label = algo_name) + fig2.plot(all_mean_time[i], all_mean_STD[i], label = algo_name) + + + fig1.set_title('Actual location error') + fig1.set_xlabel('Time [s]') + fig1.set_ylabel('RMS [m]') + #fig1.set_ylim(0, 3) + fig1.legend(loc=2) + #plt.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=1) + + + + fig2.set_title('Trace of state deviation') + fig2.set_xlabel('Time [s]') + fig2.set_ylabel('STD [m]') + #fig2.set_ylim(0, 1) + fig2.legend(loc=2) + + fig.subplots_adjust(hspace = 0.8) + figure_name = figure_name +'.png' + print figure_name + plt.savefig(figure_name) + + plt.show() + + return 1 + +def plot_err_x(starting_time, dataset_labels, analysis_result_name, dataset_name): + plt.figure(1) + + for i, label in enumerate(dataset_labels): + list_t = [] + list_err_x = [] + file="/Results_files/analysis_results/"+analysis_result_name+str(label)+".txt" + path=os.getcwd()+file + with open(path,'r') as analysis_file: + s_dev = analysis_file.readline() + while (s_dev): + list_t.append(float(s_dev.split( )[0])-starting_time) + 
list_err_x.append(float(s_dev.split( )[3])) + s_dev = analysis_file.readline() + plt.plot(list_t, list_err_x, label= 'Robot %d' %label) + + #plt.ylim(-2, 2) + #plt.xlim(0, 600) + + plt.legend(loc=1) + plt.title('Robot x-pos error Dataset: ' + dataset_name) + plt.xlabel('Time[s]') + plt.ylabel('Robot x-pos error[m]') + plt.show() + pass + +def plot_RMSE_and_trace_withrespectto_noise(list2D_avg_RMSE, list2D_avg_trace, noise_range): + list_process_noise = np.arange(noise_range[0], noise_range[1], 0.2) + print list_process_noise + plt.figure(1) + plt.suptitle('Error analysis for single EKF for all robots with respect to measurement noise') + + fig1 = plt.subplot(211) + for i in range(5): + fig1.plot(list_process_noise, list2D_avg_RMSE[i], label= 'Robot %d' %(i+1)) + + plt.title('Root mean square error(RMSE)') + plt.xlabel('Measurement noise') + plt.ylabel('RMSE') + #plt.ylim(1, 2) + + plt.legend(loc=1) + + fig2 = plt.subplot(212) + for i in range(5): + fig2.plot(list_process_noise, list2D_avg_trace[i], label= 'Robot %d' %(i+1)) + + plt.title('Trace of state variance') + plt.xlabel('Measurement noise') + plt.ylabel('Trace') + plt.legend(loc=2) + #plt.ylim(0, 2) + + plt.subplots_adjust(hspace = 0.8) + plt.show() + + return 1 + + + +def single_plot_w_bond(start_time, est_result_array, groundtruth_data, robot_index, algo_name): + time_arr = [] + err_arr = [] + sigma_s_arr = [] + bound_arr = [] + i = robot_index + g_idx = 0 + j = 0 + while j < len(est_result_array[i]): + est_t = est_result_array[i][j]['time'] + g_idx = find_nearest_time_idx(groundtruth_data[i], g_idx, est_t) + + if abs(groundtruth_data[i][g_idx]['time']-est_result_array[i][j]['time']) > 0.5: + sys.exit('groundtruth time not match!') + + gt_x_pos = groundtruth_data[i][g_idx]['x_pos'] + gt_y_pos = groundtruth_data[i][g_idx]['y_pos'] + est_x_pos = est_result_array[i][j]['x_pos'] + est_y_pos = est_result_array[i][j]['y_pos'] + sigma_s_trace = est_result_array[i][j]['sigma_s_trace'] + bound = 
est_result_array[i][j]['bound'] + + err_sq = sum([(est_x_pos-gt_x_pos)*(est_x_pos-gt_x_pos), (est_y_pos-gt_y_pos)*(est_y_pos-gt_y_pos)]) + err_sqrt = math.sqrt(err_sq) + + est_t = est_t - start_time + #print est_t + #print err_sq + + time_arr.append(est_t) + err_arr.append(err_sq) + sigma_s_arr.append(sigma_s_trace) + bound_arr.append(bound) + + #print sigma_s_arr + + j = j+1 + + print est_result_array[0][0] + print est_result_array[0][-1] + + fig = plt.figure() + plt.suptitle('GS-CI analysis w/ e = 0.5') + fig1 = fig.add_subplot(211) + fig2 = fig.add_subplot(212) + + fig1.plot(time_arr, err_arr, label = algo_name, linewidth=3.0) + fig2.plot(time_arr, sigma_s_arr, label = algo_name, linewidth=3.0) + #fig2.plot(time_arr, bound_arr, label = 'bound', linewidth=3.0) + + + fig1.set_title('Actual location error') + fig1.set_xlabel('Time [s]') + fig1.set_ylabel('RMS [m]') + #fig1.set_ylim(0, 1) + fig1.legend(loc=2) + #plt.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=1) + + fig2.set_title('Trace of state deviation') + fig2.set_xlabel('Time [s]') + fig2.set_ylabel('STD [m]') + #fig2.set_ylim(0, 0.2) + fig2.legend(loc=2) + + plt.show() + + return 1 \ No newline at end of file diff --git a/CoLo-AT/dataset_manager/.DS_Store b/CoLo-AT/dataset_manager/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..0b6aee47e8927b5469c9e0e3ffc5096f41ce3014 Binary files /dev/null and b/CoLo-AT/dataset_manager/.DS_Store differ diff --git a/CoLo-AT/dataset_manager/__init__.py b/CoLo-AT/dataset_manager/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b6e690fd59145ce8900fd9ab8d8a996ee7d33834 --- /dev/null +++ b/CoLo-AT/dataset_manager/__init__.py @@ -0,0 +1 @@ +from . 
import * diff --git a/CoLo-AT/dataset_manager/__pycache__/__init__.cpython-35.pyc b/CoLo-AT/dataset_manager/__pycache__/__init__.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3c36bd5c707e061f9d31563c5f2f8b0ccc41c32 Binary files /dev/null and b/CoLo-AT/dataset_manager/__pycache__/__init__.cpython-35.pyc differ diff --git a/CoLo-AT/dataset_manager/__pycache__/__init__.cpython-36.pyc b/CoLo-AT/dataset_manager/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd31344ff4309517f4f33b309e86875ec7027076 Binary files /dev/null and b/CoLo-AT/dataset_manager/__pycache__/__init__.cpython-36.pyc differ diff --git a/CoLo-AT/dataset_manager/__pycache__/existing_dataset.cpython-35.pyc b/CoLo-AT/dataset_manager/__pycache__/existing_dataset.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8f4113524e113b5bc74d0dcc2360c8aac644e3c Binary files /dev/null and b/CoLo-AT/dataset_manager/__pycache__/existing_dataset.cpython-35.pyc differ diff --git a/CoLo-AT/dataset_manager/__pycache__/existing_dataset.cpython-36.pyc b/CoLo-AT/dataset_manager/__pycache__/existing_dataset.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c4964b7c5fa18e5c4e54efd6d80b0a0f47841d3 Binary files /dev/null and b/CoLo-AT/dataset_manager/__pycache__/existing_dataset.cpython-36.pyc differ diff --git a/CoLo-AT/dataset_manager/existing_dataset.py b/CoLo-AT/dataset_manager/existing_dataset.py new file mode 100755 index 0000000000000000000000000000000000000000..ec3fc68c7b5fcb9569e077ba48ede319c77a666c --- /dev/null +++ b/CoLo-AT/dataset_manager/existing_dataset.py @@ -0,0 +1,465 @@ +# -*- coding: utf-8 -*- + +import numpy as np +from numpy import random +import sys +from math import atan2, sqrt, pi +import random +from math import cos +from math import sin +import math +import os.path +''' +sys.path.insert(0, 
'/Users/william/Documents/SLAM_Simulation/Simulation-Environment-for-Cooperative-Localization/functions/requests') +import request_response +''' + +def find_nearest_time_idx(l, value): + if len(l) != 0: + array = np.asarray(l) + idx = (np.abs(array-value)).argmin() + return idx + else: + return None + + + +def find_next_time_idx(array, start_idx, value): + i = start_idx + try: + array[i] + except IndexError: + i = -1 + return i + while array[i] < value: + i = i+1 + try: + array[i] + except IndexError: + i = -1 + break + return i + +def linear_interpolation(end0, end1, x): + [x0, y0] = end0 + [x1, y1] = end1 + if x1 == x0: + y = y0 + else: + y = y0+(x-x0)*(y1-y0)/(x1-x0) + return y + +class Dataset: + + def __init__(self, dataset_name): + self.name = dataset_name + + def create_landmark_map(self): + ### Build landmark map ### + self.landmark_map = {} + path=self.dataset_path + "Landmark_Groundtruth.dat" + landmark_file=open(path,'r+'); + s = landmark_file.readline() + while(s): + if(s[0]!='#'): + landmark_location = [float(s.split( )[1]), float(s.split( )[2])] + self.landmark_map.update({int(s.split( )[0]): landmark_location}) + s = landmark_file.readline() + landmark_file.close() + #print "lm ", self.landmark_map, " lm" + return self.landmark_map + + + def load_MRCLAMDatasets(self, dataset_path, dataset_labels, duration, adding_actifical_dataline = True, delay_start = 5): + print ('******** Initialization Started ********') + print ('add synthetic data: ', adding_actifical_dataline) + self.dataset_path = dataset_path + print("Absolute datapath: ") + print(self.dataset_path) + self.dataset_labels = dataset_labels + self.adding_actifical_dataline = adding_actifical_dataline + self.create_landmark_map() + + self.num_robots = len(self.dataset_labels) + self.measurement_data = [[] for i in range(self.num_robots)] + self.odometry_data = [[] for i in range(self.num_robots)] + self.gt_data_odo = [[] for i in range(self.num_robots)] + self.gt_data_meas = [[] for i in 
range(self.num_robots)] + self.groundtruth_data = [[] for i in range(self.num_robots)] + + #self.groundtruth_time= [[] for i in range(self.num_robots)] + self.time_arr = {'odometry': [[] for i in range(self.num_robots)], 'measurement': [[] for i in range(self.num_robots)], 'groundtruth': [[] for i in range(self.num_robots)]} + + #initialization for MRCLAMDatasets: put files into dictionaries: + self.duration = duration # some more time to avoid index error + self.start_time_arr = [] + #finding the starting time: + for i, label in enumerate(self.dataset_labels): + robot_num = str(label) + groundtruth_path = self.dataset_path+"Robot"+robot_num+"_Groundtruth.dat" + with open(groundtruth_path,'r+') as groundtruth_file: + for line in groundtruth_file: + if str(line.split()[0]) != '#': + time = float(line.split()[0]) + self.start_time_arr.append(time) + break + self.start_time = max(self.start_time_arr) + delay_start + self.end_time = self.start_time + self.duration + print('Staring time: ', self.start_time) + #finding starting states: + self.starting_states = {} + for i, label in enumerate(self.dataset_labels): + robot_num = str(label) + groundtruth_path = self.dataset_path+"Robot"+robot_num+"_Groundtruth.dat" + with open(groundtruth_path,'r+') as groundtruth_file: + for line in groundtruth_file: + if line[0] != '#' and float(line.split()[0]) >= self.start_time: + time = round(float(line.split()[0]), 3) + x_pos = float(line.split()[1]) + y_pos = float(line.split()[2]) + orientation = float(line.split()[3]) + self.starting_states[label] = [time, x_pos, y_pos, orientation] + break + print('Staring states: ') + print(self.starting_states) + + + for i, label in enumerate(self.dataset_labels): + robot_num = str(label) + + groundtruth_path = self.dataset_path+"Robot"+robot_num+"_Groundtruth.dat" + with open(groundtruth_path,'r+') as groundtruth_file: + for line in groundtruth_file: + if line[0] != '#' and (self.end_time >= float(line.split()[0]) >= self.start_time): + time = 
round(float(line.split()[0]), 3) + x_pos = float(line.split()[1]) + y_pos = float(line.split()[2]) + orientation = float(line.split()[3]) + groundtruth_info = {'time':time, 'x_pos':x_pos, 'y_pos':y_pos, 'orientation':orientation} + self.groundtruth_data[i].append(groundtruth_info) + self.time_arr['groundtruth'][i].append(time) + + + meas_path = self.dataset_path+"Robot"+robot_num+"_Measurement_x.dat" + with open(meas_path,'r+') as measure_file: + for line in measure_file: + if line[0] != '#' and (self.end_time>= float(line.split()[0]) >= self.start_time): + time = round(float(line.split()[0]), 3) + subject_ID = int(line.split()[1]) + measurment_range = float(line.split()[2]) + bearing = float(line.split()[3]) + meas_info = {'time':time, 'subject_ID':subject_ID, 'measurment_range': measurment_range, 'bearing':bearing} + + g_idx = find_nearest_time_idx(self.time_arr['groundtruth'][i],time) + gt_x = self.groundtruth_data[i][g_idx]['x_pos'] + gt_y = self.groundtruth_data[i][g_idx]['y_pos'] + orientation = self.groundtruth_data[i][g_idx]['orientation'] + matched_gt_info = {'time':time, 'x_pos':gt_x, 'y_pos':gt_y, 'orientation':orientation} + + self.measurement_data[i].append(meas_info) + self.gt_data_meas[i].append(matched_gt_info) + self.time_arr['measurement'][i].append(time) + + + odo_path = self.dataset_path+"Robot"+robot_num+"_Odometry.dat" + with open(odo_path,'r+') as odometry_file: + lines = odometry_file.readlines() + for line_idx in range(0, len(lines)): + line = lines[line_idx] + if line[0] != '#' and (self.end_time >= float(line.split()[0]) >= self.start_time): + t = float(line.split()[0]) + time = round(float(line.split()[0]), 3) + g_idx = find_nearest_time_idx(self.time_arr['groundtruth'][i],time) + velocity = float(line.split()[1]) + a_v = float(line.split()[2]) + orientation = self.groundtruth_data[i][g_idx]['orientation'] + try: + next_line = lines[line_idx+1] + next_time = float(next_line.split()[0]) + delta_t = next_time - time + except IndexError: + 
delta_t = 0 + if delta_t < 0: + sys.exit('incorrect delta_t: '+ str(delta_t)) + + odo_info = {'time':time, 'velocity':velocity, 'angular velocity': a_v, 'orientation':orientation, 'delta_t': delta_t} + gt_x = self.groundtruth_data[i][g_idx]['x_pos'] + gt_y = self.groundtruth_data[i][g_idx]['y_pos'] + matched_gt_info = {'time':time, 'x_pos':gt_x, 'y_pos':gt_y, 'orientation':orientation} + + self.odometry_data[i].append(odo_info) + self.gt_data_odo[i].append(matched_gt_info) + self.time_arr['odometry'][i].append(time) + + self.data_trackers = {'odometry': np.ones((self.num_robots,), dtype=np.int), 'measurement':np.ones((self.num_robots,), dtype=np.int)} + # tracker for each robot for both each type of data to keep track of their location in the dataset + self.odo_data_trackers = np.ones((self.num_robots,), dtype=np.int) + self.dataset_data = {'odometry': self.odometry_data, 'measurement': self.measurement_data, 'groundtruth': self.groundtruth_data} + self.dataset_matched_gt_data = {'odometry': self.gt_data_odo, 'measurement': self.gt_data_meas} + print ('******** Initialization Completed ********') + print("self.start_time: ", self.start_time) + print("self.starting_states ", self.starting_states) + return self.start_time, self.starting_states, self.dataset_data, self.time_arr + + def dataset_reset(self): + self.data_trackers = {'odometry': np.ones((self.num_robots,), dtype=np.int),'measurement':np.ones((self.num_robots,), dtype=np.int), 'groundtruth':np.ones((self.num_robots,), dtype=np.int)} + + def get_start_time(self): + return self.start_time + + def get_starting_states(self): + return self.starting_states + + def get_duration(self): + return self.duration + + def get_landmark_map(self): + return self.landmark_map + + def get_start_moving_times(self): + start_moving_times = [] + time_idx = 0 + for i, label in enumerate(self.dataset_labels): + start_moving_times.append(self.dataset_data['odometry'][i][time_idx]['time']) + return start_moving_times + + def 
get_time_arr(self, data_catagory): + # retunr an array of time shows time for next dataline for each robot given data catagory + time_arr =[] + for i, label in enumerate(self.dataset_labels): + time_idx = self.data_trackers[data_catagory][i] + if time_idx == -1: + time_arr.append(self.start_time + self.duration + 10) # so that it will be out of range and not be selected + else: + time_arr.append(self.dataset_data[data_catagory][i][time_idx]['time']) + return time_arr + + def find_corresponding_ground_truth(self, robot_idx, time): + data_catagory = 'groundtruth' + gt_idx = find_nearest_time_idx(self.time_arr[data_catagory][robot_idx], time) + try: + self.dataset_data[data_catagory][robot_idx][gt_idx] + except IndexError: + print(robot_idx, 'Index ERR: ', gt_idx) + matched_gt_data = self.dataset_data[data_catagory][robot_idx][gt_idx] + return matched_gt_data + + def trackers_sync(self, current_time): + #print('sync current time: ', current_time ) + for catagory in ['odometry', 'measurement']: + for robot_idx in range(self.num_robots): + start_idx = self.data_trackers[catagory][robot_idx] + if start_idx != -1: + time_idx = find_next_time_idx(self.time_arr[catagory][robot_idx], start_idx, current_time) + self.data_trackers[catagory][robot_idx] = time_idx + + def trackers_update(self, req_type, robot_idx): + # update corrsponding trackers + if self.data_trackers[req_type][robot_idx]!=-1: + self.data_trackers[req_type][robot_idx]+=1 + #print(self.data_trackers) + #self.data_trackers['groundtruth'][robot_idx]+=1 + + def get_matched_dataline(self, req): + message = req.get_message() + + if message['robot_index'] == None: + if req.get_type() == None: + odo_time_arr = self.get_time_arr('odometry') + meas_time_arr = self.get_time_arr('measurement') + if min(odo_time_arr)>min(meas_time_arr): + robot_idx = np.argmin(meas_time_arr) + req_type = 'measurement' + req.set_type(req_type) + else: + robot_idx = np.argmin(odo_time_arr) + req_type = 'odometry' + req.set_type(req_type) + 
else: + req_type = req.get_type() + time_arr = self.get_time_arr(req_type) + robot_idx = np.argmin(time_arr) + + message['robot_index'] = robot_idx + + else: + robot_idx = message['robot_index'] + if req.get_type() == None: + odo_time_idx = self.data_trackers['odometry'][robot_idx] + meas_time_idx = self.data_trackers['measurement'][robot_idx] + if odo_time_idx != -1 and self.dataset_data['odometry'][robot_idx][odo_time_idx]['time'] >= self.dataset_data['measurement'][robot_idx][meas_time_idx]['time']: + req_type = 'measurement' + req.set_type(req_type) + elif meas_time_idx != -1 and self.dataset_data['odometry'][robot_idx][odo_time_idx]['time'] < self.dataset_data['measurement'][robot_idx][meas_time_idx]['time']: + req_type = 'odometry' + req.set_type(req_type) + else: + valid_dataline = False + else: + req_type = req.get_type() + + time_idx = self.data_trackers[req_type][robot_idx] + if self.dataset_data[req_type][robot_idx][time_idx]['time'] > self.end_time or time_idx == -1: + valid_dataline = False + else: + valid_dataline= True + message['data'] = self.dataset_data[req_type][robot_idx][time_idx] + message['groundtruth'] = self.dataset_matched_gt_data[req_type][robot_idx][time_idx] + return valid_dataline, message, req_type, robot_idx, time_idx + + + def create_synthetic_dataline(self, req, time_idx, meas_input_var): + message = req.get_message() + req_type = req.get_type() + robot_idx = message['robot_index'] + req_time = message['time'] + if req_time > self.time_arr[req_type][robot_idx][time_idx]: + try: + prev_time_idx = time_idx + post_time_idx = time_idx+1 + self.time_arr[req_type][robot_idx][post_time_idx] + except IndexError: + prev_time_idx = time_idx-1 + post_time_idx = time_idx + + else: + prev_time_idx = time_idx-1 + post_time_idx = time_idx + + t0= self.time_arr[req_type][robot_idx][prev_time_idx] + t1 = self.time_arr[req_type][robot_idx][post_time_idx] + if t1-t0 > 1: # time interval is too big for linear iterpolation + g_idx = 
find_nearest_time_idx(self.time_arr['groundtruth'][robot_idx],req_time) + gt_x = self.groundtruth_data[robot_idx][g_idx]['x_pos'] + gt_y = self.groundtruth_data[robot_idx][g_idx]['y_pos'] + gt_orientation = self.groundtruth_data[robot_idx][g_idx]['orientation'] + else: + #groudtruth = {'time':time, 'x_pos':gt_x, 'y_pos':gt_y, 'orientation':orientation} + x0 = self.dataset_matched_gt_data[req_type][robot_idx][prev_time_idx]['x_pos'] + x1 = self.dataset_matched_gt_data[req_type][robot_idx][post_time_idx]['x_pos'] + gt_x = linear_interpolation([t0, x0], [t1, x1], req_time) + y0 = self.dataset_matched_gt_data[req_type][robot_idx][prev_time_idx]['y_pos'] + y1 = self.dataset_matched_gt_data[req_type][robot_idx][post_time_idx]['y_pos'] + gt_y = linear_interpolation([t0, y0], [t1, y1], req_time) + o0 = self.dataset_matched_gt_data[req_type][robot_idx][prev_time_idx]['orientation'] + o1 = self.dataset_matched_gt_data[req_type][robot_idx][post_time_idx]['orientation'] + gt_orientation = linear_interpolation([t0, o0], [t1, o1], req_time) + + message['groundtruth'] = {'time':req_time, 'x_pos':gt_x, 'y_pos':gt_y, 'orientation':gt_orientation} + + if req_type == 'odometry': + v0 = self.dataset_data[req_type][robot_idx][prev_time_idx]['velocity'] + v1 = self.dataset_data[req_type][robot_idx][post_time_idx]['velocity'] + velocity = linear_interpolation([t0, v0], [t1, v1], req_time) + o0 = self.dataset_data[req_type][robot_idx][prev_time_idx]['orientation'] + o1 = self.dataset_data[req_type][robot_idx][post_time_idx]['orientation'] + orientation = linear_interpolation([t0, o0], [t1, o1], req_time) + message['data'] = {'time':req_time, 'velocity':velocity, 'orientation':orientation} + else: #measurement: meas_info = {'time':time, 'subject_ID':subject_ID, 'measurment_range': measurment_range, 'bearing':bearing} + pass + + subject_ID = self.dataset_data[req_type][robot_idx][time_idx]['subject_ID'] + #bearing = self.dataset_data[req_type][robot_idx][time_idx]['bearing'] + 
#print(self.dataset_data[req_type][robot_idx][time_idx]) + var_dis = meas_input_var[0] + var_phi = meas_input_var[1] + if subject_ID > 5: # landmark + [lx, ly] = self.landmark_map[subject_ID] + else: + if subject_ID not in self.dataset_labels: + obj_index = (robot_idx+2)%(len(self.dataset_labels)) + subject_ID = self.dataset_labels[obj_index] + + obj_index = self.dataset_labels.index(subject_ID) + matched_gt_data = self.find_corresponding_ground_truth(obj_index, req_time) + lx = matched_gt_data['x_pos'] + ly = matched_gt_data['y_pos'] + + measurment_range = sqrt((lx-gt_x)*(lx-gt_x)+(ly-gt_y)*(ly-gt_y)) + float(np.random.normal(0, sqrt(var_dis), 1)) + # bearing not matching with closest dataline.... + bearing = (atan2((ly-gt_y), (lx-gt_x))-gt_orientation)%pi + float(np.random.normal(0, sqrt(var_phi), 1)) + if abs(bearing-pi) < abs(bearing): + bearing = bearing-pi + + message['data'] = {'time':req_time, 'subject_ID':subject_ID, 'measurment_range': measurment_range, 'bearing':bearing} + + return message + + + def load_most_recent_dataline(self, req, time_idx): + message = req.get_message() + req_type = req.get_type() + robot_idx = message['robot_index'] + req_time = message['time'] + message['data'] = self.dataset_data[req_type][robot_idx][time_idx] + message['groundtruth'] = self.dataset_matched_gt_data[req_type][robot_idx][time_idx] + return message + + def create_additional_dataline(self, req, time_idx, meas_input_var): + message = self.create_synthetic_dataline(req, time_idx, meas_input_var) + return message + + + def get_dataline_at_specific_time(self, req, time_diff_limit = 0.1): + message = req.get_message() + req_type = req.get_type() + meas_input_var = [0.01, 0.01] + odo_input_var = [0.01, 0.01] + robot_idx = message['robot_index'] + req_time = message['time'] + if req_time < self.dataset_data[req_type][robot_idx][0]['time']: + g_idx = find_nearest_time_idx(self.time_arr['groundtruth'][robot_idx],req_time) + gt_x = 
self.groundtruth_data[robot_idx][g_idx]['x_pos'] + gt_y = self.groundtruth_data[robot_idx][g_idx]['y_pos'] + gt_orientation = self.groundtruth_data[robot_idx][g_idx]['orientation'] + message['groundtruth'] = {'time':req_time, 'x_pos':gt_x, 'y_pos':gt_y, 'orientation':gt_orientation} + if req_type == 'odometry': + message['data'] = {'time':req_time, 'velocity':0 , 'orientation':gt_orientation + float(np.random.normal(0, sqrt(odo_input_var[1]), 1) )} + else: + subject_ID = 6 + [lx, ly] = self.landmark_map[subject_ID] + measurment_range = sqrt((lx-gt_x)*(lx-gt_x)+(ly-gt_y)*(ly-gt_y)) + float(np.random.normal(0, sqrt(meas_input_var[0]), 1)) + bearing = (atan2((ly-gt_y), (lx-gt_x))-gt_orientation)%pi + float(np.random.normal(0, sqrt(meas_input_var[1]), 1)) + if abs(bearing-pi) < abs(bearing): + bearing = bearing-pi + + message['data'] = {'time':req_time, 'subject_ID':subject_ID, 'measurment_range': measurment_range, 'bearing':bearing} + valid_dataline = True + time_idx = 0 + return valid_dataline, message, req_type, robot_idx, time_idx + + + + time_idx = find_nearest_time_idx(self.time_arr[req_type][robot_idx], req_time) + respond_time = self.time_arr[req_type][robot_idx][time_idx] + + message['data'] = self.dataset_data[req_type][robot_idx][time_idx] + message['groundtruth'] = self.dataset_matched_gt_data[req_type][robot_idx][time_idx] + + if abs(respond_time-req_time) > time_diff_limit and self.adding_actifical_dataline: + message = self.create_additional_dataline(req, time_idx, meas_input_var) + + if req_time > self.end_time: + valid_dataline = False + else: + valid_dataline= True + return valid_dataline, message, req_type, robot_idx, time_idx + + + def respond(self, req, current_time, need_specific_time = False): + message = req.get_message() + self.trackers_sync(current_time) + if need_specific_time: + valid_respond, message, req_type, robot_idx, time_idx = self.get_dataline_at_specific_time(req) + current_time = message['time'] + else: + valid_respond, message, 
req_type, robot_idx, time_idx = self.get_matched_dataline(req) + current_time = self.dataset_data[req_type][robot_idx][time_idx]['time'] + if valid_respond: + #load data + message['time'] = current_time + self.trackers_update(req_type, robot_idx) + req.set_message(message) + + return valid_respond, current_time, req + diff --git a/CoLo-AT/dataset_manager/existing_dataset.py~ b/CoLo-AT/dataset_manager/existing_dataset.py~ new file mode 100755 index 0000000000000000000000000000000000000000..0e266a607f21cc45a8b5aa2a7c150c40f3c810d1 --- /dev/null +++ b/CoLo-AT/dataset_manager/existing_dataset.py~ @@ -0,0 +1,740 @@ +# -*- coding: utf-8 -*- + +import numpy as np +from numpy import random +import sys +<<<<<<< HEAD +from math import atan2, sqrt, pi +======= +import random +from math import cos +from math import sin +import math +>>>>>>> caa3398ca773a598bfbaedd0e32f33e92280c52c +''' +sys.path.insert(0, '/Users/william/Documents/SLAM_Simulation/Simulation-Environment-for-Cooperative-Localization/functions/requests') +import request_response +''' + +def find_nearest_time_idx(l, value): + if len(l) != 0: + array = np.asarray(l) + idx = (np.abs(array-value)).argmin() + return idx + else: + return None + + + +def find_next_time_idx(array, start_idx, value): + i = start_idx + try: + array[i] + except IndexError: + i = -1 + return i + while array[i] < value: + i = i+1 + try: + array[i] + except IndexError: + i = -1 + break + return i + +<<<<<<< HEAD +def linear_interpolation(end0, end1, x): + [x0, y0] = end0 + [x1, y1] = end1 + if x1 == x0: + y = y0 + else: + y = y0+(x-x0)*(y1-y0)/(x1-x0) + return y + +class Dataset: + + def __init__(self, dataset_name): +======= +def get_pos_within_rad(rad): + rad = abs(rad) + while True: + x = random.uniform(-1*rad, rad) + y = random.uniform(-1*rad, rad) + if (((x**2) + (y**2))**.5 > rad): + continue + else: + return x, y + +def sanitize_angle(angle): + if -3.1415 > angle: + temp = abs(angle) - 3.1415 + angle = 3.1415 - temp + if angle > 3.1415: 
+ temp = abs(angle) - 3.1415 + angle = -3.1415 + temp + return angle + +class Dataset: + + def __init__(self, dataset_name, dataset_path, dataset_labels): + random.seed(a=100) #start a seed for the random data generator +>>>>>>> caa3398ca773a598bfbaedd0e32f33e92280c52c + self.name = dataset_name + + def create_landmark_map(self): + ### Build landmark map ### + self.landmark_map = {} + path=self.dataset_path + "Landmark_Groundtruth.dat" + landmark_file=open(path,'r+'); + s = landmark_file.readline() + while(s): + if(s[0]!='#'): + landmark_location = [float(s.split( )[1]), float(s.split( )[2])] + self.landmark_map.update({int(s.split( )[0]): landmark_location}) + s = landmark_file.readline() + landmark_file.close() + #print "lm ", self.landmark_map, " lm" + return self.landmark_map + + + def load_MRCLAMDatasets(self, dataset_path, dataset_labels, duration, adding_actifical_dataline = True, delay_start = 10, synthetic = True): + print ('******** Initialization Started ********') + self.synthetic = synthetic + print ('add synthetic data: ', self.synthetic) + self.dataset_path = dataset_path + self.dataset_labels = dataset_labels + self.adding_actifical_dataline = adding_actifical_dataline + self.create_landmark_map() + + self.num_robots = len(self.dataset_labels) + self.measurement_data = [[] for i in range(self.num_robots)] + self.odometry_data = [[] for i in range(self.num_robots)] + self.gt_data_odo = [[] for i in range(self.num_robots)] + self.gt_data_meas = [[] for i in range(self.num_robots)] + self.groundtruth_data = [[] for i in range(self.num_robots)] + + #self.groundtruth_time= [[] for i in range(self.num_robots)] + self.time_arr = {'odometry': [[] for i in range(self.num_robots)], 'measurement': [[] for i in range(self.num_robots)], 'groundtruth': [[] for i in range(self.num_robots)]} + + #initialization for MRCLAMDatasets: put files into dictionaries: + self.duration = duration # some more time to avoid index error + self.start_time_arr = [] + #finding the 
starting time: + for i, label in enumerate(self.dataset_labels): + robot_num = str(label) + groundtruth_path = self.dataset_path+"Robot"+robot_num+"_Groundtruth.dat" + with open(groundtruth_path,'r+') as groundtruth_file: + for line in groundtruth_file: + if str(line.split()[0]) != '#': + time = float(line.split()[0]) + self.start_time_arr.append(time) + break + self.start_time = max(self.start_time_arr) + delay_start + self.end_time = self.start_time + self.duration + print('Staring time: ', self.start_time) + #finding starting states: + self.starting_states = {} + for i, label in enumerate(self.dataset_labels): + robot_num = str(label) + groundtruth_path = self.dataset_path+"Robot"+robot_num+"_Groundtruth.dat" + with open(groundtruth_path,'r+') as groundtruth_file: + for line in groundtruth_file: + if line[0] != '#' and float(line.split()[0]) >= self.start_time: + time = float(line.split()[0]) + x_pos = float(line.split()[1]) + y_pos = float(line.split()[2]) + orientation = float(line.split()[3]) + self.starting_states[label] = [time, x_pos, y_pos, orientation] + break + print('Staring states: ') + print(self.starting_states) + + + for i, label in enumerate(self.dataset_labels): + robot_num = str(label) + + groundtruth_path = self.dataset_path+"Robot"+robot_num+"_Groundtruth.dat" + with open(groundtruth_path,'r+') as groundtruth_file: + for line in groundtruth_file: + if line[0] != '#' and (self.end_time+2 >= float(line.split()[0]) >= self.start_time): + time = float(line.split()[0]) + x_pos = float(line.split()[1]) + y_pos = float(line.split()[2]) + orientation = float(line.split()[3]) + groundtruth_info = {'time':time, 'x_pos':x_pos, 'y_pos':y_pos, 'orientation':orientation} + self.groundtruth_data[i].append(groundtruth_info) + self.time_arr['groundtruth'][i].append(time) + + + meas_path = self.dataset_path+"Robot"+robot_num+"_Measurement_x.dat" + with open(meas_path,'r+') as measure_file: + for line in measure_file: + if line[0] != '#' and (self.end_time+2 >= 
float(line.split()[0]) >= self.start_time): + time = float(line.split()[0]) + subject_ID = int(line.split()[1]) + measurment_range = float(line.split()[2]) + bearing = float(line.split()[3]) + meas_info = {'time':time, 'subject_ID':subject_ID, 'measurment_range': measurment_range, 'bearing':bearing} + + g_idx = find_nearest_time_idx(self.time_arr['groundtruth'][i],time) + gt_x = self.groundtruth_data[i][g_idx]['x_pos'] + gt_y = self.groundtruth_data[i][g_idx]['y_pos'] + orientation = self.groundtruth_data[i][g_idx]['orientation'] + matched_gt_info = {'time':time, 'x_pos':gt_x, 'y_pos':gt_y, 'orientation':orientation} + + self.measurement_data[i].append(meas_info) + self.gt_data_meas[i].append(matched_gt_info) + self.time_arr['measurement'][i].append(time) + + + odo_path = self.dataset_path+"Robot"+robot_num+"_Odometry.dat" + with open(odo_path,'r+') as odometry_file: + for line in odometry_file: + if line[0] != '#' and (self.end_time+2 >= float(line.split()[0]) >= self.start_time): + time = float(line.split()[0]) + g_idx = find_nearest_time_idx(self.time_arr['groundtruth'][i],time) + velocity = float(line.split()[1]) + orientation = self.groundtruth_data[i][g_idx]['orientation'] + #print "orientation ", orientation + odo_info = {'time':time, 'velocity':velocity, 'orientation':orientation} + + gt_x = self.groundtruth_data[i][g_idx]['x_pos'] + gt_y = self.groundtruth_data[i][g_idx]['y_pos'] + matched_gt_info = {'time':time, 'x_pos':gt_x, 'y_pos':gt_y, 'orientation':orientation} + + self.odometry_data[i].append(odo_info) + self.gt_data_odo[i].append(matched_gt_info) + self.time_arr['odometry'][i].append(time) + + self.data_trackers = {'odometry': np.ones((self.num_robots,), dtype=np.int), 'measurement':np.ones((self.num_robots,), dtype=np.int)} + # tracker for each robot for both each type of data to keep track of their location in the dataset + self.odo_data_trackers = np.ones((self.num_robots,), dtype=np.int) + self.dataset_data = {'odometry': self.odometry_data, 
'measurement': self.measurement_data, 'groundtruth': self.groundtruth_data} + self.dataset_matched_gt_data = {'odometry': self.gt_data_odo, 'measurement': self.gt_data_meas} + print ('******** Initialization Completed ********') + + return self.start_time, self.starting_states, self.dataset_data, self.time_arr + + def dataset_reset(self): + self.data_trackers = {'odometry': np.ones((self.num_robots,), dtype=np.int),'measurement':np.ones((self.num_robots,), dtype=np.int), 'groundtruth':np.ones((self.num_robots,), dtype=np.int)} + + def get_start_time(self): + return self.start_time + + def get_starting_states(self): + return self.starting_states + + def get_duration(self): + return self.duration + + def get_landmark_map(self): + return self.landmark_map + + def get_start_moving_times(self): + start_moving_times = [] + time_idx = 0 + for i, label in enumerate(self.dataset_labels): + start_moving_times.append(self.dataset_data['odometry'][i][time_idx]['time']) + return start_moving_times + + def get_time_arr(self, data_catagory): + # retunr an array of time shows time for next dataline for each robot given data catagory + time_arr =[] + for i, label in enumerate(self.dataset_labels): + time_idx = self.data_trackers[data_catagory][i] + if time_idx == -1: + time_arr.append(self.start_time + self.duration + 10) # so that it will be out of range and not be selected + else: + time_arr.append(self.dataset_data[data_catagory][i][time_idx]['time']) + return time_arr + + def find_corresponding_ground_truth(self, robot_idx, time): + data_catagory = 'groundtruth' + gt_idx = find_nearest_time_idx(self.time_arr[data_catagory][robot_idx], time) + try: + self.dataset_data[data_catagory][robot_idx][gt_idx] + except IndexError: + print(robot_idx, 'Index ERR: ', gt_idx) + matched_gt_data = self.dataset_data[data_catagory][robot_idx][gt_idx] + return matched_gt_data + + def trackers_sync(self, current_time): +<<<<<<< HEAD + #print('sync current time: ', current_time ) + for catagory in 
['odometry', 'measurement']: +======= + for catagory in ['odometry', 'measurement', 'groundtruth']: +>>>>>>> caa3398ca773a598bfbaedd0e32f33e92280c52c + for robot_idx in range(self.num_robots): + start_idx = self.data_trackers[catagory][robot_idx] + if start_idx != -1: + time_idx = find_next_time_idx(self.time_arr[catagory][robot_idx], start_idx, current_time) + self.data_trackers[catagory][robot_idx] = time_idx + + def trackers_update(self,req_type,robot_idx): + # update corrsponding trackers + self.data_trackers[req_type][robot_idx]+=1 + #self.data_trackers['groundtruth'][robot_idx]+=1 + + def get_matched_dataline(self, req): + message = req.get_message() + + if message['robot_index'] == None: + if req.get_type() == None: + odo_time_arr = self.get_time_arr('odometry') + meas_time_arr = self.get_time_arr('measurement') + if min(odo_time_arr)>min(meas_time_arr): + robot_idx = np.argmin(meas_time_arr) + req_type = 'measurement' + req.set_type(req_type) + else: + robot_idx = np.argmin(odo_time_arr) + req_type = 'odometry' + req.set_type(req_type) + else: + req_type = req.get_type() + time_arr = self.get_time_arr(req_type) + robot_idx = np.argmin(time_arr) + + message['robot_index'] = robot_idx + + else: + robot_idx = message['robot_index'] + if req.get_type() == None: + time_idx = self.data_trackers['odometry'][robot_idx] + if self.dataset_data['odometry'][robot_idx][time_idx]['time']>self.dataset_data['measurement'][robot_idx][time_idx]['time']: + req_type = 'measurement' + req.set_type(req_type) + else: + req_type = 'odometry' + req.set_type(req_type) + else: + req_type = req.get_type() + + time_idx = self.data_trackers[req_type][robot_idx] + if self.dataset_data[req_type][robot_idx][time_idx]['time'] > self.end_time: + valid_dataline = False + else: + valid_dataline= True + message['data'] = self.dataset_data[req_type][robot_idx][time_idx] + message['groundtruth'] = self.dataset_matched_gt_data[req_type][robot_idx][time_idx] + return valid_dataline, message, 
req_type, robot_idx, time_idx + + + def create_synthetic_dataline(self, req, time_idx, meas_input_var): + message = req.get_message() + req_type = req.get_type() + robot_idx = message['robot_index'] + req_time = message['time'] + if req_time > self.time_arr[req_type][robot_idx][time_idx]: + try: + prev_time_idx = time_idx + post_time_idx = time_idx+1 + self.time_arr[req_type][robot_idx][post_time_idx] + except IndexError: + prev_time_idx = time_idx-1 + post_time_idx = time_idx + + else: + prev_time_idx = time_idx-1 + post_time_idx = time_idx + + t0= self.time_arr[req_type][robot_idx][prev_time_idx] + t1 = self.time_arr[req_type][robot_idx][post_time_idx] + if t1-t0 > 1: # time interval is too big for linear iterpolation + g_idx = find_nearest_time_idx(self.time_arr['groundtruth'][robot_idx],req_time) + gt_x = self.groundtruth_data[robot_idx][g_idx]['x_pos'] + gt_y = self.groundtruth_data[robot_idx][g_idx]['y_pos'] + gt_orientation = self.groundtruth_data[robot_idx][g_idx]['orientation'] + else: + #groudtruth = {'time':time, 'x_pos':gt_x, 'y_pos':gt_y, 'orientation':orientation} + x0 = self.dataset_matched_gt_data[req_type][robot_idx][prev_time_idx]['x_pos'] + x1 = self.dataset_matched_gt_data[req_type][robot_idx][post_time_idx]['x_pos'] + gt_x = linear_interpolation([t0, x0], [t1, x1], req_time) + y0 = self.dataset_matched_gt_data[req_type][robot_idx][prev_time_idx]['y_pos'] + y1 = self.dataset_matched_gt_data[req_type][robot_idx][post_time_idx]['y_pos'] + gt_y = linear_interpolation([t0, y0], [t1, y1], req_time) + o0 = self.dataset_matched_gt_data[req_type][robot_idx][prev_time_idx]['orientation'] + o1 = self.dataset_matched_gt_data[req_type][robot_idx][post_time_idx]['orientation'] + gt_orientation = linear_interpolation([t0, o0], [t1, o1], req_time) + + message['groundtruth'] = {'time':req_time, 'x_pos':gt_x, 'y_pos':gt_y, 'orientation':gt_orientation} + + if req_type == 'odometry': + v0 = self.dataset_data[req_type][robot_idx][prev_time_idx]['velocity'] + v1 = 
self.dataset_data[req_type][robot_idx][post_time_idx]['velocity'] + velocity = linear_interpolation([t0, v0], [t1, v1], req_time) + o0 = self.dataset_data[req_type][robot_idx][prev_time_idx]['orientation'] + o1 = self.dataset_data[req_type][robot_idx][post_time_idx]['orientation'] + orientation = linear_interpolation([t0, o0], [t1, o1], req_time) + message['data'] = {'time':req_time, 'velocity':velocity, 'orientation':orientation} + else: #measurement: meas_info = {'time':time, 'subject_ID':subject_ID, 'measurment_range': measurment_range, 'bearing':bearing} + pass + + subject_ID = self.dataset_data[req_type][robot_idx][time_idx]['subject_ID'] + #bearing = self.dataset_data[req_type][robot_idx][time_idx]['bearing'] + #print(self.dataset_data[req_type][robot_idx][time_idx]) + var_dis = meas_input_var[0] + var_phi = meas_input_var[1] + if subject_ID > 5: # landmark + [lx, ly] = self.landmark_map[subject_ID] + else: + if subject_ID not in self.dataset_labels: + obj_index = (robot_idx+2)%(len(self.dataset_labels)) + subject_ID = self.dataset_labels[obj_index] + + obj_index = self.dataset_labels.index(subject_ID) + matched_gt_data = self.find_correspoind_ground_truth(obj_index, req_time) + lx = matched_gt_data['x_pos'] + ly = matched_gt_data['y_pos'] + + measurment_range = sqrt((lx-gt_x)*(lx-gt_x)+(ly-gt_y)*(ly-gt_y)) + random.normal(0, sqrt(var_dis)) + # bearing not matching with closest dataline.... 
+ bearing = (atan2((ly-gt_y), (lx-gt_x))-gt_orientation)%pi + random.normal(0, sqrt(var_dis)) + if abs(bearing-pi) < abs(bearing): + bearing = bearing-pi + + message['data'] = {'time':req_time, 'subject_ID':subject_ID, 'measurment_range': measurment_range, 'bearing':bearing} + + return message + + + def load_most_recent_dataline(self, req, time_idx): + message = req.get_message() + req_type = req.get_type() + robot_idx = message['robot_index'] + req_time = message['time'] + message['data'] = self.dataset_data[req_type][robot_idx][time_idx] + message['groundtruth'] = self.dataset_matched_gt_data[req_type][robot_idx][time_idx] + return message + + def create_additional_dataline(self, req, time_idx, meas_input_var): + if self.synthetic: + message = self.create_synthetic_dataline(req, time_idx, meas_input_var) + else: + message = self.load_most_recent_dataline(req, time_idx) + return message + + + def get_dataline_at_specific_time(self, req, time_diff_limit = 0.1): + message = req.get_message() + req_type = req.get_type() + robot_idx = message['robot_index'] + req_time = message['time'] + time_idx = find_nearest_time_idx(self.time_arr[req_type][robot_idx], req_time) + respond_time = self.time_arr[req_type][robot_idx][time_idx] + + message['data'] = self.dataset_data[req_type][robot_idx][time_idx] + message['groundtruth'] = self.dataset_matched_gt_data[req_type][robot_idx][time_idx] + + if abs(respond_time-req_time) > time_diff_limit and self.adding_actifical_dataline: + meas_input_var = [0.1, 0.1] + message = self.create_additional_dataline(req, time_idx, meas_input_var) + + if req_time > self.end_time: + valid_dataline = False + else: + valid_dataline= True + return valid_dataline, message, req_type, robot_idx, time_idx + + + def respond(self, req, current_time, need_specific_time = False): + message = req.get_message() + self.trackers_sync(current_time) +<<<<<<< HEAD + if need_specific_time: + valid_respond, message, req_type, robot_idx, time_idx = 
self.get_dataline_at_specific_time(req) + current_time = message['time'] + else: + valid_respond, message, req_type, robot_idx, time_idx = self.get_matched_dataline(req) + current_time = self.dataset_data[req_type][robot_idx][time_idx]['time'] + + if valid_respond: + #load data + message['time'] = current_time + self.trackers_update(req_type, robot_idx) +======= + valid_respond, req_type, robot_idx, time_idx = self.get_matched_dataline(req) + if valid_respond: + #load data + current_time = self.dataset_data[req_type][robot_idx][time_idx]['time'] + message['time'] = self.dataset_data[req_type][robot_idx][time_idx]['time'] + message['data'] = self.dataset_data[req_type][robot_idx][time_idx] + #load corresponding groundtruth + time = self.dataset_data[req_type][robot_idx][time_idx]['time'] + matched_gt_data = self.find_corresponding_ground_truth(robot_idx, time) + message['groundtruth'] = matched_gt_data + self.trackers_update(req_type,robot_idx) +>>>>>>> caa3398ca773a598bfbaedd0e32f33e92280c52c + req.set_message(message) + + return valid_respond, current_time, req + + + def generate_random_landmark_map(self): + """num_landmarks = random.randint(8, 20) + + self.landmark_map = {} + for i in range(num_landmarks): + x_coord, y_coord = get_pos_within_rad(5) + self.landmark_map[i+1] = [x_coord, y_coord] + """ + self.landmark_map = {1: [-5.0, 0.0]} + print "map", self.landmark_map + return self.landmark_map + + # take an x and y from the robot and the first two params, + # and a list of coords for the landmark as the second param + # return the distance between them + def landmark_distance(self, old_x, old_y, new_coords): + new_x, new_y = new_coords[0], new_coords[1] + return math.sqrt((old_x - new_x)**2 + (old_y - new_y)**2) + + def getBearing(self, x, y): + return math.atan2(y, x) + + def getLineAngle(self, my_x, my_y, lm_x, lm_y, orientation): + theta = self.getBearing(my_x - lm_x, my_y - lm_y) + #need to account that you don't yet know which direction the angle is, + 
#eg robot to lm or lmm to robot + if (theta > 0) and (my_y > lm_y): + theta = -3.1415 + theta + elif (theta < 0) and (my_y < lm_y): + print "correcting" + theta = 3.1415 + theta + + angle_from_robot = sanitize_angle(theta - orientation) + + return angle_from_robot + + def get_closest_landmark(self, x, y): + closest_distance = 10 + closest_lm = 0 + for key, coords in self.landmark_map.items(): + distance = self.landmark_distance(x, y, coords) + if (distance < closest_distance): + closest_distance = distance + closest_lm = key + + #print closest_lm, closest_distance, x, y, coords[0], coords[1] + return closest_lm, closest_distance + + def get_second_closest_landmark(self, x, y): + closest_lm, closest_distance = self.get_closest_landmark(x, y) + second_closest_distance = 10 + second_closest_lm = 0 + for key, coords in self.landmark_map.items(): + distance = self.landmark_distance(x, y, coords) + if (distance < second_closest_distance and not(key == closest_lm)): + second_closest_distance = distance + second_closest_lm = key + + return second_closest_lm, second_closest_distance + + def initialization_Random_Dataset(self, duration): + print ('******** Random Initialization Started ********') + self.generate_random_landmark_map()# + + self.num_robots = len(self.dataset_labels) + self.measurement_data = [[] for i in range(self.num_robots)] + self.odometry_data = [[] for i in range(self.num_robots)] + self.groundtruth_data = [[] for i in range(self.num_robots)] + self.time_arr = {'odometry': [[] for i in range(self.num_robots)], 'measurement': [[] for i in range(self.num_robots)], 'groundtruth': [[] for i in range(self.num_robots)]} + + self.duration = duration # some more time to avoid index error + self.start_time_arr = [] + + + #finding the starting time: + self.start_time = (1e9) + self.end_time = self.start_time + self.duration + print('Staring time: ', self.start_time) + + #finding starting states: + self.starting_states = {} + robot_movement_dict = {} + for i, label 
in enumerate(self.dataset_labels): + robot_num = str(label) + + #generate the position starts here + x_pos, y_pos = get_pos_within_rad(5) + orientation = 0#random.uniform(-3.1415, 3.1415) + + self.starting_states[label] = [self.start_time, x_pos, y_pos, orientation] + print('Starting states: ') + print(self.starting_states) + + # + for i, label in enumerate(self.dataset_labels): + robot_num = str(label) + + #give the robot a random starting position + velocity = random.uniform(0.0, 1.0) + inits = self.starting_states[label] + x_pos = inits[1] + y_pos = inits[2] + orientation = inits[3] + this_robot = Robot_Movement_Generator(x_pos, y_pos, orientation, robot_num) + robot_movement_dict[robot_num] = this_robot + + + measurement_file = open("random" + robot_num + "_measurement_x.dat", 'w') + + time = self.start_time + ticks = 0 + while (time < self.end_time):# this is a fixed number of iterations right now + #time moves at a constant step right now + time_delta = 0.0195 + time = time + time_delta + ticks = ticks + 1 + + velocity, orientation, x_pos, y_pos = this_robot.run_tick(time, time_delta) + + #these are biased values that are approximated by the robot from the groundtruth + odo_info = {'time':time, 'velocity':velocity, 'orientation':orientation} + self.odometry_data[i].append(odo_info) + self.time_arr['odometry'][i].append(time)# I think there may be a mistake here. 
This value isn't meant to be orientation, it should be angular velocity + + groundtruth_info = {'time':time, 'x_pos':x_pos, 'y_pos':y_pos, 'orientation':orientation} # groundtruth + self.groundtruth_data[i].append(groundtruth_info) + self.time_arr['groundtruth'][i].append(time) + + #choose a random landmark and calculate the distance from it + if ticks % 23 == 0: + closest_landmark, closest_distance = self.get_closest_landmark(x_pos, y_pos) + if closest_distance < 5: + measurement_range = closest_distance #+ random.gauss(0.0, .001) #TODO: find a legitimate way to choose this sigma + bearing = self.getLineAngle(x_pos, y_pos, self.landmark_map[closest_landmark][0], self.landmark_map[closest_landmark][1], orientation) + meas_info = {'time':time, 'subject_ID':closest_landmark, 'measurment_range': measurement_range, 'bearing':bearing} + print meas_info + print x_pos, y_pos, self.landmark_map[closest_landmark][0], self.landmark_map[closest_landmark][1], bearing + measurement_file.write(str(time) + "\t" + str(closest_landmark) + "\t" + str(measurement_range) + "\t" + str(bearing) + "\n") + self.measurement_data[i].append(meas_info) + self.time_arr['measurement'][i].append(time) + second_closest_landmark, second_closest_distance = self.get_second_closest_landmark(x_pos, y_pos) + if second_closest_distance < 3: + measurement_range = second_closest_distance #+ random.gauss(0.0, .001) #TODO: find a legitimate way to choose this sigma + bearing = self.getLineAngle(x_pos, y_pos, self.landmark_map[second_closest_landmark][0], self.landmark_map[second_closest_landmark][1], orientation) + print meas_info + meas_info = {'time':time, 'subject_ID':second_closest_landmark, 'measurment_range': measurement_range, 'bearing':bearing} + measurement_file.write(str(time) + "\t" + str(second_closest_landmark) + "\t" + str(measurement_range) + "\t" + str(bearing) + "\n") + self.measurement_data[i].append(meas_info) + self.time_arr['measurement'][i].append(time) + + + measurement_file.close() + 
this_robot.close_file() + self.data_trackers = {'odometry': np.ones((self.num_robots,), dtype=np.int),'measurement':np.ones((self.num_robots,), dtype=np.int), 'groundtruth':np.ones((self.num_robots,), dtype=np.int)} + # tracker for each robot for both each type of data to keep track of their location in the dataset + self.odo_data_trackers = np.ones((self.num_robots,), dtype=np.int) + self.dataset_data = {'odometry': self.odometry_data, 'measurement': self.measurement_data, 'groundtruth': self.groundtruth_data} + print ('******** Initialization Completed ********') + + return self.start_time, self.starting_states, self.dataset_data, self.time_arr + + + +class Robot_Movement_Generator: + def __init__(self, x, y, orientation, num): + self.robot_num = num + self.velocity = .04#random.uniform(0.0, .1) + self.max_velocity = 0#.1 + self.orientation = orientation + self.angular_velocity = 0#.15 + self.max_angular_velocity = 0#.15 + self.x = x + self.y = y + self.sensor_noise = 0#.25 + self.odometry_file = open("random" + self.robot_num + "_odometry.dat", 'w') + self.groundtruth_file = open("random"+ self.robot_num + "_groundtruth.dat", 'w') + + def close_file(self): + self.odometry_file.close() + self.groundtruth_file.close() + + def update_xy(self, time_delta): + #check if the update is in bounds and if so do it + temp_x = self.x + cos(self.orientation) * self.velocity * (time_delta) + temp_y = self.y + sin(self.orientation) * self.velocity * (time_delta) + if ((temp_x**2 + self.y**2)**.5 < 5) and ((self.y**2 + temp_y**2)**.5 < 5): + self.x = temp_x + self.y = temp_y + else: + self.velocity = 0 + return self.x, self.y + + + def update_angular_velocity(self): + self.angular_velocity = self.max_angular_velocity + return self.angular_velocity + + def update_velocity(self): + self.velocity = self.max_velocity + return self.velocity + + def update_orientation(self, time_delta): + self.orientation += self.update_angular_velocity() * time_delta + self.orientation = 
sanitize_angle(self.orientation) + return self.orientation + + + def run_tick(self, time, time_delta): + #add biasing here, after we've already computed an accurate groundtruth + v = self.update_velocity()+ random.gauss(0.0, self.sensor_noise) + o = self.update_orientation(time_delta)+ random.gauss(0.0, self.sensor_noise) + x, y = self.update_xy(time_delta) + + + self.odometry_file.write(str(time) + "\t" + str(v) + "\t\t" + str(self.angular_velocity) + "\n") + + self.groundtruth_file.write(str(time) + "\t" + str(self.x) + "\t" + str(self.y) + "\t" + str(self.orientation) + "\n") + return (v, o, x, y) + + + + + +# test script +""" +rgf = open('random1_groundtruth.dat', 'r') +rof = open('random1_odometry.dat', 'r') +end_time=1000000100 +start_time = 1000000000 + +times = [] +x_poss = [] +y_poss = [] +orientations = [] + +times2 = [] +velocities = [] +orientations2 = [] +for line in rgf: + if line[0] != '#' and (end_time+2 >= float(line.split()[0]) >= start_time): + times.append(float(line.split()[0])) + x_poss.append(float(line.split()[1])) + y_poss.append(float(line.split()[2])) + orientations.append(float(line.split()[3])) + +for line in rof: + if line[0] != '#' and (end_time+2 >= float(line.split()[0]) >= start_time): + times2.append(float(line.split()[0])) + velocities.append(float(line.split()[1])) + orientations2.append(float(line.split()[2])) + +for i in range(len(rof)): + + +<<<<<<< HEAD + + + + + + + + +======= +print orientations, orientations2""" +>>>>>>> caa3398ca773a598bfbaedd0e32f33e92280c52c diff --git a/CoLo-AT/dataset_manager/testing_dataset.py b/CoLo-AT/dataset_manager/testing_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..3ac374de41bd8955af7959bf7196fe74fbfbac01 --- /dev/null +++ b/CoLo-AT/dataset_manager/testing_dataset.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +""" +Created on Wed Feb 14 16:13:43 2018 + +@author: william +""" +from existing_dataset import Dataset + +import sys 
+sys.path.insert(0, '/Users/william/Documents/SLAM_Simulation/Simulation-Environment-for-Cooperative-Localization/functions/requests') +import request_response + +"""Testing branch for dataset related classes and functions""" + +# testing for DataSet class + +dataset_path = '/Users/william/Documents/SLAM_Simulation/Simulation-Environment-for-Cooperative-Localization/datasets/MRCLAM_Dataset1/' +dataset_labels = [1,2,3,4,5] +testing_dataset = Dataset('testing', dataset_path, dataset_labels) +lm_map = testing_dataset.create_landmark_map() +print(lm_map) +st,_,d,t_arr = testing_dataset.initialization_MRCLAMDatasets(20) + +''' +for i, label in enumerate(dataset_labels): + print(d['odometry'][i][-1]['time']) + print(d['measurement'][i][-1]['time']) + print(d['groundtruth'][i][-1]['time']) +''' +''' +prop_req= request_response.Prop_req_resp(None, None) +print(prop_req.get_message()) +print(prop_req.get_type()) +''' + + +g_req= request_response.Request_response(None, None) +print(g_req.get_message()) +print(g_req.get_type()) +''' +c_t = st +prev_time = 0 +while True: + print('***************** ****************') + g_req= request_response.Request_response(None, None) + prev_time = c_t + valid, c_t, rsp = testing_dataset.respond(g_req, c_t) + print('current time: ', c_t) + + g_req= request_response.Meas_req_resp(None, None) + prev_time = c_t + valid, c_t, rsp = testing_dataset.respond(g_req, c_t) + print('current time-M: ', c_t) + + if c_t < prev_time: + print('Time inconsistant!') + break + + if valid == False : + print('Done!') + break + +''' \ No newline at end of file diff --git a/CoLo-AT/localization_algos/.DS_Store b/CoLo-AT/localization_algos/.DS_Store new file mode 100755 index 0000000000000000000000000000000000000000..16a5a9ef2686ad5e79514819d39d7bf82f5ec619 Binary files /dev/null and b/CoLo-AT/localization_algos/.DS_Store differ diff --git a/CoLo-AT/localization_algos/__init__.py b/CoLo-AT/localization_algos/__init__.py new file mode 100755 index 
0000000000000000000000000000000000000000..b6e690fd59145ce8900fd9ab8d8a996ee7d33834 --- /dev/null +++ b/CoLo-AT/localization_algos/__init__.py @@ -0,0 +1 @@ +from . import * diff --git a/CoLo-AT/localization_algos/__pycache__/centralized_ekf.cpython-35.pyc b/CoLo-AT/localization_algos/__pycache__/centralized_ekf.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81a915cb00b1fdfc5005ee5199ace0d68e5e028d Binary files /dev/null and b/CoLo-AT/localization_algos/__pycache__/centralized_ekf.cpython-35.pyc differ diff --git a/CoLo-AT/localization_algos/__pycache__/centralized_ekf.cpython-36.pyc b/CoLo-AT/localization_algos/__pycache__/centralized_ekf.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d947265cbc05ed085fa91c83a8c81e78989f7e8b Binary files /dev/null and b/CoLo-AT/localization_algos/__pycache__/centralized_ekf.cpython-36.pyc differ diff --git a/CoLo-AT/localization_algos/__pycache__/centralized_ekf2.cpython-35.pyc b/CoLo-AT/localization_algos/__pycache__/centralized_ekf2.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d91470af95ab53073a2de77c321310fd870a95b Binary files /dev/null and b/CoLo-AT/localization_algos/__pycache__/centralized_ekf2.cpython-35.pyc differ diff --git a/CoLo-AT/localization_algos/__pycache__/centralized_ekf2.cpython-36.pyc b/CoLo-AT/localization_algos/__pycache__/centralized_ekf2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fecab09170779466d62b94741224829306bf89ef Binary files /dev/null and b/CoLo-AT/localization_algos/__pycache__/centralized_ekf2.cpython-36.pyc differ diff --git a/CoLo-AT/localization_algos/__pycache__/ekf_gs_bound.cpython-35.pyc b/CoLo-AT/localization_algos/__pycache__/ekf_gs_bound.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..70305042b116cf32fd00875c639e615cae841b7b Binary files /dev/null and 
b/CoLo-AT/localization_algos/__pycache__/ekf_gs_bound.cpython-35.pyc differ diff --git a/CoLo-AT/localization_algos/__pycache__/ekf_gs_bound.cpython-36.pyc b/CoLo-AT/localization_algos/__pycache__/ekf_gs_bound.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2368b8f11cbbd40638748256f016fb4fe3099cf Binary files /dev/null and b/CoLo-AT/localization_algos/__pycache__/ekf_gs_bound.cpython-36.pyc differ diff --git a/CoLo-AT/localization_algos/__pycache__/ekf_gs_ci.cpython-36.pyc b/CoLo-AT/localization_algos/__pycache__/ekf_gs_ci.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69150a44fca989b0b0e3843b3ddad2a91f1f44f4 Binary files /dev/null and b/CoLo-AT/localization_algos/__pycache__/ekf_gs_ci.cpython-36.pyc differ diff --git a/CoLo-AT/localization_algos/__pycache__/ekf_gs_ci2.cpython-35.pyc b/CoLo-AT/localization_algos/__pycache__/ekf_gs_ci2.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1bb4b98c836ce1ebbd895f1c63f76b070036ea18 Binary files /dev/null and b/CoLo-AT/localization_algos/__pycache__/ekf_gs_ci2.cpython-35.pyc differ diff --git a/CoLo-AT/localization_algos/__pycache__/ekf_gs_ci2.cpython-36.pyc b/CoLo-AT/localization_algos/__pycache__/ekf_gs_ci2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..557e2e83be46e54cd8480385a25d1c0802ece75f Binary files /dev/null and b/CoLo-AT/localization_algos/__pycache__/ekf_gs_ci2.cpython-36.pyc differ diff --git a/CoLo-AT/localization_algos/__pycache__/ekf_gs_sci.cpython-36.pyc b/CoLo-AT/localization_algos/__pycache__/ekf_gs_sci.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ecd385aa4955ec72d14498fca9bd122ff78a551b Binary files /dev/null and b/CoLo-AT/localization_algos/__pycache__/ekf_gs_sci.cpython-36.pyc differ diff --git a/CoLo-AT/localization_algos/__pycache__/ekf_gs_sci2.cpython-36.pyc 
b/CoLo-AT/localization_algos/__pycache__/ekf_gs_sci2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39fb3191fa91a4062b35e5ae88f5b1d2fec4f8b1 Binary files /dev/null and b/CoLo-AT/localization_algos/__pycache__/ekf_gs_sci2.cpython-36.pyc differ diff --git a/CoLo-AT/localization_algos/__pycache__/ekf_ls_bda.cpython-35.pyc b/CoLo-AT/localization_algos/__pycache__/ekf_ls_bda.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..005931bbcedf15b2cb73c58feec0bec63bdfca66 Binary files /dev/null and b/CoLo-AT/localization_algos/__pycache__/ekf_ls_bda.cpython-35.pyc differ diff --git a/CoLo-AT/localization_algos/__pycache__/ekf_ls_bda.cpython-36.pyc b/CoLo-AT/localization_algos/__pycache__/ekf_ls_bda.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ca85c6006b5eaf6bedd40448862c238c2a09aaf Binary files /dev/null and b/CoLo-AT/localization_algos/__pycache__/ekf_ls_bda.cpython-36.pyc differ diff --git a/CoLo-AT/localization_algos/__pycache__/ekf_ls_ci.cpython-36.pyc b/CoLo-AT/localization_algos/__pycache__/ekf_ls_ci.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c46d670d002febc994b991f5e161624431c22975 Binary files /dev/null and b/CoLo-AT/localization_algos/__pycache__/ekf_ls_ci.cpython-36.pyc differ diff --git a/CoLo-AT/localization_algos/__pycache__/ekf_ls_ci2.cpython-36.pyc b/CoLo-AT/localization_algos/__pycache__/ekf_ls_ci2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e30eed1b45497e918f55103b7fe9d301faa1a916 Binary files /dev/null and b/CoLo-AT/localization_algos/__pycache__/ekf_ls_ci2.cpython-36.pyc differ diff --git a/CoLo-AT/localization_algos/__pycache__/localization_algo_framework.cpython-35.pyc b/CoLo-AT/localization_algos/__pycache__/localization_algo_framework.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c70caf19a44a99759548f9b65aac709c0097fd76 Binary 
files /dev/null and b/CoLo-AT/localization_algos/__pycache__/localization_algo_framework.cpython-35.pyc differ diff --git a/CoLo-AT/localization_algos/__pycache__/localization_algo_framework.cpython-36.pyc b/CoLo-AT/localization_algos/__pycache__/localization_algo_framework.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed8bf49bd81b702d10a4fd624404d3f372042198 Binary files /dev/null and b/CoLo-AT/localization_algos/__pycache__/localization_algo_framework.cpython-36.pyc differ diff --git a/CoLo-AT/localization_algos/__pycache__/simple_ekf.cpython-35.pyc b/CoLo-AT/localization_algos/__pycache__/simple_ekf.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6aa1a5652f53acdfb31168e23820c7265f0c4ae Binary files /dev/null and b/CoLo-AT/localization_algos/__pycache__/simple_ekf.cpython-35.pyc differ diff --git a/CoLo-AT/localization_algos/__pycache__/simple_ekf.cpython-36.pyc b/CoLo-AT/localization_algos/__pycache__/simple_ekf.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..796da56e40283212099772fe8c64b1eab28ffb56 Binary files /dev/null and b/CoLo-AT/localization_algos/__pycache__/simple_ekf.cpython-36.pyc differ diff --git a/CoLo-AT/localization_algos/__pycache__/simple_ekf_2.cpython-36.pyc b/CoLo-AT/localization_algos/__pycache__/simple_ekf_2.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d373b47ea3d79b661c68dd98e07c23814d5df3f Binary files /dev/null and b/CoLo-AT/localization_algos/__pycache__/simple_ekf_2.cpython-36.pyc differ diff --git a/CoLo-AT/localization_algos/__pycache__/simple_ekf_meas_only.cpython-35.pyc b/CoLo-AT/localization_algos/__pycache__/simple_ekf_meas_only.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49abc100156a47f7e3866cab47e5b639d5991f4f Binary files /dev/null and b/CoLo-AT/localization_algos/__pycache__/simple_ekf_meas_only.cpython-35.pyc differ diff --git 
a/CoLo-AT/localization_algos/__pycache__/simple_ekf_meas_only.cpython-36.pyc b/CoLo-AT/localization_algos/__pycache__/simple_ekf_meas_only.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..392809c937f58499e81e740e57fb568831ccf316 Binary files /dev/null and b/CoLo-AT/localization_algos/__pycache__/simple_ekf_meas_only.cpython-36.pyc differ diff --git a/CoLo-AT/localization_algos/cal_test.py b/CoLo-AT/localization_algos/cal_test.py new file mode 100644 index 0000000000000000000000000000000000000000..67766b17bcc4b786032e1102ae64010d957af635 --- /dev/null +++ b/CoLo-AT/localization_algos/cal_test.py @@ -0,0 +1,44 @@ + +import scipy +import numpy as np +from sympy import * +''' +# for absolute landmark +x_symbol, y_symbol = symbols('x y') +h1 = sqrt((lx-x_symbol)*(lx-x_symbol)+(ly-y_symbol)*(ly-y_symbol)) +h2 = atan2((ly-y_symbol), (lx-x_symbol)) + +h_11 = diff(h1, x_symbol) +print(h_11) +h_12 = diff(h1, y_symbol) +print(h_12) +h_21 = diff(h2, x_symbol) +print(h_21) +h_22 = diff(h2, y_symbol) +print(h_22) +''' +''' +h11 = float(h_11.subs([(x_symbol, x),(y_symbol, y)])) +h12 = float(h_12.subs([(x_symbol, x),(y_symbol, y)])) +h21 = float(h_21.subs([(x_symbol, x),(y_symbol, y)])) +h22 = float(h_22.subs([(x_symbol, x),(y_symbol, y)])) +''' + +# for relative landmark +x_i, y_i, x_j, y_j = symbols('x_i, y_i, x_j, y_j') + +h1 = sqrt((x_j-x_i)*(x_j-x_i)+(y_j-y_i)*(y_j-y_i)) +h2 = atan2((y_j-y_i), (x_j-x_i)) + +h_11 = diff(h1, x_j) +print(h_11) +h_12 = diff(h1, y_j) +print(h_12) +h_21 = diff(h2, x_j) +print(h_21) +h_22 = diff(h2, y_j) +print(h_22) + + +temp = np.asmatrix(scipy.linalg.sqrtm(np.matrix('1,2;3,4')))**2 +print(temp) diff --git a/CoLo-AT/localization_algos/centralized_ekf.py b/CoLo-AT/localization_algos/centralized_ekf.py new file mode 100755 index 0000000000000000000000000000000000000000..821c31e8ad2bdde1010711d564d091b098f08715 --- /dev/null +++ b/CoLo-AT/localization_algos/centralized_ekf.py @@ -0,0 +1,137 @@ +import numpy as np 
+import math +from math import cos, sin, atan2, sqrt +from numpy import dot +import scipy.linalg as linalg +#import parameters +from localization_algo_framework import ekf_algo_framework + +def rot_mtx(theta): + return np.matrix([[cos(theta), -sin(theta)], [sin(theta), cos(theta)]]) + + +class Centralized_EKF(ekf_algo_framework): + + def __init__(self, algo_name): + ekf_algo_framework.__init__(self, algo_name) + + def state_variance_init(self, num_robots): + return 0.01*np.matrix(np.identity(2*num_robots), dtype = float) + + def calculate_trace_state_variance(self, robot_data): + [s, orinetations, sigma_s, index] = robot_data + i = 2*index + trace_state_var = np.trace(sigma_s[i:i+2, i:i+2]) + return trace_state_var + + #ef propagation_update(self, s, orinetations, sigma_s, input_data, sigma_odo, index): + def propagation_update(self, robot_data, sensor_data): + [s, orinetations, sigma_s, index] = robot_data + [measurement_data, sensor_covariance] = sensor_data + sigma_odo = sensor_covariance + num_robots = int(len(s)/2) + + delta_t = measurement_data[0] + v = measurement_data[1] + orinetations[index] = measurement_data[2] + self_theta = orinetations[index] + + i = 2*index + s[i,0] = s[i,0] + cos(self_theta)*v*delta_t #x + s[i+1,0] = s[i+1,0] + sin(self_theta)*v*delta_t #y + + G = rot_mtx(self_theta) + W = sigma_odo + W[0,0] = sigma_odo[0,0]*delta_t*delta_t + #W[1,1] = sigma_odo[1,1]*delta_t*delta_t*v*v + W[1,1] = sigma_odo[1,1]*delta_t*delta_t + Q_ii = G * W * G.getT() + Q = np.zeros((2*num_robots,2*num_robots)) + Q[i:i+2, i:i+2] = Q_ii + sigma_s = sigma_s + Q #errors in the odometric measurements of the robots are uncorrelated, it's a diagonal matrix + + return [s, orinetations, sigma_s] + + + + #def absolute_obser_update(self, s, orinetations, sigma_s, input_data, sigma_ob, index): + def absolute_obser_update(self, robot_data, sensor_data): + [s, orinetations, sigma_s, index] = robot_data + [measurement_data, sensor_covariance] = sensor_data + sigma_ob = 
sensor_covariance + num_robots = int(len(s)/2) + self_theta = orinetations[index] + i = 2*index + landmark_loc = measurement_data[0] + meas_range = measurement_data[1] + bearing = measurement_data[2] + + ################### + z = np.matrix([meas_range*cos(bearing), meas_range*sin(bearing)]).getT() # actual measurement + # for estimated measurement + delta_x = landmark_loc[0] - s.item(i,0) + delta_y = landmark_loc[1] - s.item(i+1,0) + z_hat = rot_mtx(self_theta).getT()*(np.matrix([delta_x, delta_y]).getT()) # shifted to robot frame + + H_i = np.matrix(np.zeros((2,2*num_robots))) + H_i[0, i] = -1 + H_i[1, i+1] = -1 + H = rot_mtx(self_theta).getT()*H_i + + #sigma_ob[1,1] = sigma_ob[1,1]*meas_range*meas_range + V = rot_mtx(bearing) + R = V*sigma_ob*V.getT() #sigma_z + + sigma_invention = H * sigma_s * H.getT() + R + kalman_gain = sigma_s*H.getT()*sigma_invention.getI() + s = s + kalman_gain*(z - z_hat) + sigma_s = sigma_s - kalman_gain*H*sigma_s + ################## + + return [s, orinetations, sigma_s] + + + + #def relative_obser_update(self, s, orinetations, sigma_s, input_data, sigma_ob, index): + def relative_obser_update(self, robot_data, sensor_data): + #when robot i observes robot j + + [s, orinetations, sigma_s, index] = robot_data + [measurement_data, sensor_covariance] = sensor_data + sigma_ob = sensor_covariance + + num_robots = int(len(s)/2) + self_theta = orinetations[index] + + + ############### + i = index * 2 + obser_index = measurement_data[0] + meas_range = measurement_data[1] + bearing = measurement_data[2] + j = obser_index * 2 + + z = np.matrix([meas_range*cos(bearing), meas_range*sin(bearing)]).getT() # actual measurement + + H_ij = np.zeros((2,2*num_robots)) + H_ij[0, i] = -1 + H_ij[1, i+1] = -1 + H_ij[0, j] = 1 + H_ij[1, j+1] = 1 + + H = rot_mtx(self_theta).getT()*H_ij + + z_hat = H * s + + #sigma_ob[1,1] = sigma_ob[1,1]*meas_range*meas_range + + V = rot_mtx(bearing) + R = V*sigma_ob*V.getT() + + sigma_invention = H* sigma_s*H.getT() + R + 
kalman_gain = sigma_s*H.getT()*sigma_invention.getI() + s = s + kalman_gain*(z - z_hat) + sigma_s = sigma_s - kalman_gain*H*sigma_s + ################# + + return [s, orinetations, sigma_s] diff --git a/CoLo-AT/localization_algos/ekf_gs_bound.py b/CoLo-AT/localization_algos/ekf_gs_bound.py new file mode 100644 index 0000000000000000000000000000000000000000..1736d1f0a226851063fcf23b551f7101b6f609aa --- /dev/null +++ b/CoLo-AT/localization_algos/ekf_gs_bound.py @@ -0,0 +1,143 @@ +import numpy as np +from numpy import matrix +from math import cos, sin, atan2, sqrt +from localization_algo_framework import ekf_algo_framework + +def rot_mtx(theta): + return matrix([[cos(theta), -sin(theta)], [sin(theta), cos(theta)]]) + + +class EKF_GS_BOUND(ekf_algo_framework): + def __init__(self, algo_name): + ekf_algo_framework.__init__(self, algo_name) + + def state_variance_init(self, num_robots): + return 0.1*np.matrix(np.identity(2*num_robots), dtype = float) + + def calculate_trace_state_variance(self, robot_data): + [s, orinetations, sigma_s, index] = robot_data + i = 2*index + trace_state_var = np.trace(sigma_s[i:i+2, i:i+2]) + return trace_state_var + + def propagation_update(self, robot_data, sensor_data): + [s, orinetations, sigma_s, index] = robot_data + [measurement_data, sensor_covariance] = sensor_data + sigma_odo = sensor_covariance + + var_v = 0.1 # variance of the velocity + + i = 2*index + num_robots = int(len(s)/2) + delta_t = measurement_data[0] + v = measurement_data[1] + orinetations[index] = measurement_data[2] + self_theta = orinetations[index] + + Q = sigma_odo + Q[1,1] = Q[1,1]*v*v + W = delta_t*rot_mtx(self_theta) + + s[i,0] = s[i,0] + cos(self_theta)*v*delta_t #x + s[i+1,0] = s[i+1,0] + sin(self_theta)*v*delta_t #y + + for j in range(num_robots): + jj = 2*j + sigma_s[jj:jj+2, jj:jj+2] += delta_t*delta_t*var_v*np.identity(2) + + return [s, orinetations, sigma_s] + + def absolute_obser_update(self, robot_data, sensor_data): + [s, orinetations, sigma_s, 
index] = robot_data + [measurement_data, sensor_covariance] = sensor_data + + sigma_ob = sensor_covariance + num_robots = int(len(s)/2) + self_theta = orinetations[index] + ''' + landmark_loc = measurement_data[0] + dis = measurement_data[1] + phi = measurement_data[2] + ''' + + i = 2*index + H_i = np.matrix(np.zeros((2,2*num_robots)), dtype = float) + H_i[0, i] = -1 + H_i[1, i+1] = -1 + H = rot_mtx(self_theta).getT()*H_i + + var_range = 0.1 + d_max = pow(0.05,2) + var_bearing = pow(2.0 / 180, 2) + + R = np.matrix(max(var_range, d_max, var_bearing)*np.identity(2)) + sigma_s = (sigma_s.getI()+H.getT()*R.getI()*H).getI() + + return [s, orinetations, sigma_s] + + + def relative_obser_update(self, robot_data, sensor_data): + #when robot i observes robot j + + [s, orinetations, sigma_s, index] = robot_data + [measurement_data, sensor_covariance] = sensor_data + sigma_ob = sensor_covariance + + num_robots = int(len(s)/2) + self_theta = orinetations[index] + + i = index * 2 + obser_index = measurement_data[0] + #dis = measurement_data[1] + #phi = measurement_data[2] + j = obser_index * 2 + + #z = matrix([dis*cos(phi), dis*sin(phi)]).getT() + + H_ij = np.zeros((2,2*num_robots)) + H_ij[0, i] = -1 + H_ij[1, i+1] = -1 + H_ij[0, j] = 1 + H_ij[1, j+1] = 1 + H = rot_mtx(self_theta).getT()*H_ij + + var_range = 0.1 + d_max = pow(0.05,2) + var_bearing = pow(2.0 / 180, 2) + + R = np.matrix(max(var_range, d_max, var_bearing)*np.identity(2)) + sigma_s = (sigma_s.getI()+H.getT()*R.getI()*H).getI() + + return [s, orinetations, sigma_s] + + def communication(self, robot_data, sensor_data): + + [s, orinetations, sigma_s, index] = robot_data + [comm_data, comm_variance] = sensor_data + [sender_idx, comm_robot_s, comm_robot_sigma_s]=comm_data + + #sent to robot i from robot k + + num_robots = int(len(s)/2) + self_theta = orinetations[index] + i = index * 2 + k = sender_idx*2 + e = 0.9 # (iii+1)*0.01 + #e = comm_e + + H_k = np.zeros((2,2*num_robots)) + H_k[0, i] = -1 + H_k[1, i+1] = -1 + 
H_k[0, k] = 1 + H_k[1, k+1] = 1 + H = rot_mtx(self_theta).getT()*H_k + + var_range = 0.25 + d_max = pow(0.05,2) + var_bearing = pow(2.0 / 180, 2) + + R = np.matrix(max(var_range, d_max, var_bearing)*np.identity(2)) + sig_inv = e*sigma_s.getI() + (1-e)*(H.getT()*R.getI()*H) + sigma_s = sig_inv.getI() + + return [s, orinetations, sigma_s] \ No newline at end of file diff --git a/CoLo-AT/localization_algos/ekf_gs_ci.py b/CoLo-AT/localization_algos/ekf_gs_ci.py new file mode 100644 index 0000000000000000000000000000000000000000..9933159ce6137d758867cd75bdd29519b9234ff9 --- /dev/null +++ b/CoLo-AT/localization_algos/ekf_gs_ci.py @@ -0,0 +1,148 @@ +import numpy as np +from numpy import matrix +from math import cos, sin, atan2, sqrt +from localization_algo_framework import ekf_algo_framework + +def rot_mtx(theta): + return matrix([[cos(theta), -sin(theta)], [sin(theta), cos(theta)]]) + + +class EKF_GS_CI(ekf_algo_framework): + def __init__(self, algo_name): + ekf_algo_framework.__init__(self, algo_name) + + def state_variance_init(self, num_robots): + return 0.02*np.matrix(np.identity(2*num_robots), dtype = float) + + def calculate_trace_state_variance(self, robot_data): + [s, orinetations, sigma_s, index] = robot_data + i = 2*index + trace_state_var = np.trace(sigma_s[i:i+2, i:i+2]) + return trace_state_var + + def propagation_update(self, robot_data, sensor_data): + [s, orinetations, sigma_s, index] = robot_data + [measurement_data, sensor_covariance] = sensor_data + sigma_odo = sensor_covariance + + var_v = 0.1 + + num_robots = int(len(s)/2) + delta_t = measurement_data[0] + v = measurement_data[1] + orinetations[index] = measurement_data[2] + self_theta = orinetations[index] + + i = 2*index + + s[i,0] = s[i,0] + cos(self_theta)*v*delta_t #x + s[i+1,0] = s[i+1,0] + sin(self_theta)*v*delta_t #y + + for j in range(num_robots): + jj = 2*j + if j==index: + #print(np.matmul(sigma_odo,rot_mtx(self_theta).getT())) + sigma_s[jj:jj+2, jj:jj+2] += 
delta_t*delta_t*rot_mtx(self_theta)*sigma_odo*rot_mtx(self_theta).getT() + else: + sigma_s[jj:jj+2, jj:jj+2] += delta_t*delta_t*var_v*np.identity(2) + + return [s, orinetations, sigma_s] + + + def absolute_obser_update(self, robot_data, sensor_data): + [s, orinetations, sigma_s, index] = robot_data + [measurement_data, sensor_covariance] = sensor_data + + + sigma_ob = sensor_covariance + num_robots = int(len(s)/2) + self_theta = orinetations[index] + + landmark_loc = measurement_data[0] + dis = measurement_data[1] + phi = measurement_data[2] + + i = 2*index + H_i = np.matrix(np.zeros((2,2*num_robots)), dtype = float) + H_i[0, i] = -1 + H_i[1, i+1] = -1 + H = rot_mtx(self_theta).getT()*H_i + + + #z_hat= rot_mtx(self_theta).getT() * (landmark_loc + H_i*s) + delta_x = landmark_loc[0] - s.item(i,0) + delta_y = landmark_loc[1] - s.item(i+1,0) + z_hat = rot_mtx(self_theta).getT()*(np.matrix([delta_x, delta_y]).getT()) + z = matrix([dis*cos(phi), dis*sin(phi)]).getT() + + sigma_z = rot_mtx(phi) * sigma_ob * rot_mtx(phi).getT() + + sigma_invention = H * sigma_s * H.getT() + sigma_z + kalman_gain = sigma_s*H.getT()*sigma_invention.getI() + + s = s + kalman_gain*(z - z_hat) + sigma_s = sigma_s - kalman_gain*H*sigma_s + + return [s, orinetations, sigma_s] + + ''' + def relative_obser_update(self, robot_data, sensor_data): + #when robot i observes robot j + + [s, orinetations, sigma_s, index] = robot_data + [measurement_data, sensor_covariance] = sensor_data + sigma_ob = sensor_covariance + + num_robots = int(len(s)/2) + self_theta = orinetations[index] + + i = index * 2 + obser_index = measurement_data[0] + meas_range = measurement_data[1] + bearing = measurement_data[2] + j = obser_index * 2 + + z = np.matrix([meas_range*cos(bearing), meas_range*sin(bearing)]).getT() # actual measurement + + H_ij = np.zeros((2,2*num_robots)) + H_ij[0, i] = -1 + H_ij[1, i+1] = -1 + H_ij[0, j] = 1 + H_ij[1, j+1] = 1 + H = rot_mtx(self_theta).getT()*H_ij + + z_hat = H * s + + sigma_ob[1,1] = 
sigma_ob[1,1]*meas_range*meas_range + + V = rot_mtx(bearing) + R = V*sigma_ob*V.getT() #sigma_z + + sigma_invention = H*sigma_s*H.getT() + R + kalman_gain = sigma_s*H.getT()*sigma_invention.getI() + + s = s + kalman_gain*(z - z_hat) + print('inside ekf gs ci') + print(s) + sigma_s = sigma_s - kalman_gain*H*sigma_s + + return [s, orinetations, sigma_s] + ''' + ''' + def communication(self, robot_data, sensor_data): + + [s, orinetations, sigma_s, index] = robot_data + [comm_data, comm_e] = sensor_data + + comm_robot_s = comm_data[0] + comm_robot_sigma_s = comm_data[1] + + e = 0.23 # (iii+1)*0.01 + #e = comm_e + + sig_inv = e*sigma_s.getI() + (1-e)*comm_robot_sigma_s.getI() + s = sig_inv.getI() * (e*sigma_s.getI()*s + (1-e)*comm_robot_sigma_s.getI()*comm_robot_s) + sigma_s = sig_inv.getI() + + return [s, orinetations, sigma_s] + ''' \ No newline at end of file diff --git a/CoLo-AT/localization_algos/ekf_gs_sci2.py b/CoLo-AT/localization_algos/ekf_gs_sci2.py new file mode 100644 index 0000000000000000000000000000000000000000..482547dbca8a5758011615d5ed2b3e63e7b342e1 --- /dev/null +++ b/CoLo-AT/localization_algos/ekf_gs_sci2.py @@ -0,0 +1,205 @@ +import numpy as np +from numpy import matrix +from math import cos, sin, atan2, sqrt +from localization_algo_framework import ekf_algo_framework + +def rot_mtx(theta): + return matrix([[cos(theta), -sin(theta)], [sin(theta), cos(theta)]]) + +class EKF_GS_SCI2(ekf_algo_framework): + def __init__(self, algo_name): + self.algo_name = algo_name + + def state_variance_init(self, num_robots): + sigma_d = 0.1*np.matrix(np.identity(2*num_robots), dtype = float) + sigma_i = 0.1*np.matrix(np.identity(2*num_robots), dtype = float) + state_variance = [sigma_d, sigma_i] + return state_variance + + def calculate_trace_state_variance(self, robot_data): + [s, orinetations, sigma_s, index] = robot_data + i = 2*index + [sigma_d, sigma_i] = sigma_s + total_sigma = sigma_d + sigma_i + trace_state_var = np.trace(total_sigma[i:i+2, i:i+2]) + return 
np.trace(total_sigma) + + + def propagation_update(self, robot_data, sensor_data): + [s, orinetations, sigma_s, index] = robot_data + [measurement_data, sensor_covariance] = sensor_data + sigma_odo = sensor_covariance + + var_v = 0.1 + + num_robots = int(len(s)/2) + delta_t = measurement_data[0] + v = measurement_data[1] + orinetations[index] = measurement_data[2] + self_theta = orinetations[index] + + Q = sigma_odo + W = delta_t*rot_mtx(self_theta) + + i = 2*index + + s[i,0] = s[i,0] + cos(self_theta)*v*delta_t #x + s[i+1,0] = s[i+1,0] + sin(self_theta)*v*delta_t #y + + [sigma_d, sigma_i] = sigma_s + + Q = sigma_odo + W = delta_t*rot_mtx(self_theta)*np.matrix([[1,0],[0,v]]) + + for j in range(num_robots): + jj = 2*j + + if j ==index: + total_sigma = sigma_i[jj:jj+2, jj:jj+2] + sigma_d[jj:jj+2, jj:jj+2] + sigma_i[jj:jj+2, jj:jj+2] += W*Q*W.getT() + total_sigma += W*Q*W.getT() + sigma_d[jj:jj+2, jj:jj+2] = total_sigma - sigma_i[jj:jj+2, jj:jj+2] + + else: + ''' + total_sigma = sigma_i[jj:jj+2, jj:jj+2] + sigma_d[jj:jj+2, jj:jj+2] #repeated + sigma_i[jj:jj+2, jj:jj+2] += delta_t*delta_t*var_v*np.identity(2) #unknow other robot's movement + total_sigma += delta_t*delta_t*var_v*np.identity(2) + sigma_d[jj:jj+2, jj:jj+2] = total_sigma - sigma_i[jj:jj+2, jj:jj+2] + ''' + + sigma_s = [sigma_d, sigma_i] + + return [s, orinetations, sigma_s] + + + + def absolute_obser_update(self, robot_data, sensor_data): + [s, orinetations, sigma_s, index] = robot_data + [measurement_data, sensor_covariance] = sensor_data + + sigma_ob = sensor_covariance + num_robots = int(len(s)/2) + self_theta = orinetations[index] + + landmark_loc = measurement_data[0] + dis = measurement_data[1] + phi = measurement_data[2] + + i = 2*index + H_i = np.matrix(np.zeros((2,2*num_robots)), dtype = float) + H_i[0, i] = -1 + H_i[1, i+1] = -1 + H = rot_mtx(self_theta).getT()*H_i + + delta_x = landmark_loc[0] - s.item(i,0) + delta_y = landmark_loc[1] - s.item(i+1,0) + z_hat = 
rot_mtx(self_theta).getT()*(np.matrix([delta_x, delta_y]).getT()) + z = matrix([dis*cos(phi), dis*sin(phi)]).getT() + + [sigma_d, sigma_i] = sigma_s + total_sigma = sigma_i + sigma_d + + sigma_ob[1,1] = sigma_ob[1,1]*dis*dis + sigma_z = rot_mtx(phi) * sigma_ob * rot_mtx(phi).getT() + + + sigma_invention = H * total_sigma * H.getT() + sigma_z + kalman_gain = total_sigma*H.getT()*sigma_invention.getI() + + s = s + kalman_gain*(z - z_hat) + + total_sigma = total_sigma - kalman_gain*H*total_sigma + i_mtx = np.identity(num_robots*2) + sigma_i = (i_mtx - kalman_gain*H) * sigma_i * (i_mtx.copy() - kalman_gain*H).getT() + kalman_gain * sigma_z * kalman_gain.getT() + sigma_d = total_sigma - sigma_i + + sigma_s = [sigma_d, sigma_i] + + return [s, orinetations, sigma_s] + + def relative_obser_update(self, robot_data, sensor_data): + #when robot i observes robot j + + [s, orinetations, sigma_s, index] = robot_data + [measurement_data, sensor_covariance] = sensor_data + sigma_ob = sensor_covariance + + num_robots = int(len(s)/2) + self_theta = orinetations[index] + + i = index * 2 + obser_index = measurement_data[0] + dis = measurement_data[1] + phi = measurement_data[2] + j = obser_index * 2 + + z = matrix([dis*cos(phi), dis*sin(phi)]).getT() + + H_ij = np.zeros((2,2*num_robots)) + H_ij[0, i] = -1 + H_ij[1, i+1] = -1 + H_ij[0, j] = 1 + H_ij[1, j+1] = 1 + H = rot_mtx(self_theta).getT()*H_ij + + #z_hat = H * s + #sigma_ob[1,1] = sigma_ob[1,1]*meas_range*meas_range + + delta_x = s.item(j,0) - s.item(i,0) + delta_y = s.item(j+1,0) - s.item(i+1,0) + z_hat = rot_mtx(self_theta).getT()*(np.matrix([delta_x, delta_y]).getT()) + + sigma_ob[1,1] = sigma_ob[1,1]*dis*dis + sigma_z = rot_mtx(phi)*sigma_ob*rot_mtx(phi).getT() + + [sigma_d, sigma_i] = sigma_s + + e = 0.83 + + p_1 = (1/e) * sigma_d + sigma_i + + # + p_2 = (1/(1-e)) * H * (sigma_i + sigma_d) * H.getT() + sigma_z + sigma_invention = H * p_1 * H.getT() + p_2 + kalman_gain = p_1 *H.getT()*sigma_invention.getI() + # + + s = s + 
kalman_gain*(z - z_hat) + i_mtx = np.identity(num_robots*2) + + total_sigma = (i_mtx.copy()-kalman_gain*H) * p_1 + sigma_i = (i_mtx - kalman_gain*H) * sigma_i * (i_mtx.copy() - kalman_gain*H).getT() + kalman_gain * sigma_z * kalman_gain.getT() + sigma_d = total_sigma - sigma_i + + sigma_s = [sigma_d, sigma_i] + + return [s, orinetations, sigma_s] + + def communication(self, robot_data, sensor_data): + + [s, orinetations, sigma_s, index] = robot_data + [comm_data, comm_variance] = sensor_data + [comm_robot_s, orinetations, comm_robot_sigma_s]=comm_data + num_robots = int(len(s)/2) + + e = 0.9 # (iii+1)*0.01 + + [sigma_d, sigma_i] = sigma_s + [comm_robot_sigma_d, comm_robot_sigma_i] = comm_robot_sigma_s + + p_1 = (1/e) * sigma_d + sigma_i + p_2 = (1/(1-e)) * comm_robot_sigma_d + comm_robot_sigma_i + + kalman_gain = p_1 * (p_1+p_2).getI() + s = s + kalman_gain * (comm_robot_s - s) + + i_mtx = np.identity(num_robots*2) + total_sigma = (i_mtx.copy() - kalman_gain) * p_1 + sigma_i = (i_mtx - kalman_gain) * sigma_i * (i_mtx.copy() - kalman_gain).getT() + kalman_gain * comm_robot_sigma_i * kalman_gain.getT() + sigma_d = total_sigma - sigma_i + sigma_s = [sigma_d, sigma_i] + + return [s, orinetations, sigma_s] + + diff --git a/CoLo-AT/localization_algos/ekf_ls_bda.py b/CoLo-AT/localization_algos/ekf_ls_bda.py new file mode 100644 index 0000000000000000000000000000000000000000..4cde55ada221d9075d853ddf1fdcc6b740b251d9 --- /dev/null +++ b/CoLo-AT/localization_algos/ekf_ls_bda.py @@ -0,0 +1,175 @@ +# coding: utf-8 + + +#local state CI (LS-BDA) [9] - “Recursive decentralized collaborative localization for sparsely communicating robots†- L. Luft, T. Schubert, S. I. Roumeliotis, and W. 
# coding: utf-8
# Local state with block diagonal approximation (LS-BDA) algorithm.
# "Recursive decentralized collaborative localization for sparsely
# communicating robots" - L. Luft, T. Schubert, S. I. Roumeliotis and W. Burgard
import numpy as np
from numpy import matrix
from numpy import linalg
from math import cos, sin, atan2, sqrt
from localization_algo_framework import ekf_algo_framework


def rot_mtx(theta):
    """Return the 2x2 rotation matrix for an angle theta (radians)."""
    return matrix([[cos(theta), -sin(theta)], [sin(theta), cos(theta)]])


class EKF_LS_BDA(ekf_algo_framework):
    """EKF cooperative localization keeping each robot's local state and a
    block-diagonal approximation of the inter-robot covariance.

    The joint state s stacks every robot's (x, y): robot k owns rows 2k and
    2k+1 of s and the matching 2x2 diagonal block of sigma_s.
    """

    def __init__(self, algo_name):
        ekf_algo_framework.__init__(self, algo_name)

    def state_variance_init(self, num_robots):
        """Initial joint covariance: small isotropic uncertainty per robot."""
        return 0.01 * np.matrix(np.identity(2 * num_robots), dtype=float)

    def calculate_trace_state_variance(self, robot_data):
        """Trace of the full joint covariance, used as a scalar health metric."""
        [s, orinetations, sigma_s, index] = robot_data
        return np.trace(sigma_s)

    def propagation_update(self, robot_data, sensor_data):
        """Propagate robot `index` with odometry (delta_t, v, theta).

        Only the robot's own block grows: odometry noise is uncorrelated
        between robots, so the process noise Q is block diagonal.
        """
        [s, orinetations, sigma_s, index] = robot_data
        [measurement_data, sensor_covariance] = sensor_data
        sigma_odo = sensor_covariance

        num_robots = int(len(s) / 2)
        delta_t = measurement_data[0]
        v = measurement_data[1]
        orinetations[index] = measurement_data[2]
        self_theta = orinetations[index]

        i = 2 * index
        s[i, 0] = s[i, 0] + cos(self_theta) * v * delta_t  # x
        s[i + 1, 0] = s[i + 1, 0] + sin(self_theta) * v * delta_t  # y

        # BUGFIX: operate on a copy; the draft aliased sigma_odo and silently
        # rescaled the caller's odometry covariance on every call.
        W = sigma_odo.copy()
        W[0, 0] = sigma_odo[0, 0] * delta_t * delta_t
        W[1, 1] = sigma_odo[1, 1] * delta_t * delta_t * v * v
        G = rot_mtx(self_theta)
        Q_ii = G * W * G.getT()

        Q = np.zeros((2 * num_robots, 2 * num_robots))
        Q[i:i + 2, i:i + 2] = Q_ii
        sigma_s = sigma_s + Q

        return [s, orinetations, sigma_s]

    def absolute_obser_update(self, robot_data, sensor_data):
        """Update robot `index` with a landmark (range, bearing) observation."""
        [s, orinetations, sigma_s, index] = robot_data
        [measurement_data, sensor_covariance] = sensor_data
        # BUGFIX: copy before the range-dependent scaling below so the
        # caller's sensor covariance is not mutated in place.
        sigma_ob = sensor_covariance.copy()

        num_robots = int(len(s) / 2)
        self_theta = orinetations[index]
        i = 2 * index

        local_s = s[i:i + 2]
        local_sigma = sigma_s[i:i + 2, i:i + 2]

        landmark_loc = measurement_data[0]
        meas_range = measurement_data[1]
        bearing = measurement_data[2]

        # Actual measurement expressed in the robot's body frame.
        z = matrix([meas_range * cos(bearing), meas_range * sin(bearing)]).getT()
        # Predicted measurement from the landmark position and the estimate.
        delta_x = landmark_loc[0] - s.item(i, 0)
        delta_y = landmark_loc[1] - s.item(i + 1, 0)
        z_hat = rot_mtx(self_theta).getT() * (np.matrix([delta_x, delta_y]).getT())

        H_i = matrix([[-1, 0], [0, -1]], dtype=float)
        H = rot_mtx(self_theta).getT() * H_i

        sigma_ob[1, 1] = sigma_ob[1, 1] * meas_range * meas_range
        V = rot_mtx(bearing)
        R = V * sigma_ob * V.getT()  # measurement covariance in cartesian form
        sigma_innovation = H * local_sigma * H.getT() + R
        kalman_gain = local_sigma * H.getT() * sigma_innovation.getI()

        s[i:i + 2] = local_s + kalman_gain * (z - z_hat)
        sigma_s[i:i + 2, i:i + 2] = local_sigma - kalman_gain * H * local_sigma

        # Rescale the cross-covariance blocks with every other robot.
        multi_i = (np.identity(2) - kalman_gain * H)
        for k in range(num_robots):
            if not (k == index):
                kk = 2 * k
                sigma_s[i:i + 2, kk:kk + 2] = multi_i * sigma_s[i:i + 2, kk:kk + 2]
                sigma_s[kk:kk + 2, i:i + 2] = sigma_s[kk:kk + 2, i:i + 2] * multi_i.getT()

        return [s, orinetations, sigma_s]

    def relative_obser_update(self, robot_data, sensor_data):
        """Update when robot `index` observes robot `obser_index` (range, bearing)."""
        [s, orinetations, sigma_s, index] = robot_data
        [measurement_data, sensor_covariance] = sensor_data
        # BUGFIX: copy before the range-dependent scaling below.
        sigma_ob = sensor_covariance.copy()

        num_robots = int(len(s) / 2)
        self_theta = orinetations[index]

        obser_index = measurement_data[0]
        meas_range = measurement_data[1]
        bearing = measurement_data[2]

        i = index * 2
        j = obser_index * 2

        H_ij = np.zeros((2, 2 * num_robots))
        H_ij[0, i] = -1
        H_ij[1, i + 1] = -1
        H_ij[0, j] = 1
        H_ij[1, j + 1] = 1

        H = rot_mtx(self_theta).getT() * H_ij
        z_hat = H * s
        z = matrix([meas_range * cos(bearing), meas_range * sin(bearing)]).getT()

        # Keep only the i/j blocks of the joint covariance: the BDA approximation.
        reduced_sigma = np.zeros((len(s), len(s)))
        reduced_sigma[i:i + 2, i:i + 2] = sigma_s[i:i + 2, i:i + 2]
        reduced_sigma[j:j + 2, j:j + 2] = sigma_s[j:j + 2, j:j + 2]
        reduced_sigma[i:i + 2, j:j + 2] = sigma_s[i:i + 2, j:j + 2]
        reduced_sigma[j:j + 2, i:i + 2] = sigma_s[j:j + 2, i:i + 2]

        # Pre-update block inverses, used to rescale cross terms afterwards.
        multi_i = sigma_s[i:i + 2, i:i + 2].getI()
        multi_j = sigma_s[j:j + 2, j:j + 2].getI()

        sigma_ob[1, 1] = sigma_ob[1, 1] * meas_range * meas_range
        V = rot_mtx(bearing)
        R = V * sigma_ob * V.getT()
        sigma_innovation = H * reduced_sigma * H.getT() + R
        kalman_gain = reduced_sigma * H.getT() * sigma_innovation.getI()

        s[i:i + 2] = s[i:i + 2] + kalman_gain[i:i + 2] * (z - z_hat)
        s[j:j + 2] = s[j:j + 2] + kalman_gain[j:j + 2] * (z - z_hat)

        sigma_s = sigma_s - kalman_gain * H * reduced_sigma

        multi_i = sigma_s[i:i + 2, i:i + 2] * multi_i
        multi_j = sigma_s[j:j + 2, j:j + 2] * multi_j

        # BUGFIX: iterate over all robots (the draft hard-coded range(5)),
        # and do the row and column blocks in a single pass (disjoint blocks).
        for k in range(num_robots):
            if not (k == index) and not (k == obser_index):
                kk = 2 * k
                sigma_s[i:i + 2, kk:kk + 2] = multi_i * sigma_s[i:i + 2, kk:kk + 2]
                sigma_s[j:j + 2, kk:kk + 2] = multi_j * sigma_s[j:j + 2, kk:kk + 2]
                sigma_s[kk:kk + 2, i:i + 2] = sigma_s[kk:kk + 2, i:i + 2] * multi_i.getT()
                sigma_s[kk:kk + 2, j:j + 2] = sigma_s[kk:kk + 2, j:j + 2] * multi_j.getT()

        return [s, orinetations, sigma_s]
# coding: utf-8
# Local state with covariance intersection (LS-CI) algorithm.
# "Decentralized multi-robot cooperative localization using covariance
# intersection" - L. C. Carrillo-Arce, E. D. Nerurkar, J. L. Gordillo and
# S. I. Roumeliotis
import numpy as np
from numpy import matrix
from numpy import linalg
from math import cos, sin, atan2, sqrt
from localization_algo_framework import ekf_algo_framework


def rot_mtx(theta):
    """Return the 2x2 rotation matrix for an angle theta (radians)."""
    return matrix([[cos(theta), -sin(theta)], [sin(theta), cos(theta)]])


class EKF_LS_CI(ekf_algo_framework):
    """EKF localization where inter-robot updates are fused with covariance
    intersection (CI) instead of tracking cross-covariances."""

    def __init__(self, algo_name):
        ekf_algo_framework.__init__(self, algo_name)

    def state_variance_init(self, num_robots):
        """Initial joint covariance: small isotropic uncertainty per robot."""
        return 0.01 * np.matrix(np.identity(2 * num_robots), dtype=float)

    def calculate_trace_state_variance(self, robot_data):
        """Trace of the robot's own 2x2 covariance block."""
        [s, orinetations, sigma_s, index] = robot_data
        i = 2 * index
        return np.trace(sigma_s[i:i + 2, i:i + 2])

    def propagation_update(self, robot_data, sensor_data):
        """Propagate robot `index` with odometry; only its own block grows."""
        [s, orinetations, sigma_s, index] = robot_data
        [measurement_data, sensor_covariance] = sensor_data
        sigma_odo = sensor_covariance

        delta_t = measurement_data[0]
        v = measurement_data[1]
        orinetations[index] = measurement_data[2]
        self_theta = orinetations[index]

        i = 2 * index
        s[i, 0] = s[i, 0] + cos(self_theta) * v * delta_t  # x
        s[i + 1, 0] = s[i + 1, 0] + sin(self_theta) * v * delta_t  # y

        var_u_v = sigma_odo[0, 0]  # velocity input variance
        sigma_s[i:i + 2, i:i + 2] = sigma_s[i:i + 2, i:i + 2] + \
            delta_t * delta_t * rot_mtx(self_theta) * matrix([[var_u_v, 0], [0, 0]]) * rot_mtx(self_theta).T

        return [s, orinetations, sigma_s]

    def absolute_obser_update(self, robot_data, sensor_data):
        """Landmark (range, bearing) update of the robot's own block.

        CLEANUP: the draft assigned local_s/local_sigma twice and computed
        unused var_dis/var_phi locals; those were removed, math unchanged.
        """
        [s, orinetations, sigma_s, index] = robot_data
        [measurement_data, sensor_covariance] = sensor_data
        sigma_ob = sensor_covariance

        self_theta = orinetations[index]
        i = 2 * index

        local_s = s[i:i + 2]
        local_sigma = sigma_s[i:i + 2, i:i + 2]

        landmark_loc = measurement_data[0]
        meas_range = measurement_data[1]
        bearing = measurement_data[2]

        H = rot_mtx(self_theta).getT() * matrix([[-1, 0], [0, -1]], dtype=float)

        landmark_loc = np.matrix(landmark_loc).getT()
        # Actual measurement in the robot's body frame.
        z = np.matrix([meas_range * cos(bearing), meas_range * sin(bearing)]).getT()
        delta_x = float(landmark_loc[0] - s.item(i, 0))
        delta_y = float(landmark_loc[1] - s.item(i + 1, 0))
        # Predicted measurement, shifted into the robot frame.
        z_hat = rot_mtx(self_theta).getT() * (np.matrix([delta_x, delta_y]).getT())

        sigma_z = rot_mtx(bearing) * sigma_ob * rot_mtx(bearing).getT()
        sigma_innovation = H * local_sigma * H.getT() + sigma_z
        kalman_gain = local_sigma * H.getT() * sigma_innovation.getI()

        s[i:i + 2] = local_s + kalman_gain * (z - z_hat)
        sigma_s[i:i + 2, i:i + 2] = local_sigma - kalman_gain * H * local_sigma

        return [s, orinetations, sigma_s]

    def relative_obser_update(self, robot_data, sensor_data):
        """Robot i observes robot j: fuse j's implied pose with CI (w = 0.5).

        CLEANUP: removed the dead H_ij/H computation from the draft (never
        used by the CI fusion below).
        """
        [s, orinetations, sigma_s, index] = robot_data
        [measurement_data, sensor_covariance] = sensor_data

        obser_index = measurement_data[0]
        meas_range = measurement_data[1]
        bearing = measurement_data[2]

        i = index * 2
        j = obser_index * 2

        dis = meas_range
        phi = bearing

        z = matrix([[dis * cos(phi)], [dis * sin(phi)]])
        # NOTE(review): the draft adds the body-frame measurement directly to
        # the global position; a rot_mtx(orinetations[index])*z term may be
        # missing here. Behavior kept as-is pending confirmation.
        hat_j = z + s[i:i + 2]

        sigma_i = sigma_s[i:i + 2, i:i + 2]
        sigma_j = sigma_s[j:j + 2, j:j + 2]

        e = 0.5  # CI weight
        sigma_j_next_inv = e * sigma_j.getI() + (1 - e) * sigma_i.getI()

        s[j:j + 2] = sigma_j_next_inv.getI() * (e * sigma_j.getI() * s[j:j + 2] + (1 - e) * sigma_i.getI() * hat_j)
        sigma_s[j:j + 2, j:j + 2] = sigma_j_next_inv.getI()
        return [s, orinetations, sigma_s]
import numpy as np
from numpy import matrix
from math import cos, sin, atan2, sqrt


def rot_mtx(theta):
    """Return the 2x2 rotation matrix for an angle theta (radians)."""
    return matrix([[cos(theta), -sin(theta)], [sin(theta), cos(theta)]])


class gs_ci_bound():
    """Analytical upper bound on the GS-CI joint covariance.

    Tracks only the theoretical covariance bound th_sigma_s; no state
    estimate is produced.
    """

    def __init__(self, algo_name):
        self.algo_name = algo_name
        self.d_max = 4.5        # max measurement distance
        self.d_var = 0.2        # range measurement variance
        self.bearing_var = 0.2  # bearing measurement variance
        self.var_v = 0.5        # velocity variance assumed for other robots

    def state_veriance_init(self, num_robots):
        # NOTE(review): the "veriance" typo in the name is kept because
        # external callers depend on it.
        return 0.01 * np.matrix(np.identity(2 * num_robots), dtype=float)

    def algo_update(self, robot_data, update_type, sensor_data):
        """Dispatch one update step; returns the new covariance bound."""
        # BUGFIX: Python-2 print statements were a SyntaxError on Python 3.
        print('in th algo update')
        [s, odo_freq, th_sigma_s, index] = robot_data
        [measurement_data, sensor_covariance] = sensor_data

        if update_type == 'propagation update':
            print('th bound propagation')
            return self.propagation_update(s, th_sigma_s, sensor_covariance, index, odo_freq)
        elif update_type == 'landmark observation update':
            return self.absolute_obser_update(s, th_sigma_s, index)
        elif update_type == 'relative observation update':
            obser_index = measurement_data[0]
            return self.relative_obser_update(s, th_sigma_s, index, obser_index)
        elif update_type == 'communication':
            return self.communication(s, th_sigma_s, index, measurement_data)
        else:
            print("invalid update")

    def calculate_trace_state_variance(self, state_variance, robot_index):
        """Trace of the full covariance bound."""
        return np.trace(state_variance)

    def propagation_update(self, s, th_sigma_s, sigma_odo, index, odo_freq):
        """Grow every robot's bound by one odometry period of process noise."""
        # BUGFIX: len(s)/2 is a float on Python 3 and breaks the loop and the
        # downstream shape arithmetic; use integer division.
        num_robots = len(s) // 2
        delta_t = 1.0 / odo_freq

        for j in range(num_robots):
            jj = 2 * j
            if j == index:
                # Own robot: actual odometry noise.
                th_sigma_s[jj:jj + 2, jj:jj + 2] += sigma_odo[0, 0] * np.identity(2) * delta_t * delta_t
            else:
                # Other robots: bounded by the assumed velocity variance.
                th_sigma_s[jj:jj + 2, jj:jj + 2] += self.var_v * np.identity(2) * delta_t * delta_t
        print('bound')
        # BUGFIX: the draft printed the undefined name `state_variance`.
        print(np.trace(th_sigma_s))
        return th_sigma_s

    def absolute_obser_update(self, s, th_sigma_s, index):
        """Information-form bound update for a landmark observation."""
        num_robots = len(s) // 2
        i = 2 * index
        H_i = np.matrix(np.zeros((2, 2 * num_robots)))
        H_i[0, i] = -1
        H_i[1, i + 1] = -1

        d_max = self.d_max
        var_dis = self.d_var
        var_phi = self.bearing_var

        # Worst-case isotropic measurement covariance bound.
        sigma_th_z = np.matrix(max(var_dis, d_max * d_max * var_phi) * np.identity(2))
        th_sigma_s = (th_sigma_s.getI() + H_i.getT() * sigma_th_z.getI() * H_i).getI()
        return th_sigma_s

    def relative_obser_update(self, s, th_sigma_s, index, obser_index):
        """Information-form bound update when robot `index` observes another."""
        i = 2 * index
        j = obser_index * 2
        num_robots = len(s) // 2

        H_ij = np.matrix(np.zeros((2, 2 * num_robots)))
        H_ij[0, i] = -1
        H_ij[1, i + 1] = -1
        H_ij[0, j] = 1
        H_ij[1, j + 1] = 1

        d_max = self.d_max
        var_dis = self.d_var
        var_phi = self.bearing_var

        sigma_th_z = np.matrix(max(var_dis, d_max * d_max * var_phi) * np.identity(2))
        th_sigma_s = (th_sigma_s.getI() + H_ij.getT() * sigma_th_z.getI() * H_ij).getI()
        return th_sigma_s

    def communication(self, s, th_sigma_s, index, measurement_data):
        """CI fusion of the local bound with the sender's bound (w = 0.5)."""
        e = 0.5
        comm_robot_th_sigma = measurement_data
        th_sigma_s = (e * th_sigma_s.getI() + (1 - e) * comm_robot_th_sigma.getI()).getI()  # from TK's gs-ci
        return th_sigma_s
import numpy as np


class ekf_algo_framework():
    """Base class that every localization algorithm extends.

    Subclasses override the four update hooks below; algo_update routes a
    named update type to the matching hook.
    """

    def __init__(self, algo_name):
        self.algo_name = algo_name

    def get_name(self):
        """Return the human-readable algorithm name."""
        return self.algo_name

    def state_variance_init(self, num_robots):
        raise Exception("Failed to define state variance!")

    def calculate_trace_state_variance(self, robot_data):
        raise Exception("Failed to define trace of state variance!")

    def algo_update(self, robot_data, update_type, sensor_data):
        """Dispatch `update_type` to the matching hook; None if unknown."""
        handlers = {
            'propagation': self.propagation_update,
            'landmark observation': self.absolute_obser_update,
            'relative observation': self.relative_obser_update,
            'communication': self.communication,
        }
        handler = handlers.get(update_type)
        if handler is None:
            print("invalid update")
            return None
        return handler(robot_data, sensor_data)

    def propagation_update(self, robot_data, sensor_data):
        """Default hook: return the state unchanged."""
        [s, orinetations, sigma_s, index] = robot_data
        return [s, orinetations, sigma_s]

    def absolute_obser_update(self, robot_data, sensor_data):
        """Default hook: return the state unchanged."""
        [s, orinetations, sigma_s, index] = robot_data
        return [s, orinetations, sigma_s]

    def relative_obser_update(self, robot_data, sensor_data):
        """Default hook: return the state unchanged."""
        [s, orinetations, sigma_s, index] = robot_data
        return [s, orinetations, sigma_s]

    def communication(self, robot_data, sensor_data):
        """Default hook: return the state unchanged."""
        [s, orinetations, sigma_s, index] = robot_data
        return [s, orinetations, sigma_s]
import numpy as np
import math
from math import cos, sin, atan2, sqrt, pi
from numpy import dot, arctan
from numpy.linalg import inv
import scipy.linalg as linalg
import sys
from localization_algo_framework import ekf_algo_framework


def rot_mtx(theta):
    """Return the 2x2 rotation matrix for an angle theta (radians)."""
    return np.matrix([[cos(theta), -sin(theta)], [sin(theta), cos(theta)]])


class Simple_EKF(ekf_algo_framework):
    """Single-robot EKF over the robot's own (x, y); no cooperation.

    Unlike the cooperative algorithms, sigma_s here is just the robot's own
    2x2 covariance (see state_variance_init).
    """

    def __init__(self, algo_name):
        ekf_algo_framework.__init__(self, algo_name)

    def state_init(self):
        return 0.01 * np.matrix(np.identity(2), dtype=float)

    def state_variance_init(self, num_robots):
        # Only the own 2x2 block is tracked, regardless of num_robots.
        return 0.01 * np.matrix(np.identity(2), dtype=float)

    def calculate_trace_state_variance(self, robot_data):
        """Trace of the robot's own covariance."""
        [s, orinetations, sigma_s, index] = robot_data
        return np.trace(sigma_s)

    def propagation_update(self, robot_data, sensor_data):
        """Dead-reckoning propagation: x += v*dt*cos(theta), y += v*dt*sin(theta)."""
        [s, orinetations, sigma_s, index] = robot_data
        [measurement_data, sensor_covariance] = sensor_data
        sigma_odo = sensor_covariance

        delta_t = measurement_data[0]
        if delta_t < 0:
            # Negative time step means corrupt data; abort loudly.
            sys.exit('delta_t: ' + str(delta_t))
        v = measurement_data[1]
        orinetations[index] = measurement_data[2]
        self_theta = orinetations[index]
        i = 2 * index
        s[i, 0] = s[i, 0] + cos(self_theta) * v * delta_t  # x
        s[i + 1, 0] = s[i + 1, 0] + sin(self_theta) * v * delta_t  # y

        Q = sigma_odo
        P = sigma_s
        W = delta_t * rot_mtx(self_theta)
        P = P + W * Q * W.getT()  # state transition matrix is the identity
        sigma_s = P
        return [s, orinetations, sigma_s]

    def absolute_obser_update(self, robot_data, sensor_data):
        """Landmark update with a direct (range, bearing) measurement model."""
        [s, orinetations, sigma_s, index] = robot_data
        [measurement_data, sensor_covariance] = sensor_data
        sigma_ob = sensor_covariance
        self_theta = orinetations[index]

        landmark_loc = measurement_data[0]
        meas_range = measurement_data[1]
        bearing = measurement_data[2]

        i = 2 * index
        P = sigma_s
        R = sigma_ob
        z = np.matrix([meas_range, bearing]).getT()
        lx = landmark_loc[0]
        ly = landmark_loc[1]
        x = s[i, 0]
        y = s[i + 1, 0]

        z_hat_0 = sqrt((lx - x) * (lx - x) + (ly - y) * (ly - y))
        # NOTE(review): bearing residual is normalized modulo pi (not 2*pi)
        # and then re-centered; looks intentional for this dataset, but
        # confirm the wrap-around convention against the sensor model.
        z_hat_1 = (atan2((ly - y), (lx - x)) - self_theta) % pi
        if abs(z_hat_1 - pi) < abs(z_hat_1):
            z_hat_1 = z_hat_1 - pi

        z_hat = np.matrix([z_hat_0, z_hat_1]).getT()

        # Jacobian of (range, bearing) w.r.t. the robot position.
        h11 = (-lx + x) / sqrt((lx - x) ** 2 + (ly - y) ** 2)
        h12 = (-ly + y) / sqrt((lx - x) ** 2 + (ly - y) ** 2)
        h21 = -(-ly + y) / ((lx - x) ** 2 + (ly - y) ** 2)
        h22 = -(lx - x) / ((lx - x) ** 2 + (ly - y) ** 2)

        H = np.matrix([[h11, h12], [h21, h22]])
        sigma_innovation = H * P * H.getT() + R  # measurement Jacobian w.r.t. noise is identity
        K = P * H.getT() * sigma_innovation.getI()
        s[i:i + 2, 0] = s[i:i + 2, 0] + K * (z - z_hat)
        P = P - K * H * P

        sigma_s = P

        return [s, orinetations, sigma_s]
import numpy as np
import math
from math import cos, sin, atan2, sqrt, pi
from numpy import dot
import scipy.linalg as linalg
from localization_algo_framework import ekf_algo_framework


def rot_mtx(theta):
    """Return the 2x2 rotation matrix for an angle theta (radians)."""
    return np.matrix([[cos(theta), -sin(theta)], [sin(theta), cos(theta)]])


class Centralized_EKF2(ekf_algo_framework):
    """Centralized EKF over the stacked (x, y) states of all robots, using a
    direct (range, bearing) measurement model."""

    def __init__(self, algo_name):
        ekf_algo_framework.__init__(self, algo_name)

    def state_variance_init(self, num_robots):
        return 0.01 * np.matrix(np.identity(2 * num_robots), dtype=float)

    def calculate_trace_state_variance(self, robot_data):
        """Trace of the robot's own 2x2 covariance block."""
        [s, orinetations, sigma_s, index] = robot_data
        i = 2 * index
        return np.trace(sigma_s[i:i + 2, i:i + 2])

    def propagation_update(self, robot_data, sensor_data):
        """Propagate robot `index` with odometry (delta_t, v, theta)."""
        [s, orinetations, sigma_s, index] = robot_data
        [measurement_data, sensor_covariance] = sensor_data
        sigma_odo = sensor_covariance

        delta_t = measurement_data[0]
        v = measurement_data[1]
        orinetations[index] = measurement_data[2]
        self_theta = orinetations[index]

        i = 2 * index
        s[i, 0] = s[i, 0] + cos(self_theta) * v * delta_t  # x
        s[i + 1, 0] = s[i + 1, 0] + sin(self_theta) * v * delta_t  # y

        # BUGFIX: copy before scaling; the draft aliased sigma_odo and
        # rescaled the caller's odometry covariance on every call.
        Q = sigma_odo.copy()
        Q[1, 1] = Q[1, 1] * v * v
        P = sigma_s[i:i + 2, i:i + 2]
        # NOTE(review): W starts as delta_t*R(theta) but then two entries are
        # overwritten with covariance terms scaled by v — unusual; kept as in
        # the draft pending confirmation of the intended noise model.
        W = delta_t * rot_mtx(self_theta)
        W[0, 1] = sigma_odo[0, 1] * v
        W[1, 1] = sigma_odo[1, 1] * v

        P = P + W * Q * W.getT()  # state transition matrix is the identity

        sigma_s[i:i + 2, i:i + 2] = P

        return [s, orinetations, sigma_s]

    def absolute_obser_update(self, robot_data, sensor_data):
        """Landmark (range, bearing) update over the full joint state."""
        [s, orinetations, sigma_s, index] = robot_data
        [measurement_data, sensor_covariance] = sensor_data
        # BUGFIX: copy before the range-dependent scaling below.
        sigma_ob = sensor_covariance.copy()
        num_robots = int(len(s) / 2)
        self_theta = orinetations[index]

        landmark_loc = measurement_data[0]
        meas_range = measurement_data[1]
        bearing = measurement_data[2]

        i = 2 * index
        P = sigma_s
        sigma_ob[1, 1] = sigma_ob[1, 1] * meas_range * meas_range
        R = sigma_ob
        z = np.matrix([meas_range, bearing]).getT()
        lx = landmark_loc[0]
        ly = landmark_loc[1]
        x = s[i, 0]
        y = s[i + 1, 0]

        z_hat_0 = sqrt((lx - x) * (lx - x) + (ly - y) * (ly - y))
        # NOTE(review): bearing residual normalized modulo pi, then
        # re-centered; confirm the wrap-around convention.
        z_hat_1 = (atan2((ly - y), (lx - x)) - self_theta) % pi
        if abs(z_hat_1 - pi) < abs(z_hat_1):
            z_hat_1 = z_hat_1 - pi

        z_hat = np.matrix([z_hat_0, z_hat_1]).getT()

        # Jacobian of (range, bearing); only the observing robot's columns
        # are non-zero.
        H = np.matrix(np.zeros((2, 2 * num_robots)))
        h11 = (-lx + x) / sqrt((lx - x) ** 2 + (ly - y) ** 2)
        h12 = (-ly + y) / sqrt((lx - x) ** 2 + (ly - y) ** 2)
        h21 = -(-ly + y) / ((lx - x) ** 2 + (ly - y) ** 2)
        h22 = -(lx - x) / ((lx - x) ** 2 + (ly - y) ** 2)
        H[0, i] = h11
        H[0, i + 1] = h12
        H[1, i] = h21
        H[1, i + 1] = h22

        sigma_innovation = H * P * H.getT() + R
        kalman_gain = P * H.getT() * sigma_innovation.getI()
        s = s + kalman_gain * (z - z_hat)
        P = P - kalman_gain * H * P
        sigma_s = P

        return [s, orinetations, sigma_s]

    def relative_obser_update(self, robot_data, sensor_data):
        """Update when robot `index` observes robot `obser_index`."""
        [s, orinetations, sigma_s, index] = robot_data
        [measurement_data, sensor_covariance] = sensor_data
        # BUGFIX: copy before the range-dependent scaling below.
        sigma_ob = sensor_covariance.copy()
        num_robots = int(len(s) / 2)
        self_theta = orinetations[index]

        obser_index = measurement_data[0]
        meas_range = measurement_data[1]
        bearing = measurement_data[2]

        i = 2 * index
        P = sigma_s
        sigma_ob[1, 1] = sigma_ob[1, 1] * meas_range * meas_range
        R = sigma_ob
        z = np.matrix([meas_range, bearing]).getT()
        j = obser_index * 2

        x_i = s[i, 0]
        y_i = s[i + 1, 0]
        x_j = s[j, 0]
        y_j = s[j + 1, 0]

        z_hat_0 = sqrt((x_j - x_i) * (x_j - x_i) + (y_j - y_i) * (y_j - y_i))
        z_hat_1 = (atan2((y_j - y_i), (x_j - x_i)) - self_theta) % pi
        if abs(z_hat_1 - pi) < abs(z_hat_1):
            z_hat_1 = z_hat_1 - pi

        z_hat = np.matrix([z_hat_0, z_hat_1]).getT()

        # Jacobian w.r.t. both the observer's and the observed robot's states.
        H = np.matrix(np.zeros((2, 2 * num_robots)))
        H[0, i] = (x_i - x_j) / sqrt((-x_i + x_j) ** 2 + (-y_i + y_j) ** 2)
        H[0, i + 1] = (y_i - y_j) / sqrt((-x_i + x_j) ** 2 + (-y_i + y_j) ** 2)
        H[1, i] = -(y_i - y_j) / ((-x_i + x_j) ** 2 + (-y_i + y_j) ** 2)
        H[1, i + 1] = -(-x_i + x_j) / ((-x_i + x_j) ** 2 + (-y_i + y_j) ** 2)

        H[0, j] = (-x_i + x_j) / sqrt((-x_i + x_j) ** 2 + (-y_i + y_j) ** 2)
        H[0, j + 1] = (-y_i + y_j) / sqrt((-x_i + x_j) ** 2 + (-y_i + y_j) ** 2)
        H[1, j] = (y_i - y_j) / ((-x_i + x_j) ** 2 + (-y_i + y_j) ** 2)
        H[1, j + 1] = (-x_i + x_j) / ((-x_i + x_j) ** 2 + (-y_i + y_j) ** 2)

        sigma_innovation = H * P * H.getT() + R
        kalman_gain = P * H.getT() * sigma_innovation.getI()
        s = s + kalman_gain * (z - z_hat)
        P = P - kalman_gain * H * P
        sigma_s = P

        return [s, orinetations, sigma_s]
import numpy as np
from numpy import matrix
from math import cos, sin, atan2, sqrt
from localization_algo_framework import ekf_algo_framework


def rot_mtx(theta):
    """Return the 2x2 rotation matrix for an angle theta (radians)."""
    return matrix([[cos(theta), -sin(theta)], [sin(theta), cos(theta)]])


class EKF_GS_CI2(ekf_algo_framework):
    """Global-state EKF where communication fuses full states with
    covariance intersection (GS-CI)."""

    def __init__(self, algo_name):
        ekf_algo_framework.__init__(self, algo_name)

    def state_variance_init(self, num_robots):
        return 0.1 * np.matrix(np.identity(2 * num_robots), dtype=float)

    def calculate_trace_state_variance(self, robot_data):
        """Trace of the robot's own 2x2 covariance block."""
        [s, orinetations, sigma_s, index] = robot_data
        i = 2 * index
        return np.trace(sigma_s[i:i + 2, i:i + 2])

    def propagation_update(self, robot_data, sensor_data):
        """Propagate all robots: real odometry for self, a fixed velocity
        variance bound for the others."""
        [s, orinetations, sigma_s, index] = robot_data
        [measurement_data, sensor_covariance] = sensor_data
        sigma_odo = sensor_covariance

        var_v = 0.1  # assumed velocity variance for the other robots
        i = 2 * index
        num_robots = int(len(s) / 2)
        delta_t = measurement_data[0]
        v = measurement_data[1]
        orinetations[index] = measurement_data[2]
        self_theta = orinetations[index]

        # BUGFIX: copy before scaling; the draft aliased sigma_odo and
        # rescaled the caller's odometry covariance on every call. The
        # unused local W was removed.
        Q = sigma_odo.copy()
        Q[1, 1] = Q[1, 1] * v * v

        s[i, 0] = s[i, 0] + cos(self_theta) * v * delta_t  # x
        s[i + 1, 0] = s[i + 1, 0] + sin(self_theta) * v * delta_t  # y

        for j in range(num_robots):
            jj = 2 * j
            if j == index:
                sigma_s[jj:jj + 2, jj:jj + 2] += delta_t * delta_t * rot_mtx(self_theta) * Q * rot_mtx(self_theta).getT()
            else:
                sigma_s[jj:jj + 2, jj:jj + 2] += delta_t * delta_t * var_v * np.identity(2)

        return [s, orinetations, sigma_s]

    def absolute_obser_update(self, robot_data, sensor_data):
        """Landmark (range, bearing) update over the full joint state."""
        [s, orinetations, sigma_s, index] = robot_data
        [measurement_data, sensor_covariance] = sensor_data

        # BUGFIX: copy before the range-dependent scaling below.
        sigma_ob = sensor_covariance.copy()
        num_robots = int(len(s) / 2)
        self_theta = orinetations[index]

        landmark_loc = measurement_data[0]
        dis = measurement_data[1]
        phi = measurement_data[2]

        i = 2 * index
        H_i = np.matrix(np.zeros((2, 2 * num_robots)), dtype=float)
        H_i[0, i] = -1
        H_i[1, i + 1] = -1
        H = rot_mtx(self_theta).getT() * H_i

        # Predicted measurement in the robot frame.
        delta_x = landmark_loc[0] - s.item(i, 0)
        delta_y = landmark_loc[1] - s.item(i + 1, 0)
        z_hat = rot_mtx(self_theta).getT() * (np.matrix([delta_x, delta_y]).getT())
        z = matrix([dis * cos(phi), dis * sin(phi)]).getT()

        sigma_ob[1, 1] = sigma_ob[1, 1] * dis * dis
        sigma_z = rot_mtx(phi) * sigma_ob * rot_mtx(phi).getT()

        sigma_innovation = H * sigma_s * H.getT() + sigma_z
        kalman_gain = sigma_s * H.getT() * sigma_innovation.getI()

        s = s + kalman_gain * (z - z_hat)
        sigma_s = sigma_s - kalman_gain * H * sigma_s

        return [s, orinetations, sigma_s]

    def relative_obser_update(self, robot_data, sensor_data):
        """Update when robot `index` observes robot `obser_index`."""
        [s, orinetations, sigma_s, index] = robot_data
        [measurement_data, sensor_covariance] = sensor_data
        # BUGFIX: copy before the range-dependent scaling below.
        sigma_ob = sensor_covariance.copy()

        num_robots = int(len(s) / 2)
        self_theta = orinetations[index]

        i = index * 2
        obser_index = measurement_data[0]
        dis = measurement_data[1]
        phi = measurement_data[2]
        j = obser_index * 2

        z = matrix([dis * cos(phi), dis * sin(phi)]).getT()

        H_ij = np.zeros((2, 2 * num_robots))
        H_ij[0, i] = -1
        H_ij[1, i + 1] = -1
        H_ij[0, j] = 1
        H_ij[1, j + 1] = 1
        H = rot_mtx(self_theta).getT() * H_ij

        delta_x = s.item(j, 0) - s.item(i, 0)
        delta_y = s.item(j + 1, 0) - s.item(i + 1, 0)
        z_hat = rot_mtx(self_theta).getT() * (np.matrix([delta_x, delta_y]).getT())

        sigma_ob[1, 1] = sigma_ob[1, 1] * dis * dis
        sigma_z = rot_mtx(phi) * sigma_ob * rot_mtx(phi).getT()

        sigma_innovation = H * sigma_s * H.getT() + sigma_z
        kalman_gain = sigma_s * H.getT() * sigma_innovation.getI()

        s = s + kalman_gain * (z - z_hat)
        sigma_s = sigma_s - kalman_gain * H * sigma_s

        return [s, orinetations, sigma_s]

    def communication(self, robot_data, sensor_data):
        """CI fusion of the local full state with a received full state."""
        [s, orinetations, sigma_s, index] = robot_data
        [comm_data, comm_variance] = sensor_data
        [sender_idx, comm_robot_s, comm_robot_sigma_s] = comm_data

        e = 0.8  # CI weight on the local estimate

        sig_inv = e * sigma_s.getI() + (1 - e) * comm_robot_sigma_s.getI()
        s = sig_inv.getI() * (e * sigma_s.getI() * s + (1 - e) * comm_robot_sigma_s.getI() * comm_robot_s)
        sigma_s = sig_inv.getI()

        return [s, orinetations, sigma_s]
# coding: utf-8
# Local state with block diagonal approximation (LS-BDA), draft #2.
# "Recursive decentralized collaborative localization for sparsely
# communicating robots" - L. Luft, T. Schubert, S. I. Roumeliotis and W. Burgard
import numpy as np
from numpy import matrix
from numpy import linalg
# BUGFIX: pi was used by the measurement model but never imported.
from math import cos, sin, atan2, sqrt, pi
from localization_algo_framework import ekf_algo_framework


def rot_mtx(theta):
    """Return the 2x2 rotation matrix for an angle theta (radians)."""
    return matrix([[cos(theta), -sin(theta)], [sin(theta), cos(theta)]])


class EKF_LS_BDA2(ekf_algo_framework):
    """LS-BDA variant using a direct (range, bearing) measurement model.

    NOTE(review): this file was under development (undefined names and an
    incomplete statement); the update steps were completed following the
    LS-BDA pattern used elsewhere in this package.
    """

    def __init__(self, algo_name):
        ekf_algo_framework.__init__(self, algo_name)

    def state_variance_init(self, num_robots):
        return 0.01 * np.matrix(np.identity(2 * num_robots), dtype=float)

    def calculate_trace_state_variance(self, robot_data):
        """Trace of the full joint covariance."""
        [s, orinetations, sigma_s, index] = robot_data
        return np.trace(sigma_s)

    def propagation_update(self, robot_data, sensor_data):
        """Propagate robot `index` with odometry (delta_t, v, theta)."""
        [s, orinetations, sigma_s, index] = robot_data
        [measurement_data, sensor_covariance] = sensor_data
        sigma_odo = sensor_covariance

        delta_t = measurement_data[0]
        v = measurement_data[1]
        orinetations[index] = measurement_data[2]
        self_theta = orinetations[index]

        i = 2 * index
        s[i, 0] = s[i, 0] + cos(self_theta) * v * delta_t  # x
        s[i + 1, 0] = s[i + 1, 0] + sin(self_theta) * v * delta_t  # y

        Q = sigma_odo
        P = sigma_s[i:i + 2, i:i + 2]
        # NOTE(review): W starts as delta_t*R(theta) but two entries are then
        # overwritten with covariance terms scaled by v — unusual; kept as in
        # the draft pending confirmation of the intended noise model.
        W = delta_t * rot_mtx(self_theta)
        W[0, 1] = sigma_odo[0, 1] * v
        W[1, 1] = sigma_odo[1, 1] * v

        P = P + W * Q * W.getT()  # state transition matrix is the identity

        sigma_s[i:i + 2, i:i + 2] = P

        return [s, orinetations, sigma_s]

    def absolute_obser_update(self, robot_data, sensor_data):
        """Landmark (range, bearing) update of the local block.

        BUGFIX: the draft referenced undefined names P and R and mixed a
        full-state Jacobian with a local 2x2 covariance; reworked as a local
        2x2 update with the cross-covariance rescaling of LS-BDA.
        """
        [s, orinetations, sigma_s, index] = robot_data
        [measurement_data, sensor_covariance] = sensor_data
        sigma_ob = sensor_covariance

        num_robots = int(len(s) / 2)
        self_theta = orinetations[index]
        i = 2 * index

        local_s = s[i:i + 2]
        local_P = sigma_s[i:i + 2, i:i + 2]

        landmark_loc = measurement_data[0]
        meas_range = measurement_data[1]
        bearing = measurement_data[2]

        lx = landmark_loc[0]
        ly = landmark_loc[1]
        x = s[i, 0]
        y = s[i + 1, 0]

        z = np.matrix([meas_range, bearing]).getT()
        z_hat_0 = sqrt((lx - x) * (lx - x) + (ly - y) * (ly - y))
        z_hat_1 = (atan2((ly - y), (lx - x)) - self_theta) % pi
        if abs(z_hat_1 - pi) < abs(z_hat_1):
            z_hat_1 = z_hat_1 - pi
        z_hat = np.matrix([z_hat_0, z_hat_1]).getT()

        # 2x2 Jacobian of (range, bearing) w.r.t. the robot's own (x, y).
        h11 = (-lx + x) / sqrt((lx - x) ** 2 + (ly - y) ** 2)
        h12 = (-ly + y) / sqrt((lx - x) ** 2 + (ly - y) ** 2)
        h21 = -(-ly + y) / ((lx - x) ** 2 + (ly - y) ** 2)
        h22 = -(lx - x) / ((lx - x) ** 2 + (ly - y) ** 2)
        H = np.matrix([[h11, h12], [h21, h22]])

        R = sigma_ob
        sigma_innovation = H * local_P * H.getT() + R
        kalman_gain = local_P * H.getT() * sigma_innovation.getI()

        s[i:i + 2] = local_s + kalman_gain * (z - z_hat)
        local_P = local_P - kalman_gain * H * local_P
        sigma_s[i:i + 2, i:i + 2] = local_P

        # Rescale cross-covariances with every other robot by (I - K*H).
        multi = np.identity(2) - kalman_gain * H
        for rbt_idx in range(num_robots):
            if not (rbt_idx == index):
                jj = 2 * rbt_idx
                sigma_s[i:i + 2, jj:jj + 2] = multi * sigma_s[i:i + 2, jj:jj + 2]
                sigma_s[jj:jj + 2, i:i + 2] = sigma_s[jj:jj + 2, i:i + 2] * multi.getT()

        return [s, orinetations, sigma_s]

    def relative_obser_update(self, robot_data, sensor_data):
        """Robot `index` observes robot `obser_index` (range, bearing).

        BUGFIX: the draft ended in an incomplete statement (`K_i =`, a
        SyntaxError); completed with the LS-BDA reduced-covariance EKF step.
        """
        [s, orinetations, sigma_s, index] = robot_data
        [measurement_data, sensor_covariance] = sensor_data
        sigma_ob = sensor_covariance

        num_robots = int(len(s) / 2)
        self_theta = orinetations[index]

        obser_index = measurement_data[0]
        meas_range = measurement_data[1]
        bearing = measurement_data[2]

        i = index * 2
        j = obser_index * 2
        R = sigma_ob
        z = np.matrix([meas_range, bearing]).getT()

        x_i = s[i, 0]
        y_i = s[i + 1, 0]
        x_j = s[j, 0]
        y_j = s[j + 1, 0]

        z_hat_0 = sqrt((x_j - x_i) * (x_j - x_i) + (y_j - y_i) * (y_j - y_i))
        z_hat_1 = (atan2((y_j - y_i), (x_j - x_i)) - self_theta) % pi
        if abs(z_hat_1 - pi) < abs(z_hat_1):
            z_hat_1 = z_hat_1 - pi
        z_hat = np.matrix([z_hat_0, z_hat_1]).getT()

        # Jacobian w.r.t. both robots' positions.
        H = np.matrix(np.zeros((2, 2 * num_robots)))
        H[0, i] = (x_i - x_j) / sqrt((-x_i + x_j) ** 2 + (-y_i + y_j) ** 2)
        H[0, i + 1] = (y_i - y_j) / sqrt((-x_i + x_j) ** 2 + (-y_i + y_j) ** 2)
        H[1, i] = -(y_i - y_j) / ((-x_i + x_j) ** 2 + (-y_i + y_j) ** 2)
        H[1, i + 1] = -(-x_i + x_j) / ((-x_i + x_j) ** 2 + (-y_i + y_j) ** 2)

        H[0, j] = (-x_i + x_j) / sqrt((-x_i + x_j) ** 2 + (-y_i + y_j) ** 2)
        H[0, j + 1] = (-y_i + y_j) / sqrt((-x_i + x_j) ** 2 + (-y_i + y_j) ** 2)
        H[1, j] = (y_i - y_j) / ((-x_i + x_j) ** 2 + (-y_i + y_j) ** 2)
        H[1, j + 1] = (-x_i + x_j) / ((-x_i + x_j) ** 2 + (-y_i + y_j) ** 2)

        # Reduced covariance keeping only the i/j blocks (BDA approximation).
        reduced_sigma = np.matrix(np.zeros((2 * num_robots, 2 * num_robots)))
        reduced_sigma[i:i + 2, i:i + 2] = sigma_s[i:i + 2, i:i + 2]
        reduced_sigma[j:j + 2, j:j + 2] = sigma_s[j:j + 2, j:j + 2]
        reduced_sigma[i:i + 2, j:j + 2] = sigma_s[i:i + 2, j:j + 2]
        reduced_sigma[j:j + 2, i:i + 2] = sigma_s[j:j + 2, i:i + 2]

        # Pre-update block inverses for the cross-covariance rescaling below.
        pre_inv_i = sigma_s[i:i + 2, i:i + 2].getI()
        pre_inv_j = sigma_s[j:j + 2, j:j + 2].getI()

        sigma_innovation = H * reduced_sigma * H.getT() + R
        kalman_gain = reduced_sigma * H.getT() * sigma_innovation.getI()

        s[i:i + 2] = s[i:i + 2] + kalman_gain[i:i + 2] * (z - z_hat)
        s[j:j + 2] = s[j:j + 2] + kalman_gain[j:j + 2] * (z - z_hat)
        sigma_s = sigma_s - kalman_gain * H * reduced_sigma

        multi_i = sigma_s[i:i + 2, i:i + 2] * pre_inv_i
        multi_j = sigma_s[j:j + 2, j:j + 2] * pre_inv_j
        for k in range(num_robots):
            if not (k == index) and not (k == obser_index):
                kk = 2 * k
                sigma_s[i:i + 2, kk:kk + 2] = multi_i * sigma_s[i:i + 2, kk:kk + 2]
                sigma_s[j:j + 2, kk:kk + 2] = multi_j * sigma_s[j:j + 2, kk:kk + 2]
                sigma_s[kk:kk + 2, i:i + 2] = sigma_s[kk:kk + 2, i:i + 2] * multi_i.getT()
                sigma_s[kk:kk + 2, j:j + 2] = sigma_s[kk:kk + 2, j:j + 2] * multi_j.getT()

        return [s, orinetations, sigma_s]
Roumeliotis +import numpy as np +from numpy import matrix +from numpy import linalg +from math import cos, sin, atan2, sqrt, pi +from localization_algo_framework import ekf_algo_framework +import scipy + + +def rot_mtx(theta): + return matrix([[cos(theta), -sin(theta)], [sin(theta), cos(theta)]]) + +class EKF_LS_CI2(ekf_algo_framework): + def __init__(self, algo_name): + ekf_algo_framework.__init__(self, algo_name) + + def state_variance_init(self, num_robots): + return 0.1*np.matrix(np.identity(2*num_robots), dtype = float) + + def calculate_trace_state_variance(self, robot_data): + [s, orinetations, sigma_s, index] = robot_data + i = 2*index + trace_state_var = np.trace(sigma_s[i:i+2, i:i+2]) + return trace_state_var + + def propagation_update(self, robot_data, sensor_data): + [s, orinetations, sigma_s, index] = robot_data + [measurement_data, sensor_covariance] = sensor_data + sigma_odo = sensor_covariance + num_robots = int(len(s)/2) + + delta_t = measurement_data[0] + v = measurement_data[1] + orinetations[index] = measurement_data[2] + self_theta = orinetations[index] + + i = 2*index + local_s = s[i:i+2] + local_P = sigma_s[i:i+2,i:i+2] + + local_s[0,0] = local_s[0,0] + cos(self_theta)*v*delta_t #x + local_s[1,0] = local_s[1,0] + sin(self_theta)*v*delta_t #y + + Q = sigma_odo + W = delta_t*rot_mtx(self_theta) + local_P = local_P + W*Q*W.getT() # A is identity matrix + + s[i:i+2] = local_s + sigma_s[i:i+2, i:i+2] = local_P + + return [s, orinetations, sigma_s] + + + def absolute_obser_update(self, robot_data, sensor_data): + [s, orinetations, sigma_s, index] = robot_data + [measurement_data, sensor_covariance] = sensor_data + sigma_ob = sensor_covariance + + num_robots = int(len(s)/2) + self_theta = orinetations[index] + + landmark_loc = measurement_data[0] + meas_range = measurement_data[1] + bearing = measurement_data[2] + + i = 2*index + + local_s = s[i:i+2] + local_P = sigma_s[i:i+2,i:i+2] + R = sigma_ob + + z = np.matrix([meas_range,bearing]).getT() + lx 
= landmark_loc[0] + ly = landmark_loc[1] + x = local_s[0,0] + y = local_s[1,0] + + z_hat_0 = sqrt((lx-x)*(lx-x)+(ly-y)*(ly-y)) + z_hat_1 = (atan2((ly-y), (lx-x))-self_theta)%pi + if abs(z_hat_1-pi) < abs(z_hat_1): + z_hat_1 = z_hat_1-pi + z_hat = np.matrix([z_hat_0, z_hat_1]).getT() + + h11 = (-lx + x)/sqrt((lx - x)**2 + (ly - y)**2) + h12 = (-ly + y)/sqrt((lx - x)**2 + (ly - y)**2) + h21 = -(-ly + y)/((lx - x)**2 + (ly - y)**2) + h22 = -(lx - x)/((lx - x)**2 + (ly - y)**2) + H = np.matrix([[h11, h12],[h21, h22]]) + + sigma_invention = H * local_P * H.getT() + R # V is a identity matrix + K = local_P*H.getT()*sigma_invention.getI() + local_s = local_s + K*(z - z_hat) + local_P = local_P - K*H*local_P + + s[i:i+2] = local_s + sigma_s[i:i+2,i:i+2] = local_P + + return [s, orinetations, sigma_s] + + + def relative_obser_update(self, robot_data, sensor_data): + [s, orinetations, sigma_s, index] = robot_data + [measurement_data, sensor_covariance] = sensor_data + sigma_ob = sensor_covariance + + #a obser b + num_robots = int(len(s)/2) + self_theta = orinetations[index] + + obser_index = measurement_data[0] + meas_range = measurement_data[1] + bearing = measurement_data[2] + + a_i = index * 2 + b_i = obser_index * 2 + + R = sigma_ob + s_a = s[a_i:a_i+2] + P_a = sigma_s[a_i:a_i+2,a_i:a_i+2] + P_ab = sigma_s[a_i:a_i+2,b_i:b_i+2] + + s_b = s[b_i:b_i+2] + P_b = sigma_s[b_i:b_i+2,b_i:b_i+2] + + x_a = s_a[0,0] + y_a = s_a[1,0] + x_b = s_b[0,0] + y_b = s_b[1,0] + + z = np.matrix([meas_range,bearing]).getT() + z_hat_0 = sqrt((x_b-x_a)*(x_b-x_a)+(y_b-y_a)*(y_b-y_a)) + z_hat_1 = (atan2((y_b-y_a), (x_b-x_a))-self_theta)%pi + if abs(z_hat_1-pi) < abs(z_hat_1): + z_hat_1 = z_hat_1-pi + z_hat = np.matrix([z_hat_0, z_hat_1]).getT() + + H_a = np.matrix(np.zeros((2,2))) + H_a[0,0] = (x_a - x_b)/sqrt((-x_a + x_b)**2 + (-y_a + y_b)**2) + H_a[0,1] = (y_a - y_b)/sqrt((-x_a + x_b)**2 + (-y_a + y_b)**2) + H_a[1,0] = -(y_a - y_b)/((-x_a + x_b)**2 + (-y_a + y_b)**2) + H_a[1,1] = -(-x_a + 
x_b)/((-x_a + x_b)**2 + (-y_a + y_b)**2) + + H_b = np.matrix(np.zeros((2,2))) + H_b[0,0] = (-x_a + x_b)/sqrt((-x_a + x_b)**2 + (-y_a + y_b)**2) + H_b[0,1] = (-y_a + y_b)/sqrt((-x_a + x_b)**2 + (-y_a + y_b)**2) + H_b[1,0] = (y_a - y_b)/((-x_a + x_b)**2 + (-y_a + y_b)**2) + H_b[1,1] = (-x_a + x_b)/((-x_a + x_b)**2 + (-y_a + y_b)**2) + + r = z - z_hat + S = R + H_a * P_a * H_a.getT() + H_b * P_b * H_b.getT() - H_a * P_ab * H_b.getT() - H_b * P_ab * H_a.getT() + S_sqrt = np.asmatrix(scipy.linalg.sqrtm(np.matrix(S))) + + D_arr = [0]*num_robots + + D_arr[index] = (P_ab * H_b.getT() - P_a * H_a.getT())*S_sqrt.getI() + D_arr[obser_index] = (P_b * H_b.getT() - P_ab * H_a.getT())*S_sqrt.getI() + + for robot_index_j in range(num_robots): + j = robot_index_j*2 + if robot_index_j != index and robot_index_j != obser_index: + D_arr[robot_index_j] = sigma_s[j:j+2,b_i:b_i+2]*H_b.getT()*S_sqrt.getI()-sigma_s[j:j+2,a_i:a_i+2]*H_a.getT()*S_sqrt.getI() + s[j:j+2] = s[j:j+2]+D_arr[robot_index_j]*r + sigma_s[j:j+2,j:j+2] = sigma_s[j:j+2,j:j+2]-D_arr[robot_index_j]*D_arr[robot_index_j].getT() + + # error.... + + ''' + for robot_index_j in range(num_robots): + j = robot_index_j*2 + for robot_index_l in range(num_robots): + l = robot_index_l*2 + if l != j: + sigma_s[j:j+2,l:l+2] = sigma_s[j:j+2,l:l+2]-D_arr[robot_index_j]*D_arr[robot_index_l].getT() + ''' + return [s, orinetations, sigma_s] \ No newline at end of file diff --git a/CoLo-AT/requests/__init__.py b/CoLo-AT/requests/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..b6e690fd59145ce8900fd9ab8d8a996ee7d33834 --- /dev/null +++ b/CoLo-AT/requests/__init__.py @@ -0,0 +1 @@ +from . 
import * diff --git a/CoLo-AT/requests/__pycache__/request_response.cpython-35.pyc b/CoLo-AT/requests/__pycache__/request_response.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e79d4a633a8d51d0b273c790f2ee29e6e9bbfe70 Binary files /dev/null and b/CoLo-AT/requests/__pycache__/request_response.cpython-35.pyc differ diff --git a/CoLo-AT/requests/__pycache__/request_response.cpython-36.pyc b/CoLo-AT/requests/__pycache__/request_response.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7d7ac821b02e807490c880a52d3f6e990057904 Binary files /dev/null and b/CoLo-AT/requests/__pycache__/request_response.cpython-36.pyc differ diff --git a/CoLo-AT/requests/request_response.py b/CoLo-AT/requests/request_response.py new file mode 100644 index 0000000000000000000000000000000000000000..122e46a6938327cb05c32350680586cd149fb6ff --- /dev/null +++ b/CoLo-AT/requests/request_response.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Apr 3 20:18:50 2018 + +@author: william +""" + +class Request_response: + + def __init__(self, time, idx): + #self.name = name + self.message = dict([('time', time), ('robot_index', idx), ('data', None), ('groundtruth', None)]) + self.r_r_type = None + def get_message(self): + return self.message + def get_robot_index(self): + return self.message['robot_index'] + def get_data(self): + return self.message['data'] + def get_groundtruth(self): + return self.message['groundtruth'] + def get_type(self): + return self.r_r_type + + def set_time(self, time): + self.message['time'] = time + def set_robot_idx(self, idx): + self.message['robot_index'] = idx + def set_type(self, r_r_type): + self.r_r_type = r_r_type + def set_message(self, message): + self.message = message + + +class Prop_req_resp(Request_response): + + def __init__(self, time, idx): + Request_response.__init__(self, time, idx) + Request_response.set_type(self, 'odometry') + +class 
Meas_req_resp(Request_response): + + def __init__(self, time, idx): #r_r_type can be landmark or relative measurement + Request_response.__init__(self, time, idx) + Request_response.set_type(self, 'measurement') + + +class Comm_req_resp(Request_response): + + def __init__(self, time, idx): + Request_response.__init__(self, time, idx) + Request_response.set_type(self, 'communication') diff --git a/CoLo-AT/robots/__init__.py b/CoLo-AT/robots/__init__.py new file mode 100755 index 0000000000000000000000000000000000000000..b6e690fd59145ce8900fd9ab8d8a996ee7d33834 --- /dev/null +++ b/CoLo-AT/robots/__init__.py @@ -0,0 +1 @@ +from . import * diff --git a/CoLo-AT/robots/__init__.pyc b/CoLo-AT/robots/__init__.pyc new file mode 100644 index 0000000000000000000000000000000000000000..34b8c9b2e92fd1b2952ab308250dfee2ed247e0b Binary files /dev/null and b/CoLo-AT/robots/__init__.pyc differ diff --git a/CoLo-AT/robots/__pycache__/__init__.cpython-35.pyc b/CoLo-AT/robots/__pycache__/__init__.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fde83e0bb0008574e83485b2e7a81e1b926fc965 Binary files /dev/null and b/CoLo-AT/robots/__pycache__/__init__.cpython-35.pyc differ diff --git a/CoLo-AT/robots/__pycache__/__init__.cpython-36.pyc b/CoLo-AT/robots/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..968f4ea9f0609e66583b9f1a61b651c8274854be Binary files /dev/null and b/CoLo-AT/robots/__pycache__/__init__.cpython-36.pyc differ diff --git a/CoLo-AT/robots/__pycache__/robot_centralized.cpython-35.pyc b/CoLo-AT/robots/__pycache__/robot_centralized.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2923475fb218f4636ecca0048cd1f783bbc77218 Binary files /dev/null and b/CoLo-AT/robots/__pycache__/robot_centralized.cpython-35.pyc differ diff --git a/CoLo-AT/robots/__pycache__/robot_centralized.cpython-36.pyc b/CoLo-AT/robots/__pycache__/robot_centralized.cpython-36.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..50568596f2a6182b86836bd1966d5bab0f43e051 Binary files /dev/null and b/CoLo-AT/robots/__pycache__/robot_centralized.cpython-36.pyc differ diff --git a/CoLo-AT/robots/__pycache__/robot_distributive.cpython-35.pyc b/CoLo-AT/robots/__pycache__/robot_distributive.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1900e20db14dbc6e249b365145032c41259d6c67 Binary files /dev/null and b/CoLo-AT/robots/__pycache__/robot_distributive.cpython-35.pyc differ diff --git a/CoLo-AT/robots/__pycache__/robot_distributive.cpython-36.pyc b/CoLo-AT/robots/__pycache__/robot_distributive.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47a8be69a6c7981177a8129559bbf71301d46d2e Binary files /dev/null and b/CoLo-AT/robots/__pycache__/robot_distributive.cpython-36.pyc differ diff --git a/CoLo-AT/robots/__pycache__/robot_parameters.cpython-35.pyc b/CoLo-AT/robots/__pycache__/robot_parameters.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74164906cd1d1220b0a92c38542c179e50aed113 Binary files /dev/null and b/CoLo-AT/robots/__pycache__/robot_parameters.cpython-35.pyc differ diff --git a/CoLo-AT/robots/__pycache__/robot_parameters.cpython-36.pyc b/CoLo-AT/robots/__pycache__/robot_parameters.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b45792f8a1629a9f208525a179c2b464f765faf0 Binary files /dev/null and b/CoLo-AT/robots/__pycache__/robot_parameters.cpython-36.pyc differ diff --git a/CoLo-AT/robots/__pycache__/robot_system.cpython-35.pyc b/CoLo-AT/robots/__pycache__/robot_system.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4245e57f813a10c01fb0bbf3c3b16c617c77020c Binary files /dev/null and b/CoLo-AT/robots/__pycache__/robot_system.cpython-35.pyc differ diff --git a/CoLo-AT/robots/__pycache__/robot_system.cpython-36.pyc 
b/CoLo-AT/robots/__pycache__/robot_system.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..764c3acefb5a5748df2049ab1d5b37a629ec6b64 Binary files /dev/null and b/CoLo-AT/robots/__pycache__/robot_system.cpython-36.pyc differ diff --git a/CoLo-AT/robots/robot_centralized.py b/CoLo-AT/robots/robot_centralized.py new file mode 100755 index 0000000000000000000000000000000000000000..24f8fc3549d864730b10a24615b7a77b812cd4b9 --- /dev/null +++ b/CoLo-AT/robots/robot_centralized.py @@ -0,0 +1,73 @@ +# coding: utf-8 +import numpy as np +import math +import os +import sys + +class CentralizedRobotSystem(): + + def __init__(self, name, dataset_labels, localization_algo): + #label can choose which robot in the dataset (1-5), 0 is default + #list index in the index of the robot in the robot_list + self.name = name + self.dataset_labels = dataset_labels #subject IDs + self.num_robots = len(dataset_labels) + self.state = np.matrix(np.zeros((2*self.num_robots,1))) + self.orientaions = np.matrix(np.zeros((self.num_robots,1))) + self.localization_algo = localization_algo + self.state_variance = 0 + + + def set_starting_state(self, start_state_arr): + for i, label in enumerate(self.dataset_labels): + self.time = start_state_arr[label][0] + self.state[2*i,0] = start_state_arr[label][1] #for x-axis + self.state[2*i+1,0] = start_state_arr[label][2] #for y-axis + self.orientaions[i, 0] = start_state_arr[label][3] #for orientation + self.state_variance = self.localization_algo.state_variance_init(self.num_robots) + + + + def set_time(self, time): + self.time = time + + def set_start_moving_times(self, start_moving_times): + self.prev_prop_times = start_moving_times + + def set_state(self, state): + self.state = state + + def set_state_var(self, sigma_s): + self.state_variance = sigma_s + + + def set_orientations(self, orientaions): + self.orientaions = orientaions + + + def get_name(self): + return self.name + + def get_robot_labels(self): + return 
self.dataset_labels + + def get_status(self): + return [self.state, self.orientaions, self.state_variance] + + def get_trace_state_variance(self, robot_index): + robot_data = [self.state, self.orientaions, self.state_variance, robot_index] + return self.localization_algo.calculate_trace_state_variance(robot_data) + + def load_map(self, landmark_map): + self.landmark_map = landmark_map + + def state_update(self, robot_index, update_type, sensor_data): + robot_data = [self.state, self.orientaions, self.state_variance, robot_index] + [self.state, self.orientaions, self.state_variance] = self.localization_algo.algo_update(robot_data, update_type, sensor_data) + return [self.state, self.orientaions, self.state_variance, update_type] + + + + + + diff --git a/CoLo-AT/robots/robot_centralized.pyc b/CoLo-AT/robots/robot_centralized.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8919a640757c222bf23ab03e38b3216f6eb69b1f Binary files /dev/null and b/CoLo-AT/robots/robot_centralized.pyc differ diff --git a/CoLo-AT/robots/robot_distributive.py b/CoLo-AT/robots/robot_distributive.py new file mode 100755 index 0000000000000000000000000000000000000000..2e08c15f19feb9532b7167cd4a4fb769530c08f0 --- /dev/null +++ b/CoLo-AT/robots/robot_distributive.py @@ -0,0 +1,132 @@ +import numpy as np +import math +import os +import sys +from robot_parameters import * + +sys.path.insert(0, 'Environmen_for_Localization_Simulation/functions/algo_EKF') +class DistributiveRobot(): + + def __init__(self, labels_set, robot_index, loc_algo): + #label can choose which robot in the dataset (1-5) + #robot index in the index of the robot in the robot_list + self.label = labels_set[robot_index] #subject ID + self.labels_set = labels_set + self.robot_index = robot_index + self.num_robots = len(labels_set) + + + self.orientaions = np.matrix(np.zeros((self.num_robots,1))) + + self.localization_algo = loc_algo # create the localization algo for the robot + + #self.th_state_variance = 
self.bound_algo.initialization_for_state_variance(self.num_robots) + + self.state = np.matrix(np.zeros((2*self.num_robots,1))) + self.state_variance = self.localization_algo.state_variance_init(self.num_robots) + + self.freqs = [0, 0, 0] #[f_odometery, f_observartion, f_communication] + + + def set_starting_state(self, start_state_arr, known_other_start_loc = True): + self.time = start_state_arr[self.label][0] + if known_other_start_loc: + for i, label in enumerate(self.labels_set): + self.state[2*i,0] = start_state_arr[label][1] #for x-axis + self.state[2*i+1,0] = start_state_arr[label][2] #for y-axis + self.orientaions[i, 0] = start_state_arr[label][3] #for orientation + else: + i = self.robot_index + self.state[2*i,0] = start_state_arr[self.label][1] #for x-axis + self.state[2*i+1,0] = start_state_arr[self.label][2] #for y-axis + self.orientaions[i, 0] = start_state_arr[self.label][3] #for orientation + + #self.th_state_variance = self.bound_algo.initialization_for_state_variance(self.num_robots) + self.state_variance = self.localization_algo.state_variance_init(self.num_robots) + + + def set_state(self, state): + self.state = state + + def set_state_var(self, sigma_s): + self.state_variance = sigma_s + + def set_orientations(self, orientaions): + self.orientaions = orientaions + + def set_start_time(self, time): + self.time = time + + def set_freqs(self, freqs): + self.freqs = freqs + + def get_freqs(self): + return self.freqs + + def get_name(self): + return self.name + + def get_algo_name(self): + return self.localization_algo.get_name() + + def get_status(self): + return [self.state, self.orientaions, self.state_variance] + + def get_pos(self): + return self.state[2*self.robot_index:2*self.robot_index+2] + + def get_th_state_variance(self): + return self.th_state_variance + + def get_own_orientation(self): + return self.orientaions.item(self.robot_index) + + def get_labels_set(self): + return self.labels_set + + def get_index(self): + return self.robot_index 
+ + def get_label(self): + return self.labels_set[self.robot_index] + + def get_trace_state_variance(self): + robot_data = [self.state, self.orientaions, self.state_variance, self.robot_index] + return self.localization_algo.calculate_trace_state_variance(robot_data) + + def get_bound(self): + return self.bound_algo.calculate_trace_state_variance(self.th_state_variance, self.robot_index) + + def get_th_state_variance(self): + return self.th_state_variance + + def set_start_moving_times(self, start_moving_times): + self.prev_prop_time = start_moving_times[self.robot_index] + + def load_map(self, landmark_map): + self.landmark_map = landmark_map + + def state_update(self, update_type, sensor_data): + robot_data = [self.state, self.orientaions, self.state_variance, self.robot_index] + [self.state, self.orientaions, self.state_variance] = self.localization_algo.algo_update(robot_data, update_type, sensor_data) + return [self.state, self.orientaions, self.state_variance, update_type] + + ''' + def bound_update(self, update_type, measurement_data): + if update_type == 'propagation update' : #propagation update + v = measurement_data[1] + sigma_odo = np.matrix([[0.05, 0], [0, 0.01]]) #with respect to velocity and orientationb + sensor_covariance = [sigma_odo] + elif update_type == 'communication': + sensor_covariance = 0 + else: + #sigma_obser = np.matrix([[0.0215, 0], [0,0.01]]) #with respect to range and bearing + sigma_obser = np.matrix([[0.2, 0], [0,0.2]]) + #sensor_covariance = sigma_obser + sensor_covariance = [var_dis, var_angle] + + robot_data = [self.state, self.freqs, self.th_state_variance, self.robot_index] + sensor_data = [measurement_data, sensor_covariance] + self.th_state_variance = self.bound_algo.estimation_update(robot_data, update_type, sensor_data) + return self.th_state_variance + ''' \ No newline at end of file diff --git a/CoLo-AT/robots/robot_distributive.pyc b/CoLo-AT/robots/robot_distributive.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..b333a179b26b0fb5b38fb217928739b4d7d64ced Binary files /dev/null and b/CoLo-AT/robots/robot_distributive.pyc differ diff --git a/CoLo-AT/robots/robot_parameters.py b/CoLo-AT/robots/robot_parameters.py new file mode 100644 index 0000000000000000000000000000000000000000..0f60590bf548539297e8817dcb9681737698ae69 --- /dev/null +++ b/CoLo-AT/robots/robot_parameters.py @@ -0,0 +1,11 @@ + +max_v = 0.25 +max_a_v = 0.05 + +var_u_v = 0.25*0.25*0.05*0.05 +var_u_theta = 0.1 + +var_v = 0.25*0.25 + +var_dis = pow(0.05,2) +var_angle = pow(2.0 / 180, 2) diff --git a/CoLo-AT/robots/robot_parameters.pyc b/CoLo-AT/robots/robot_parameters.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3c36d52994e4045a8a6aef09833bc292922f897 Binary files /dev/null and b/CoLo-AT/robots/robot_parameters.pyc differ diff --git a/CoLo-AT/robots/robot_system.py b/CoLo-AT/robots/robot_system.py new file mode 100644 index 0000000000000000000000000000000000000000..ae0849fd5c92ce9fd440fe7d14de067765d2b6a8 --- /dev/null +++ b/CoLo-AT/robots/robot_system.py @@ -0,0 +1,149 @@ +import os, sys +import numpy as np + + +#sys.path.insert(0, '/Users/william/Documents/SLAM_Simulation/Simulation-Environment-for-Cooperative-Localization/functions/robots') +sys.path.append(os.path.join(os.path.dirname(__file__), ".")) +from robot_centralized import CentralizedRobotSystem +from robot_distributive import DistributiveRobot + + + + +class RobotSystem: + """ + general class for robot which can be distrbutive or centralized + """ + def __init__(self, name, robot_labels, loc_algo, bound_algo = None, distr_sys = True,): + self.name = name + self.robot_labels = robot_labels + self.num_robots = int(len(robot_labels)) + self.distr_sys = distr_sys + self.time = 0 + if distr_sys: + self.robot_sys= [] + for i, label in enumerate(self.robot_labels): + dis_robot = DistributiveRobot(robot_labels, i, loc_algo) + self.robot_sys.append(dis_robot) + # create a list of 
distibutive robot + print('created a team of ', len(self.robot_sys), ' distibutive robots ') + + else: + self.robot_sys = CentralizedRobotSystem(name, robot_labels, loc_algo) + print('created a centralized system for ', self.num_robots, ' robots ') + + def set_starting_state(self, start_state_arr): + if self.distr_sys: + for i, label in enumerate(self.robot_labels): + self.robot_sys[i].set_starting_state(start_state_arr) + else: + self.robot_sys.set_starting_state(start_state_arr) + + def request_data(self, time): + pass + ''' + def load_map(self, landmark_map): + if self.distr_sys: + for i, label in enumerate(self.robot_labels): + self.robot_sys[i].load_map(landmark_map) + else: + self.robot_sys.load_map(landmark_map) + ''' + ''' + def set_start_moving_times(self, start_moving_times): + if self.distr_sys: + for i, label in enumerate(self.robot_labels): + self.robot_sys[i].set_start_moving_times(start_moving_times) + else: + self.robot_sys.set_start_moving_times(start_moving_times) + ''' + def load_map(self, landmark_map): + self.landmark_map = landmark_map + def set_start_moving_times(self, start_moving_times): + self.prev_prop_times = start_moving_times + + def localization_update(self, rsp): + #message format: {'time': float, 'robot_index': int, 'data': {'time': float, 'velocity': float, 'orientation': float}, 'groundtruth': None} + message = rsp.get_message() + self.time = message['time'] + robot_index = message['robot_index'] + message_data = message['data'] + rsp_type = rsp.get_type() + + valid_update = True + + if rsp_type == 'odometry': #propogation update + update_type = 'propagation' + v = message_data['velocity'] + #sigma_odo = np.matrix([[5.075*v, 0], [0, 0.1]]) #with respect to velocity and orientation for utias datasets + sigma_odo = np.matrix([[0.01, 0], [0, 0.5]]) #with respect to velocity and orientation + sensor_covariance = sigma_odo + #delta_t = message_data['time'] - self.prev_prop_times[robot_index] + delta_t = message_data['delta_t'] + ''' + 
if delta_t <= 0: + print('current time: ', message_data['time'] ) + print('prev_prop_time: ', self.prev_prop_times[robot_index]) + raise Exception("Error incorrect delta_t!") + ''' + sensor_input = [delta_t, v, message_data['orientation']] + self.prev_prop_times[robot_index] = message_data['time'] + + elif rsp_type == 'measurement': + + sigma_obser = np.matrix([[0.015, 0], [0,0.01]]) #with respect to range and bearing + sensor_covariance = sigma_obser + obj_id = message_data['subject_ID'] + meas_range = message_data['measurment_range'] + bearing = message_data['bearing'] + if message_data['subject_ID'] > 5: #landmark observation + update_type = 'landmark observation' + landmark_loc = self.landmark_map.get(obj_id) + if landmark_loc != None: + sensor_input = [landmark_loc, meas_range, bearing, message_data['time'], obj_id] + valid_update = True + else: + valid_update = False + else: + update_type = 'relative observation' + if obj_id in self.robot_labels: + obser_index = self.robot_labels.index(obj_id) + valid_update = True + sensor_input = [obser_index, meas_range, bearing, message_data['time'], obj_id] + else: + valid_update = False + sensor_input = None + + elif rsp_type == 'communication': + update_type = 'communication' + obj_id = message_data['subject_ID'] + sender_idx = self.robot_labels.index(obj_id) + if self.distr_sys: + [comm_rbt_state, orientaions, comm_rbt_state_variance] = self.robot_sys[sender_idx].get_status() + else: + [comm_rbt_state, orientaions, comm_rbt_state_variance] = self.robot_sys.get_status() + sensor_input = [sender_idx, comm_rbt_state, comm_rbt_state_variance] + sensor_covariance = np.matrix([[0.01, 0], [0, 0.01]]) + else: + print('undefined message type!') + + if valid_update: + sensor_data = [sensor_input, sensor_covariance] + + if valid_update: + if self.distr_sys: + [est_states, est_orientaions, est_state_variance, update_type]= self.robot_sys[robot_index].state_update(update_type, sensor_data) + trace_sigma_s = 
self.robot_sys[robot_index].get_trace_state_variance() + else: + [est_states, est_orientaions, est_state_variance, update_type]= self.robot_sys.state_update(robot_index, update_type, sensor_data) + trace_sigma_s = self.robot_sys.get_trace_state_variance(robot_index) + else: + if self.distr_sys: + [est_states, est_orientaions, est_state_variance]= self.robot_sys[robot_index].get_status() + trace_sigma_s = self.robot_sys[robot_index].get_trace_state_variance() + else: + [est_states, est_orientaions, est_state_variance]= self.robot_sys.get_status() + trace_sigma_s = self.robot_sys.get_trace_state_variance(robot_index) + + robot_state = {'x_pos':est_states[2*robot_index], 'y_pos': est_states[2*robot_index+1], 'trace of state variance': trace_sigma_s, 'state variance': est_state_variance, 'update_type': update_type } + return robot_state diff --git a/CoLo-AT/robots/robot_system.pyc b/CoLo-AT/robots/robot_system.pyc new file mode 100644 index 0000000000000000000000000000000000000000..621918c1802b1bc0c429dd372643cf257501cd80 Binary files /dev/null and b/CoLo-AT/robots/robot_system.pyc differ diff --git a/CoLo-AT/robots/test_robot.py b/CoLo-AT/robots/test_robot.py new file mode 100644 index 0000000000000000000000000000000000000000..d3f852fc454926e9807adeeb530f89979bbe3598 --- /dev/null +++ b/CoLo-AT/robots/test_robot.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Sun Apr 8 16:47:50 2018 + +@author: william +""" +import sys +sys.path.insert(0, '/Users/william/Documents/SLAM_Simulation/Simulation-Environment-for-Cooperative-Localization/functions/robots') +import robot_system +sys.path.insert(0, '/Users/william/Documents/SLAM_Simulation/Simulation-Environment-for-Cooperative-Localization/functions/localization_algos') +import centralized_ekf +sys.path.insert(0, '/Users/william/Documents/SLAM_Simulation/Simulation-Environment-for-Cooperative-Localization/functions/dataset_manager') +from existing_dataset import Dataset + +dataset_path = 
'/Users/william/Documents/SLAM_Simulation/Simulation-Environment-for-Cooperative-Localization/datasets/MRCLAM_Dataset1/' +dataset_labels = [1,2,3,4,5] +testing_dataset = Dataset('testing', dataset_path, dataset_labels) +lm_map = testing_dataset.create_landmark_map() +print(lm_map) +st,staring_states,d, t_arr = testing_dataset.initialization_MRCLAMDatasets(20) + +dataset_labels = [1,2,3,4,5] +loc_algo = centralized_ekf.centralized_EKF('cen_ekf') +r = robot_system.RobotSystem('r',dataset_labels,loc_algo, None, distr_sys = False) +r.set_states(staring_states) + + diff --git a/CoLo-AT/ros_compatibility.py b/CoLo-AT/ros_compatibility.py new file mode 100644 index 0000000000000000000000000000000000000000..d32cc350e3750653c42fc4ec4433c88fc0834873 --- /dev/null +++ b/CoLo-AT/ros_compatibility.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Wed Aug 15 15:45:54 2018 + +@author: william +""" + + +import os, sys +import getpass +sys.path.append(os.path.join(os.path.dirname(__file__), ".")) +from dataset_manager.existing_dataset import Dataset +from simulation_process.sim_manager import SimulationManager +from robots.robot_system import RobotSystem +from simulation_process.state_recorder import StatesRecorder +from data_analysis.data_analyzer import Analyzer +from data_analysis.realtime_plot import animate_plot +from pprint import pprint + +# load algorithms +sys.path.append(os.path.join(os.path.dirname(__file__), "localization_algos")) +from centralized_ekf import Centralized_EKF +from simple_ekf import Simple_EKF +from ekf_ls_bda import EKF_LS_BDA +from ekf_gs_bound import EKF_GS_BOUND +from ekf_gs_ci import EKF_GS_CI +from ekf_gs_sci2 import EKF_GS_SCI2 + +#dataset_path = '/home/william/catkin_ws/ros_colo_dataset/' +#dataset_path = '/home/william/UTIAS-dataset/MRCLAM_Dataset3/' +#dataset_path = '/home/william/full_tests/full_test9/' +compname = getpass.getuser() +dataset_path = "/home/"+ compname +"/full_tests/full_test_v3_6/" + 
+dataset_labels = [1,2,3] +duration = 300 # duration for the simulation in sec +testing_dataset = Dataset('testing') +start_time, starting_states, dataset_data, time_arr = testing_dataset.load_MRCLAMDatasets(dataset_path, dataset_labels, duration, delay_start = 0) +loc_algo = EKF_GS_SCI2('algo') +robot = RobotSystem('robot', dataset_labels, loc_algo, distr_sys = True) + +sim = SimulationManager('PE-CoLo EKF_GS_SCI2') +state_recorder = StatesRecorder('recorder',dataset_labels) +sim.sim_process_naive(dataset_labels, testing_dataset, robot, state_recorder, simple_plot = True) + + +analyzer = Analyzer('analyzer', dataset_labels) +loc_err_per_run, trace_per_run, t_arr = analyzer.calculate_loc_err_and_trace_state_variance_per_run(state_recorder, plot_graphs = True) +robot_loc_time_unit = analyzer.robot_location_at_unit_time_interval(state_recorder) +data_in_time_order = state_recorder.get_data_in_time_order() +update_in_time_order = state_recorder.get_updata_type_in_time_order() +update_type_arr = state_recorder.get_update_type_arr() +recorded_data = state_recorder.get_recorded_data() +#recorded_dataline = [time, robot_label, est_x_pos, est_y_pos, trace_state_var, gt_x_pos, gt_y_pos, loc_err] diff --git a/CoLo-AT/ros_demo.py b/CoLo-AT/ros_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..7f435357460cff6645300c2e66a81008fc0284a0 --- /dev/null +++ b/CoLo-AT/ros_demo.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Jul 10 17:43:58 2018 + +@author: william +""" + +import os, sys +import getpass +sys.path.append(os.path.join(os.path.dirname(__file__), ".")) +from dataset_manager.existing_dataset import Dataset +from simulation_process.sim_manager import SimulationManager +from robots.robot_system import RobotSystem +from simulation_process.state_recorder import StatesRecorder +from data_analysis.data_analyzer import Analyzer +from data_analysis.realtime_plot import animate_plot +from pprint import pprint + +# 
load algorithms +sys.path.append(os.path.join(os.path.dirname(__file__), "localization_algos")) +from centralized_ekf import Centralized_EKF # works +from simple_ekf import Simple_EKF +from ekf_ls_bda import EKF_LS_BDA +from ekf_gs_bound import EKF_GS_BOUND +from ekf_gs_ci2 import EKF_GS_CI2 + +#dataset_path = '/home/william/catkin_ws/ros_colo_dataset/' +#dataset_path = '/home/william/UTIAS-dataset/MRCLAM_Dataset3/' +compname = getpass.getuser() +dataset_path = "/home/"+ compname +"/full_tests/full_test_v3_2/" + +dataset_labels = [1, 2] +duration = 240 # duration for the simulation in sec +testing_dataset = Dataset('testing') +start_time, starting_states, dataset_data, time_arr = testing_dataset.load_MRCLAMDatasets(dataset_path, dataset_labels, duration, delay_start = 0) +loc_algo = Simple_EKF('algo') +robot = RobotSystem('robot', dataset_labels, loc_algo, distr_sys = True) + +sim = SimulationManager('Simple_EKF') +state_recorder = StatesRecorder('recorder',dataset_labels) +sim.sim_process_naive(dataset_labels, testing_dataset, robot, state_recorder, simple_plot = False) + + +analyzer = Analyzer('analyzer', dataset_labels) +loc_err_per_run, trace_per_run, t_arr = analyzer.calculate_loc_err_and_trace_state_variance_per_run(state_recorder, plot_graphs = False) +robot_loc_time_unit = analyzer.robot_location_at_unit_time_interval(state_recorder) + +print("Animation Start: ") +animate_plot(dataset_labels, state_recorder, analyzer, lm = testing_dataset.get_landmark_map()) diff --git a/CoLo-AT/sim_demo.py b/CoLo-AT/sim_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..61eac01943e312a42bc12bd0823567a3c6fe9c51 --- /dev/null +++ b/CoLo-AT/sim_demo.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue Jul 10 15:37:32 2018 + +@author: william +""" + +import os, sys +sys.path.append(os.path.join(os.path.dirname(__file__), ".")) +from dataset_manager.existing_dataset import Dataset +from 
simulation_process.sim_manager import SimulationManager +from robots.robot_system import RobotSystem +from simulation_process.state_recorder import StatesRecorder +from data_analysis.data_analyzer import Analyzer +from data_analysis.realtime_plot import animate_plot +from pprint import pprint + +# load algorithms +sys.path.append(os.path.join(os.path.dirname(__file__), "localization_algos")) +from centralized_ekf2 import Centralized_EKF2 +from simple_ekf import Simple_EKF +from ekf_ls_bda import EKF_LS_BDA +from ekf_gs_bound import EKF_GS_BOUND +from ekf_gs_ci2 import EKF_GS_CI2 + +#need to verify these algo +''' +from ekf_ls_ci import EKF_LS_CI +from ekf_ls_ci2 import EKF_LS_CI2 +from ekf_gs_ci import EKF_GS_CI +from ekf_gs_sci2 import EKF_GS_SCI2 + +''' + + +dataset_path = '/home/william/UTIAS-dataset/MRCLAM_Dataset3/' + +dataset_labels = [1,2,3] +duration = 200 # duration for the simulation in sec +testing_dataset = Dataset('testing') +start_time, starting_states, dataset_data, time_arr = testing_dataset.load_MRCLAMDatasets(dataset_path, dataset_labels, duration, synthetic = False) + +freqs0 = [[10, 10, 10],[1, 1, 1],[0.5, 0.5, 0.5]] +freqs1 = [[10, 10, 10],[4, 4, 4],[0.5, 0.5, 0.5]] + + + +loc_algo = EKF_GS_CI2('algo') +robot = RobotSystem('robot gs ci', dataset_labels, loc_algo, distr_sys = True) + +sim = SimulationManager('sim') +state_recorder = StatesRecorder('gs ci schedule freqs0',dataset_labels) +sim.sim_process_schedule(dataset_labels, testing_dataset, robot, state_recorder, freqs0, simple_plot = True) + + +############################################################################## +testing_dataset.dataset_reset() +robot = RobotSystem('robot gs ci', dataset_labels, loc_algo, distr_sys = True) + +sim1 = SimulationManager('sim') +state_recorder1 = StatesRecorder('gs ci schedule freq1',dataset_labels) +sim1.sim_process_schedule(dataset_labels, testing_dataset, robot, state_recorder1, freqs1, simple_plot = True) + 
+############################################################################## +testing_dataset.dataset_reset() +robot = RobotSystem('robot gs ci', dataset_labels, loc_algo, distr_sys = True) + +sim_n = SimulationManager('sim') +state_recorder_n = StatesRecorder('gs ci naive ',dataset_labels) +sim_n.sim_process_naive(dataset_labels, testing_dataset, robot, state_recorder_n, simple_plot = True) + +print("simulation completetd") + +############################################################################## +''' +testing_dataset.dataset_reset() +bound_algo = EKF_GS_BOUND('algo') +robot_bound = RobotSystem('robot bound', dataset_labels, bound_algo, distr_sys = True) + +sim_b = SimulationManager('sim') +state_recorder_bound = StatesRecorder('gs ci bound',dataset_labels) +sim_b.sim_process_schedule(dataset_labels, testing_dataset, robot_bound, state_recorder_bound, freqs) + + +############################################################################## +testing_dataset.dataset_reset() +cen_ekf_algo = Centralized_EKF2('algo') +robot_cen = RobotSystem('robot cen', dataset_labels, cen_ekf_algo, distr_sys = False) + +sim_c = SimulationManager('sim') +state_recorder_c= StatesRecorder('cen ekf',dataset_labels) +sim_c.sim_process_naive(dataset_labels, testing_dataset, robot_cen, state_recorder_c) + + + +############################################################################## +testing_dataset.dataset_reset() +bda_algo = EKF_LS_BDA('algo') +robot_bda = RobotSystem('robot bda', dataset_labels, bda_algo, distr_sys = False) + +sim_bda = SimulationManager('sim') +state_recorder_bda= StatesRecorder('bda',dataset_labels) +sim_bda.sim_process_naive(dataset_labels, testing_dataset, robot_bda, state_recorder_bda) +''' + + +analyzer = Analyzer('analyzer', dataset_labels) +analyzer.algos_comparison([state_recorder, state_recorder1, state_recorder_n], only_trace=['gs ci bound']) +loc_err_per_run, trace_per_run, t_arr = 
analyzer.calculate_loc_err_and_trace_state_variance_per_run(state_recorder) +robot_loc_time_unit = analyzer.robot_location_at_unit_time_interval(state_recorder) + +print("start animation") +animate_plot(dataset_labels, state_recorder, analyzer) diff --git a/CoLo-AT/simulation_process/.DS_Store b/CoLo-AT/simulation_process/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..06a4db51b47233e945b29897ebb19e9c21ba1621 Binary files /dev/null and b/CoLo-AT/simulation_process/.DS_Store differ diff --git a/CoLo-AT/simulation_process/__init__.py b/CoLo-AT/simulation_process/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b6e690fd59145ce8900fd9ab8d8a996ee7d33834 --- /dev/null +++ b/CoLo-AT/simulation_process/__init__.py @@ -0,0 +1 @@ +from . import * diff --git a/CoLo-AT/simulation_process/__pycache__/__init__.cpython-35.pyc b/CoLo-AT/simulation_process/__pycache__/__init__.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f0f986567d17783310f451baffb6572a4c29fd9 Binary files /dev/null and b/CoLo-AT/simulation_process/__pycache__/__init__.cpython-35.pyc differ diff --git a/CoLo-AT/simulation_process/__pycache__/__init__.cpython-36.pyc b/CoLo-AT/simulation_process/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d0372df15aae7b98875fd6917b810441fb19ca4 Binary files /dev/null and b/CoLo-AT/simulation_process/__pycache__/__init__.cpython-36.pyc differ diff --git a/CoLo-AT/simulation_process/__pycache__/sim_manager.cpython-35.pyc b/CoLo-AT/simulation_process/__pycache__/sim_manager.cpython-35.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b79a7a4b5bb36b2e46ded786be1a2c770c2d019d Binary files /dev/null and b/CoLo-AT/simulation_process/__pycache__/sim_manager.cpython-35.pyc differ diff --git a/CoLo-AT/simulation_process/__pycache__/sim_manager.cpython-36.pyc 
# NOTE(review): the surrounding patch hunks here are binary .pyc deltas for
# simulation_process/__pycache__ (omitted — generated files should not be
# committed). The patch then introduces data_generation.py, reconstructed
# and fixed below; the original module had NO imports, so every call to
# sqrt/cos/sin/atan2/random.* and every read of d_max, delta_t, max_v,
# max_a_v, var_u_v, var_dis, var_angle raised NameError at runtime.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 13 16:11:36 2018

@author: william
"""
from math import cos, sin, atan2, sqrt

from numpy import random


class DataGenerator():
    """Generate simulated odometry and range/bearing data for a robot
    confined to a circular map centered at the origin.

    The map/noise constants the original referenced as (undefined) module
    globals are now constructor parameters. Defaults mirror the sibling
    script old_code/estimation_process_self_generated_data.py (d_max = 25,
    delta_t = 0.5); the remaining defaults are assumptions — TODO confirm
    against robot_parameters.py.
    """

    def __init__(self, d_max=25, delta_t=0.5, max_v=1.0, max_a_v=1.0,
                 var_u_v=0.1, var_dis=0.1, var_angle=0.1):
        # d_max: radius of the circular map [m]
        # delta_t: propagation time step [s]
        # max_v / max_a_v: bounds for sampled linear / angular velocity
        # var_u_v: actuation-noise variance on linear velocity
        # var_dis / var_angle: measurement-noise variances (range, bearing)
        self.d_max = d_max
        self.delta_t = delta_t
        self.max_v = max_v
        self.max_a_v = max_a_v
        self.var_u_v = var_u_v
        self.var_dis = var_dis
        self.var_angle = var_angle

    def within_map(self, loc):
        '''
        For runtime self_generated data.
        Parameters:
        loc: array, shape = (2,1)
        robot's location for [x, y]
        Return: Boolean
        True if within the map, False if not
        '''
        center_point = [0, 0]
        distance = sqrt(pow(loc[0] - center_point[0], 2)
                        + pow(loc[1] - center_point[1], 2))
        # boundary (distance == d_max) counts as inside, as before
        return distance <= self.d_max

    def generate_propgation_data(self, current_loc, current_orientation):
        '''
        To generate propagation data randomly so that the robot stays
        within the predefined map. *assuming orientation is perfect
        (Method name typo "propgation" kept for API compatibility.)
        Parameters:
        current_loc: array, shape = (2,1)
        robot's current location for [x, y]
        current_orientation: float
        robot's current orientation in radian
        Return: [actual_v, meas_v, orientation, actual_loc]
        '''
        # Rejection-sample velocities until the propagated position stays
        # on the map; draw order (uniform v, uniform a_v, normal noise)
        # matches the original do-while emulation.
        while True:
            meas_v = random.uniform(-self.max_v, self.max_v)
            meas_a_v = random.uniform(-self.max_a_v, self.max_a_v)
            actual_v = meas_v + random.normal(0, sqrt(self.var_u_v))
            pre_update_position = [
                current_loc[0] + cos(current_orientation) * actual_v * self.delta_t,
                current_loc[1] + sin(current_orientation) * actual_v * self.delta_t]
            # original called the bare name within_map -> NameError; must
            # go through self
            if self.within_map(pre_update_position):
                break

        orientation = current_orientation + meas_a_v * self.delta_t
        actual_loc = pre_update_position
        return [actual_v, meas_v, orientation, actual_loc]

    def generate_measurement_data(self, observer_loc, observee_loc, observer_orientation):
        '''
        Return noisy [distance, bearing] between two robots.
        NOTE(review): deltas are observer - observee, matching the sibling
        old_code generator but opposite to estimation_process_cont_multirobot's
        generate_observation_data — verify the intended bearing convention.
        '''
        delta_x = observer_loc[0] - observee_loc[0]
        delta_y = observer_loc[1] - observee_loc[1]
        dis = sqrt(delta_y * delta_y + delta_x * delta_x) + random.normal(0, sqrt(self.var_dis))
        bearing = atan2(delta_y, delta_x) + random.normal(0, sqrt(self.var_angle)) - observer_orientation
        return [dis, bearing]
# NOTE(review): in the original patch, this chunk continued with the
# preamble of old_code/estimation_process_centralized.py (numpy/math/os/sys
# imports, a sys.path.insert, and `import class_centralized_system`),
# which belongs to that file, not to this class.
+def is_pos_def(x): + return np.all(np.linalg.eigvals(x) > 0) + +def find_min_match_dataline(loc_dic, datas): + + min_index = min(loc_dic, key=lambda k: loc_dic[k][1]) + min_loc = loc_dic[min_index][0] + return [min_index, min_loc] + +def update_loc_dic(robot_index, loc_dic, datas): + next_loc = loc_dic[robot_index][0] + 1 #next location if possible in the data file + try: + loc_dic[robot_index] = [next_loc, datas[robot_index][next_loc]['time']] + except IndexError: + #print 'delect the robot from request list' + del loc_dic[robot_index] + + #return next_loc + return 1 + +def estimation_process_centralized(landmark_map, start_time, start_state_arr, measurement_data, odometry_data, centralized_system, duration, robots_valid_lm_obser, comm_fail_rate): + + algo_name = centralized_system.get_algo_name() + dataset_labels = centralized_system.get_robot_labels() + general_estimation_file_name = 'est' + algo_name + + print("Estimation process Start: "+algo_name) + + num_rela_obser = 0 + + robots_invalid_lm_obser = list(set(dataset_labels) - set(robots_valid_lm_obser)) + + est_result_file_arr = [] + for i, robot_label in enumerate(dataset_labels): #text files to keep track of the robots' paths + estimation_file_name = general_estimation_file_name+'%d' %robot_label + file="/Results_files/estimation_results/"+estimation_file_name+".txt" + path=os.getcwd()+file + est_result_file = open(path, "w") + est_result_file_arr.append(est_result_file) + + num_robots = len(dataset_labels) + est_result_array = [[] for i in range(num_robots)] + + # use loc dicrionary to keep track of the location(which line)in the data files + odo_req_loc_dic = {} + meas_req_loc_dic = {} + + #feeding initialization info into each robot and request array + #req_loc_dic= {robot_index: [loc, time]} + + for i in range(num_robots): + start_loc = 1 + odo_req_loc_dic[i] = [start_loc, odometry_data[i][start_loc]['time']] + start_loc = 0 + meas_req_loc_dic[i] = [start_loc, measurement_data[i][start_loc]['time']] + 
+ + # recording starting state + #[s, orinetations, sigma_s] = centralized_system.get_status() + for i in range(num_robots): + sigma_s_trace = centralized_system.get_trace_state_variance(i) + est_result_file_arr[i].write(str(start_time)+ '\t'+ str(start_state_arr[i][0]) + '\t' + str(start_state_arr[i][1]) + '\t' +str(sigma_s_trace) + '\n') + est_result = {'time':start_time, 'x_pos':start_state_arr[i][0], 'y_pos':start_state_arr[i][1], 'sigma_s_trace':sigma_s_trace} + est_result_array[i].append(est_result) + + t = start_time + while t < start_time + duration and len(odo_req_loc_dic) != 0 and len(meas_req_loc_dic) != 0: + [odo_index, odo_loc] = find_min_match_dataline(odo_req_loc_dic, odometry_data) + odo_time = odometry_data[odo_index][odo_loc]['time'] + [meas_index, meas_loc] = find_min_match_dataline(meas_req_loc_dic, measurement_data) + meas_time = measurement_data[meas_index][meas_loc]['time'] + + if odo_time <= meas_time: + #propagtion update + t = odo_time + delta_t = odometry_data[odo_index][odo_loc]['time'] - odometry_data[odo_index][odo_loc-1]['time'] #look the the previous like to find time interval + velocity = odometry_data[odo_index][odo_loc]['velocity'] + orientation = odometry_data[odo_index][odo_loc]['orientation'] + propagation_data = [delta_t, velocity, orientation] + [s, orinetations, sigma_s] = centralized_system.state_update('propagation update', propagation_data, odo_index) + + + #recording datas + j = odo_index * 2 + robot_pos = s[j:j+2,0] + sigma_s_trace = centralized_system.get_trace_state_variance(odo_index) + + est_result_file_arr[odo_index].write(str(t)+ '\t'+ str(robot_pos.item(0,0)) + '\t' + str(robot_pos.item(1,0)) + '\t' +str(sigma_s_trace) + '\n') + est_result = {'time':t, 'x_pos':robot_pos.item(0,0), 'y_pos':robot_pos.item(1,0), 'sigma_s_trace':sigma_s_trace} + est_result_array[odo_index].append(est_result) + + update_loc_dic(odo_index, odo_req_loc_dic, odometry_data) + + else: + pass + #measurement update + t = meas_time + 
subject_ID = measurement_data[meas_index][meas_loc]['subject_ID'] + measurment_range = measurement_data[meas_index][meas_loc]['measurment_range'] + bearing = measurement_data[meas_index][meas_loc]['bearing'] + + if int(subject_ID) > 5: + pass + #landmark observation + if meas_index+1 in robots_valid_lm_obser: + landmark_loc = landmark_map[subject_ID] + landmark_measeurement_data = [landmark_loc, measurment_range, bearing] + + [s, orinetations, sigma_s] = centralized_system.state_update('landmark observation update', landmark_measeurement_data, meas_index) + + #recording datas + j = meas_index * 2 + robot_pos = s[j:j+2,0] + sigma_s_trace = centralized_system.get_trace_state_variance(meas_index) + + est_result_file_arr[meas_index].write(str(t)+ '\t'+ str(robot_pos.item(0,0)) + '\t' + str(robot_pos.item(1,0)) + '\t' +str(sigma_s_trace) + ' obser\n') + est_result = {'time':t, 'x_pos':robot_pos.item(0,0), 'y_pos':robot_pos.item(1,0), 'sigma_s_trace':sigma_s_trace} + est_result_array[meas_index].append(est_result) + + else: + #relative observation + + obser_index = int(subject_ID)-1 + relative_measeurement_data = [obser_index, measurment_range, bearing] + + if ((algo_name == 'LS-Cen') or ((meas_index+1 in robots_invalid_lm_obser) and (obser_index+1 in robots_valid_lm_obser) and (np.random.uniform() > comm_fail_rate))): + [s, orinetations, sigma_s] = centralized_system.state_update('relative observation update', relative_measeurement_data, meas_index) + + #recording datas + j = meas_index * 2 + robot_pos = s[j:j+2,0] + sigma_s_trace = centralized_system.get_trace_state_variance(meas_index) + + est_result_file_arr[meas_index].write(str(t)+ '\t'+ str(robot_pos.item(0,0)) + '\t' + str(robot_pos.item(1,0)) + '\t' +str(sigma_s_trace) + ' obser\n') + est_result = {'time':t, 'x_pos':robot_pos.item(0,0), 'y_pos':robot_pos.item(1,0), 'sigma_s_trace':sigma_s_trace} + est_result_array[meas_index].append(est_result) + num_rela_obser +=1 + + + else: + pass + + 
update_loc_dic(meas_index, meas_req_loc_dic, measurement_data) + + for j in range(num_robots): + est_result_file_arr[j].close() + + print ("Estimation Process Finished # of relative observations: " + str(num_rela_obser)) + + return est_result_array \ No newline at end of file diff --git a/CoLo-AT/simulation_process/old_code/estimation_process_cont_multirobot.py b/CoLo-AT/simulation_process/old_code/estimation_process_cont_multirobot.py new file mode 100755 index 0000000000000000000000000000000000000000..26d3aff45ba03f0170ad33360377aca303c961e3 --- /dev/null +++ b/CoLo-AT/simulation_process/old_code/estimation_process_cont_multirobot.py @@ -0,0 +1,201 @@ + +import numpy as np +import numpy.matlib +import math +import os +import sys +#from numpy import linalg as LA +from random import randint + +sys.path.insert(0, 'Environmen_for_Localization_Simulation/functions/algo_EKF') + +class global_time(): + """docstring for """ + def __init__(self, start_time, robot_list): + self.all_possible_freqs = np.array([[round(f, 3) for f in robot.get_freqs()] for robot in robot_list]) + self.all_possible_next_time = np.array([[round(start_time+1/f, 3) for f in robot.get_freqs()] for robot in robot_list]) + def get_time(self): + self.least_index = np.unravel_index(np.argmin(self.all_possible_next_time), self.all_possible_next_time.shape) + return [self.least_index, self.all_possible_next_time[self.least_index]] #[index, time] + def get_all_freqs(self): + return self.all_possible_freqs + def time_update(self): + self.all_possible_next_time[self.least_index] = self.all_possible_next_time[self.least_index] + 1/self.all_possible_freqs[self.least_index] + + +def find_nearest_time_idx_on_data_array(data_array, start_idx, time_target): + """only for increasing order array""" + i = start_idx + if data_array[i].get('time') < time_target: + while data_array[i].get('time') < time_target: + i = i+1 + if data_array[i].get('time')-time_target < time_target-data_array[i-1].get('time'): + return i + 
else: + return i-1 + else: + return i + +def get_robot_actual_loc(groundtruth_data, groundtruth_indices, robot_index, current_time): + """find the actual location at a given time from groudtruth data""" + gt_index = find_nearest_time_idx_on_data_array(groundtruth_data[robot_index], groundtruth_indices[robot_index], current_time) #find the index on odometry data with closest time + robot_pos = [groundtruth_data[robot_index][gt_index].get('x_pos'), groundtruth_data[robot_index][gt_index].get('y_pos'), groundtruth_data[robot_index][gt_index].get('orientation')] + groundtruth_indices[robot_index] = gt_index + return [robot_pos, groundtruth_indices] + +def generate_observation_data(observer_pos, observee_pos, measurement_deviation): + [x_1, y_1, thera_1] = observer_pos + [x_2, y_2, thera_2] = observee_pos + distance = math.sqrt((x_1-x_2)*(x_1-x_2)+(y_1-y_2)*(y_1-y_2)) + np.random.normal(0, measurement_deviation) + bearing = math.atan2((y_2-y_1),(x_2-x_1)) - thera_1 + np.random.normal(0, measurement_deviation) + return[distance, bearing] + + +def estimation_process_cont_multirobot(landmark_map, start_time, start_state_arr, measurement_data, odometry_data, groundtruth_data, robot_list, duration, robots_valid_lm_obser, comm_fail_rate): + print landmark_map + algo_name = robot_list[0].get_algo_name() + dataset_labels = robot_list[0].get_labels_set() + general_estimation_file_name = 'est' + algo_name + + + print("Estimation process Start: "+algo_name) + num_comm = 0 + + robots_invalid_lm_obser = list(set(dataset_labels) - set(robots_valid_lm_obser)) + + num_robot = len(robot_list) + odometry_indices = np.zeros(num_robot, dtype = np.int) # bookkeeping a pointer for each robot for odometry data for easier searching + groundtruth_indices = np.zeros(num_robot, dtype = np.int) + + est_result_array = [[] for i in range(num_robot)] + + + for i, robot in enumerate(robot_list): + robot_freq = [20,5,1] + robot.set_freqs(robot_freq) + print robot_freq + + #sigma_s_trace = 
robot.get_trace_state_variance() + #est_result = {'time':start_time, 'x_pos':start_state_arr[i][0], 'y_pos':start_state_arr[i][1], 'sigma_s_trace':sigma_s_trace} + #est_result_array[i].append(est_result) + #start_time = 0 + envir_time = global_time(start_time, robot_list) + all_possible_freqs = envir_time.get_all_freqs() + [current_index, current_time] = envir_time.get_time() + + print 'current_index' + print current_index + + measurment_range_arr = [] + measurment_range = 0 + measurement_deviation = 0.2 + while current_time < start_time + duration: + robot_index = current_index[0] # determind which robot to perform opertations + if current_index[1] == 0: #odometry update + pass + + od_index = find_nearest_time_idx_on_data_array(odometry_data[robot_index], odometry_indices[robot_index], current_time) #find the index on odometry data with closest time + odometry_indices[robot_index] = od_index + delta_t = 1/all_possible_freqs[robot_index][0] # time interval = 1/freq_odometry + velocity = odometry_data[robot_index][od_index].get('velocity') + orientation = odometry_data[robot_index][od_index].get('orientation') + + propagation_data = [delta_t, velocity, orientation] + [s, orinetations, sigma_s] = robot_list[robot_index].state_update('propagation update', propagation_data) + th_sigma_s = robot_list[robot_index].bound_update('propagation update', propagation_data) + + + #record the data + robot_pos = robot_list[robot_index].get_pos() + sigma_s_trace = robot_list[robot_index].get_trace_state_variance() + bound = robot_list[robot_index].get_bound() + est_result = {'time':current_time, 'x_pos':robot_pos.item(0,0), 'y_pos':robot_pos.item(1,0), 'sigma_s_trace':sigma_s_trace, 'bound': bound} + #print est_result + est_result_array[robot_index].append(est_result) + + ''' + if robot_index == 0: + print(str(robot_index) + ' prop') + print est_result + ''' + + elif current_index[1] == 1: #observation update + [self_loc, groundtruth_indices] = 
get_robot_actual_loc(groundtruth_data, groundtruth_indices, robot_index, current_time) + + #landmark observation + subject_ID = '15' + if robot_index == 0: + landmark_loc = landmark_map[subject_ID] + landmark_loc = [landmark_loc[0], landmark_loc[1], 0] #landmark doesn't have orinataion here + [measurment_range, bearing] = generate_observation_data(self_loc, landmark_loc, measurement_deviation) + landmark_measeurement_data = [landmark_loc, measurment_range, bearing] + [s, orinetations, sigma_s] = robot_list[robot_index].state_update('landmark observation update', landmark_measeurement_data) + th_sigma_s = robot_list[robot_index].bound_update('landmark observation update', landmark_measeurement_data) + ''' + #relative observation + target_robot_index = 1 + [target_loc, groundtruth_indices] = get_robot_actual_loc(groundtruth_data, groundtruth_indices, target_robot_index, current_time) + [measurment_range, bearing] = generate_observation_data(self_loc, target_loc, measurement_deviation) + relative_measeurement_data = [target_robot_index, measurment_range, bearing] + [s, orinetations, sigma_s] = robot_list[robot_index].state_update('relative observation update', relative_measeurement_data) + th_sigma_s = robot_list[robot_index].bound_update('relative observation update', relative_measeurement_data) + ''' + #record the data + robot_pos = robot_list[robot_index].get_pos() + sigma_s_trace = robot_list[robot_index].get_trace_state_variance() + bound = robot_list[robot_index].get_bound() + est_result = {'time':current_time, 'x_pos':robot_pos.item(0,0), 'y_pos':robot_pos.item(1,0), 'sigma_s_trace':sigma_s_trace, 'bound': bound} + est_result_array[robot_index].append(est_result) + + #print(str(robot_index) + ' obser') + #print est_result + ''' + else: #communication update + pass + + if robot_index == 1: # 2nd robot + target_robot_index = 2 + [target_loc, groundtruth_indices] = get_robot_actual_loc(groundtruth_data, groundtruth_indices, target_robot_index, current_time) + 
[measurment_range, bearing] = generate_observation_data(self_loc, target_loc, measurement_deviation) + relative_measeurement_data = [target_robot_index, measurment_range, bearing] + [s, orinetations, sigma_s] = robot_list[robot_index].state_update('relative observation update', relative_measeurement_data) + th_sigma_s = robot_list[robot_index].bound_update('relative observation update', relative_measeurement_data) + + #record the data + robot_pos = robot_list[robot_index].get_pos() + sigma_s_trace = robot_list[robot_index].get_trace_state_variance() + bound = robot_list[robot_index].get_bound() + est_result = {'time':current_time, 'x_pos':robot_pos.item(0,0), 'y_pos':robot_pos.item(1,0), 'sigma_s_trace':sigma_s_trace, 'bound': bound} + est_result_array[robot_index].append(est_result) + + + sender_robot_index = robot_index + receiver_index = 0 + [sender_s, sender_orientaions, sender_state_variance] = robot_list[sender_robot_index].get_status() + comm_data = [sender_s, sender_state_variance] + [s, orinetations, sigma_s] = robot_list[receiver_index].state_update('communication', comm_data) + comm_data = robot_list[sender_robot_index].get_th_state_variance() + th_sigma_s = robot_list[receiver_index].bound_update('communication', comm_data) + + #record the data + robot_pos = robot_list[receiver_index].get_pos() + sigma_s_trace = robot_list[receiver_index].get_trace_state_variance() + bound = robot_list[receiver_index].get_bound() + est_result = {'time':current_time, 'x_pos':robot_pos.item(0,0), 'y_pos':robot_pos.item(1,0), 'sigma_s_trace':sigma_s_trace, 'bound': bound} + est_result_array[receiver_index].append(est_result) + + #print(str(robot_index) + ' comm') + #print est_result + ''' + measurment_range_arr.append(measurment_range) + prev_time = current_time + envir_time.time_update() + [current_index, current_time] = envir_time.get_time() + if prev_time > current_time: + print('prev: ' + str(prev_time) + '; current' + str(current_time)) + sys.exit("incorrect time 
handling!") + + #print ("Estimation Process Finished # of comm: " + str(num_comm)) + print 'measurment_range_arr' + print max(measurment_range_arr) + return est_result_array \ No newline at end of file diff --git a/CoLo-AT/simulation_process/old_code/estimation_process_multirobot.py b/CoLo-AT/simulation_process/old_code/estimation_process_multirobot.py new file mode 100755 index 0000000000000000000000000000000000000000..f2c552ff2f2778017a0227da0f632cee90803cdc --- /dev/null +++ b/CoLo-AT/simulation_process/old_code/estimation_process_multirobot.py @@ -0,0 +1,175 @@ +import numpy as np +import numpy.matlib +import math +import os +import sys +from numpy import linalg as LA +from random import randint + + +sys.path.insert(0, 'Environmen_for_Localization_Simulation/functions/algo_EKF') + +def is_pos_def(x): + return np.all(np.linalg.eigvals(x) > 0) + +def find_min_match_dataline(loc_dic, datas): + min_index = min(loc_dic, key=lambda k: loc_dic[k][1]) + min_loc = loc_dic[min_index][0] + return [min_index, min_loc] + +def update_loc_dic(robot_index, loc_dic, datas): + next_loc = loc_dic[robot_index][0] + 1 #next location if possible in the data file + try: + loc_dic[robot_index] = [next_loc, datas[robot_index][next_loc]['time']] + except IndexError: + #print 'delect the robot from request list' + del loc_dic[robot_index] + + return next_loc + +def estimation_process_multirobot(landmark_map, start_time, start_state_arr, measurement_data, odometry_data, robot_list, duration, robots_valid_lm_obser, comm_fail_rate): + + algo_name = robot_list[0].get_algo_name() + dataset_labels = robot_list[0].get_labels_set() + general_estimation_file_name = 'est' + algo_name + + + print("Estimation process Start: "+algo_name) + num_comm = 0 + + robots_invalid_lm_obser = list(set(dataset_labels) - set(robots_valid_lm_obser)) + + + est_result_file_arr = [] + for i, robot in enumerate(robot_list): #text files to keep track of the robots' paths + estimation_file_name = 
general_estimation_file_name+'%d' %robot.get_label() + file="/Results_files/estimation_results/"+estimation_file_name+".txt" + path=os.getcwd()+file + est_result_file = open(path, "w") + est_result_file_arr.append(est_result_file) + + num_robot = len(robot_list) + est_result_array = [[] for i in range(num_robot)] + + # use loc array to keep track of the location(which line)in the data files + odo_req_loc_dic = {} + meas_req_loc_dic = {} + + #feeding initialization info into each robot and request array + #req_loc_dic= {robot_index: [loc, time]} + + for i, robot in enumerate(robot_list): + start_loc = 1 + odo_req_loc_dic[i] = [start_loc, odometry_data[i][start_loc]['time']] + start_loc = 0 + meas_req_loc_dic[i] = [start_loc, measurement_data[i][start_loc]['time']] + + #[s, orinetations, sigma_s] = robot.get_status() + sigma_s_trace = robot.get_trace_state_variance() + est_result_file_arr[i].write(str(start_time)+ '\t'+ str(start_state_arr[i][0]) + '\t' + str(start_state_arr[i][1]) + '\t' +str(sigma_s_trace) + '\n') + est_result = {'time':start_time, 'x_pos':start_state_arr[i][0], 'y_pos':start_state_arr[i][1], 'sigma_s_trace':sigma_s_trace} + est_result_array[i].append(est_result) + + + t = start_time + while t < start_time + duration and len(odo_req_loc_dic) != 0 and len(meas_req_loc_dic) != 0: + [odo_index, odo_loc] = find_min_match_dataline(odo_req_loc_dic, odometry_data) + odo_time = odometry_data[odo_index][odo_loc]['time'] + [meas_index, meas_loc] = find_min_match_dataline(meas_req_loc_dic, measurement_data) + meas_time = measurement_data[meas_index][meas_loc]['time'] + + if odo_time <= meas_time: + #propagtion update + t = odo_time + delta_t = odometry_data[odo_index][odo_loc]['time'] - odometry_data[odo_index][odo_loc-1]['time'] #look at the previous time stamp to find the time interval + velocity = odometry_data[odo_index][odo_loc]['velocity'] + orientation = odometry_data[odo_index][odo_loc]['orientation'] + propagation_data = [delta_t, velocity, 
orientation] + [s, orinetations, sigma_s] = robot_list[odo_index].state_update('propagation update', propagation_data) + + #recording datas + j = odo_index * 2 + robot_pos = s[j:j+2,0] + sigma_s_trace = robot_list[odo_index].get_trace_state_variance() + + est_result_file_arr[odo_index].write(str(t)+ '\t'+ str(robot_pos.item(0,0)) + '\t' + str(robot_pos.item(1,0)) + '\t' +str(sigma_s_trace) + '\n') + est_result = {'time':t, 'x_pos':robot_pos.item(0,0), 'y_pos':robot_pos.item(1,0), 'sigma_s_trace':sigma_s_trace} + est_result_array[odo_index].append(est_result) + + update_loc_dic(odo_index, odo_req_loc_dic, odometry_data) + + else: + #measurement update + pass + + t = meas_time + measurment_range = measurement_data[meas_index][meas_loc]['measurment_range'] + subject_ID = measurement_data[meas_index][meas_loc]['subject_ID'] + bearing = measurement_data[meas_index][meas_loc]['bearing'] + + if int(subject_ID) > 5: + pass + #landmark observation + if meas_index+1 in robots_valid_lm_obser: + landmark_loc = landmark_map[subject_ID] + landmark_measeurement_data = [landmark_loc, measurment_range, bearing] + [s, orinetations, sigma_s] = robot_list[meas_index].state_update('landmark observation update', landmark_measeurement_data) + + #recording datas + j = meas_index * 2 + robot_pos = s[j:j+2,0] + sigma_s_trace = robot_list[meas_index].get_trace_state_variance() + + est_result_file_arr[meas_index].write(str(t)+ '\t'+ str(robot_pos.item(0,0)) + '\t' + str(robot_pos.item(1,0)) + '\t' +str(sigma_s_trace) + ' lm_obser\n') + est_result = {'time':t, 'x_pos':robot_pos.item(0,0), 'y_pos':robot_pos.item(1,0), 'sigma_s_trace':sigma_s_trace} + est_result_array[meas_index].append(est_result) + + + else: + pass + #relative observation + observee_index = int(subject_ID)-1 + relative_measeurement_data = [observee_index, measurment_range, bearing] + [s, orinetations, sigma_s] = robot_list[meas_index].state_update('relative observation update', relative_measeurement_data) + + #recording datas 
+ j = meas_index * 2 + robot_pos = s[j:j+2,0] + sigma_s_trace = robot_list[meas_index].get_trace_state_variance() + + est_result_file_arr[meas_index].write(str(t)+ '\t'+ str(robot_pos.item(0,0)) + '\t' + str(robot_pos.item(1,0)) + '\t' +str(sigma_s_trace) + ' rel_obser\n') + est_result = {'time':t, 'x_pos':robot_pos.item(0,0), 'y_pos':robot_pos.item(1,0), 'sigma_s_trace':sigma_s_trace} + est_result_array[meas_index].append(est_result) + + + if (meas_index+1 in robots_invalid_lm_obser) and (observee_index+1 in robots_valid_lm_obser) and (np.random.uniform() > comm_fail_rate): + comm_robot_index = meas_index + sender_robot_index = observee_index + + + [sender_s, sender_orientaions, sender_state_variance] = robot_list[sender_robot_index].get_status() + comm_data = [sender_s, sender_state_variance] + + [s, orinetations, sigma_s] = robot_list[comm_robot_index].state_update('communication', comm_data) + + #recording datas + j = comm_robot_index * 2 + robot_pos = s[j:j+2,0] + sigma_s_trace = robot_list[comm_robot_index].get_trace_state_variance() + + est_result_file_arr[comm_robot_index].write(str(t)+ '\t'+ str(robot_pos.item(0,0)) + '\t' + str(robot_pos.item(1,0)) + '\t' +str(sigma_s_trace) + ' comm w/ robot' + str(meas_index+1) +'\n') + est_result = {'time':t, 'x_pos':robot_pos.item(0,0), 'y_pos':robot_pos.item(1,0), 'sigma_s_trace':sigma_s_trace} + est_result_array[comm_robot_index].append(est_result) + + num_comm+=1 + + update_loc_dic(meas_index, meas_req_loc_dic, measurement_data) + + + for j in range(len(robot_list)): + est_result_file_arr[j].close() + + + print ("Estimation Process Finished # of comm: " + str(num_comm)) + + return est_result_array \ No newline at end of file diff --git a/CoLo-AT/simulation_process/old_code/estimation_process_self_generated_data.py b/CoLo-AT/simulation_process/old_code/estimation_process_self_generated_data.py new file mode 100644 index 0000000000000000000000000000000000000000..2b224c9a62c4588032377b895d534633ff51086f --- /dev/null 
import numpy as np
from numpy import random
from numpy import matrix
from math import cos, sin, atan2, sqrt
import os
import sys
from robot_parameters import *  # provides max_v, max_a_v, var_u_v, var_dis, var_angle — TODO confirm

# Radius of the circular map [m]; robots are rejected-sampled to stay inside it.
d_max = 25
# Simulation time step [s].
delta_t = 0.5


class Landmark:
    """A static landmark with an index and a 2x1 position matrix."""
    def __init__(self, index, position):
        self.index = index
        self.position = position


def within_map(loc):
    """Return True iff the 2D location `loc` lies inside the circular map of radius d_max."""
    center_point = [0, 0]
    return sqrt(pow(loc[0] - center_point[0], 2) + pow(loc[1] - center_point[1], 2)) <= d_max


def generate_propgation_data(current_loc, current_orientation):
    """Draw a random motion command and return [actual_v, meas_v, orientation, actual_loc].

    Velocities are re-sampled until the resulting position stays within the map.
    (Name keeps the historical 'propgation' spelling for caller compatibility.)
    """
    # assuming orientation is perfect
    while True:
        [meas_v, meas_a_v] = [random.uniform(-max_v, max_v), random.uniform(-max_a_v, max_a_v)]
        actual_v = meas_v + random.normal(0, sqrt(var_u_v))  # measured velocity plus process noise
        pre_update_position = [current_loc[0] + cos(current_orientation) * actual_v * delta_t,
                               current_loc[1] + sin(current_orientation) * actual_v * delta_t]
        if within_map(pre_update_position):
            break
    orientation = current_orientation + meas_a_v * delta_t
    actual_loc = pre_update_position
    return [actual_v, meas_v, orientation, actual_loc]


def generate_measurement_data(observer_loc, observee_loc, observer_orientation):
    """Return a noisy [range, bearing] measurement from observer to observee.

    NOTE(review): delta is computed observer - observee (not the usual
    observee - observer); downstream filters appear to expect this — confirm.
    """
    delta_x = observer_loc[0] - observee_loc[0]
    delta_y = observer_loc[1] - observee_loc[1]
    dis = sqrt(delta_y * delta_y + delta_x * delta_x) + random.normal(0, sqrt(var_dis))
    bearing = atan2(delta_y, delta_x) + random.normal(0, sqrt(var_angle)) - observer_orientation
    return [dis, bearing]


def estimation_process_multirobot_generated_data(robots_list, iteration, duration):
    """Run `iteration` Monte-Carlo simulations of `duration` steps on self-generated data.

    Args:
        robots_list: robot objects exposing set_state/set_orientations/get_pos/
            get_own_orientation/state_update/bound_update/get_trace_state_variance.
        iteration: number of Monte-Carlo runs averaged over.
        duration: number of discrete time steps per run.

    Returns:
        [error_arr, sigma_tr_arr]: per-step averaged RMS location error and
        averaged sqrt of the (normalized) covariance trace, each of length `duration`.
    """
    random.seed(9)  # fixed seed -> reproducible experiments
    num_robots = len(robots_list)
    sigma_tr_arr = [0] * duration
    sigma_th_tr_arr = [0] * duration  # reserved for the theoretical bound (currently unused)
    error_arr = [0] * duration

    for i in range(iteration):
        print('iteration CSK', i)  # fixed: was Python 2 print statements

        # Stacked state: 2 entries (x, y) per robot -> 10x1 column vector for 5 robots.
        initial_state = matrix([1, 1, 1, 2, 2, 1, -1, -1, 1, 3], dtype=float).T
        # fixed: copy so mutating the ground truth does not alias initial_state
        robots_act_loc = initial_state.copy()
        initial_oriantation = matrix([0, 0, 0, 0, 0], dtype=float).T
        for robot in robots_list:
            robot.set_state(initial_state)
            robot.set_orientations(initial_oriantation)

        M = 1  # number of landmarks
        landmarks = [Landmark(m, matrix([0.01, 0.02], dtype=float).getT()) for m in range(M)]

        for t in range(duration):
            # --- propagation: every robot moves and updates its own estimate ---
            for j, robot in enumerate(robots_list):
                [act_v, meas_v, orientation, actual_loc] = generate_propgation_data(robot.get_pos(), robot.get_own_orientation())
                # fixed: robot j occupies rows 2j and 2j+1 (was [j:j+2], wrong for j >= 1)
                robots_act_loc[2 * j, 0] = actual_loc[0]
                robots_act_loc[2 * j + 1, 0] = actual_loc[1]
                propagation_data = [delta_t, meas_v, orientation]
                [s, orinetations, sigma_s] = robot.state_update('propagation update', propagation_data)

            # --- robot 0 observes landmark 0 ---
            robot_idx = 0
            [measurment_range, bearing] = generate_measurement_data(robots_list[robot_idx].get_pos(), landmarks[0].position, robots_list[robot_idx].get_own_orientation())
            landmark_measeurement_data = [landmarks[0].position, measurment_range, bearing]
            [s, orinetations, sigma_s] = robots_list[robot_idx].state_update('landmark observation update', landmark_measeurement_data)

            # --- robot 2 observes robot 0 and robot 1 ---
            robot_idx = 2
            for observee_idx in (0, 1):
                [measurment_range, bearing] = generate_measurement_data(robots_list[robot_idx].get_pos(), robots_list[observee_idx].get_pos(), robots_list[robot_idx].get_own_orientation())
                relative_measeurement_data = [observee_idx, measurment_range, bearing]
                [s, orinetations, sigma_s] = robots_list[robot_idx].state_update('relative observation update', relative_measeurement_data)

            # --- robot 3 observes landmark 0 and robot 4 ---
            robot_idx = 3
            [measurment_range, bearing] = generate_measurement_data(robots_list[robot_idx].get_pos(), landmarks[0].position, robots_list[robot_idx].get_own_orientation())
            landmark_measeurement_data = [landmarks[0].position, measurment_range, bearing]
            [s, orinetations, sigma_s] = robots_list[robot_idx].state_update('landmark observation update', landmark_measeurement_data)

            observee_idx = 4
            [measurment_range, bearing] = generate_measurement_data(robots_list[robot_idx].get_pos(), robots_list[observee_idx].get_pos(), robots_list[robot_idx].get_own_orientation())
            relative_measeurement_data = [observee_idx, measurment_range, bearing]
            [s, orinetations, sigma_s] = robots_list[robot_idx].state_update('relative observation update', relative_measeurement_data)

            # --- communication: robot 2 receives from robot 3 ---
            sender_idx, robot_idx = 3, 2
            comm_data = [robots_list[sender_idx].get_status()[0], robots_list[sender_idx].get_status()[2]]
            [s, orinetations, sigma_s] = robots_list[robot_idx].state_update('communication', comm_data)

            # --- communication: robot 0 receives from robot 2 ---
            sender_idx, robot_idx = 2, 0
            comm_data = [robots_list[sender_idx].get_status()[0], robots_list[sender_idx].get_status()[2]]
            [s, orinetations, sigma_s] = robots_list[robot_idx].state_update('communication', comm_data)
            # theoretical covariance bound is propagated only through this comm step
            th_sigma_s = robots_list[robot_idx].bound_update('communication', comm_data)

            # --- RMS location error over all robots at step t ---
            # fixed: the y term compared x against y ground truth (get_pos()[0] twice),
            # the stacked state was indexed with j instead of 2j, and the accumulator
            # shadowed the state variable `s`.
            sq_err = 0.0
            for j, robot in enumerate(robots_list):
                sq_err += pow(robot.get_pos()[0] - robots_act_loc[2 * j, 0], 2) + pow(robot.get_pos()[1] - robots_act_loc[2 * j + 1, 0], 2)
            rms_err = sqrt(sq_err / num_robots)  # 1/num_robots == the original 0.2 for 5 robots
            error_arr[t] = error_arr[t] + rms_err * (1 / float(iteration))

            # averaged sqrt of robot 0's normalized covariance trace
            sigma_tr_arr[t] = sigma_tr_arr[t] + sqrt(robots_list[0].get_trace_state_variance() / num_robots) * (1 / float(iteration))

            #sigma_th_tr_arr[t] = sigma_th_tr_arr[t] + sqrt(robots_list[0].get_bound() / num_robots) * (1 / float(iteration))

    return [error_arr, sigma_tr_arr]
import numpy as np
import numpy.matlib
import math
import os
import sys

#sys.path.insert(0, '/Users/william/Documents/SLAM_Simulation/Simulation-Environment-for-Cooperative-Localization/functions/requests')
sys.path.append(os.path.join(os.path.dirname(__file__), "../requests"))
import request_response
from numpy import linalg as LA
from random import randint


class SimulationManager():
    """Drives a simulation run: pulls odometry/measurement responses from a
    dataset manager (dm), feeds them to the robot system's localization
    algorithm, and hands the results to a StatesRecorder."""

    def __init__(self, name):
        self.name = name

    def sim_process_naive(self, dataset_labels, dm, robot_system, state_recorder, comm = True, simple_plot = False):
        """Event-driven simulation: replay the dataset in time order.

        After every measurement response, if `comm` is on, the observing robot
        exchanges state with its (index+1) neighbour. Returns the list of state
        variances collected after each update.
        """
        #dm: dataset manager
        print('******** Simulation Process Started! ********')
        print('communication: ', comm)
        self.dataset_labels = dataset_labels

        start_time = dm.get_start_time()
        duration = dm.get_duration()
        self.time = start_time
        prev_time = 0

        # recording starting states
        starting_st = dm.get_starting_states()
        state_recorder.set_starting_state(starting_st)

        robot_system.set_starting_state(dm.get_starting_states())
        robot_system.set_start_moving_times(dm.get_start_moving_times())
        robot_system.load_map(dm.get_landmark_map())

        state_var = []
        while self.time < start_time + duration:
            g_req = request_response.Request_response(None, None)
            prev_time = self.time
            valid, self.time, rsp = dm.respond(g_req, self.time)
            if self.time < prev_time:
                print('Time inconsistant!')
                break
            if valid == False:
                break
            robot_state = robot_system.localization_update(rsp)
            state_var.append(robot_state['state variance'])
            state_recorder.record_state(rsp, robot_state)

            # communication protocol: every measurement triggers one exchange
            if comm and rsp.get_type() == 'measurement':
                rbt_idx = rsp.get_robot_index()
                comm_rsp = request_response.Comm_req_resp(self.time, rbt_idx)
                message = comm_rsp.get_message()
                sender_idx = (rbt_idx + 1) % (robot_system.num_robots)
                sender_id = dataset_labels[sender_idx]
                message['data'] = {'subject_ID': sender_id}
                # fixed: was `rsp.get_groundtruth` (stored the bound method, not the value)
                message['groundtruth'] = rsp.get_groundtruth()
                comm_rsp.set_message(message)

                robot_state = robot_system.localization_update(comm_rsp)
                state_var.append(robot_state['state variance'])
                # NOTE(review): the comm update is recorded against the triggering
                # measurement response `rsp` (its time/groundtruth) — confirm intended.
                state_recorder.record_state(rsp, robot_state)

        if simple_plot:
            state_recorder.simple_plot(plot_title=self.name)

        print('******** Simulation Process Finished! ********')
        print('End time: ', self.time)

        return state_var

    def sim_process_schedule(self, dataset_labels, dm, robot_system, state_recorder, freqs, simple_plot = False):
        """Frequency-scheduled simulation.

        `freqs[req_type][robot]` gives the rate (Hz) at which each robot's
        odometry / measurement / communication update fires; the earliest
        pending event is always served next. Returns the final simulation time.
        """
        #dm: dataset manager
        print('******** Simulation Process Started! ********')
        req_type_list = ['odometry', 'measurement', 'communication']
        self.dataset_labels = dataset_labels

        start_time = dm.get_start_time()
        duration = dm.get_duration()
        self.time = start_time
        prev_time = 0

        # recording starting states
        starting_st = dm.get_starting_states()
        state_recorder.set_starting_state(starting_st)

        robot_system.set_starting_state(dm.get_starting_states())
        # due to adding data
        robot_system.set_start_moving_times([start_time] * len(dataset_labels))
        robot_system.load_map(dm.get_landmark_map())

        # next firing time for every (request type, robot) pair
        next_possible_time_array = [[start_time + 1 / freq for freq in f] for f in freqs]
        rsp = None  # most recent dataset response; its groundtruth is reused for comm
        while self.time < start_time + duration:
            # pick the earliest scheduled event (first minimum, as before)
            times = np.asarray(next_possible_time_array)
            req_type_idx, robot_idx = np.unravel_index(np.argmin(times), times.shape)
            req_type = req_type_list[req_type_idx]
            req_time = next_possible_time_array[req_type_idx][robot_idx]

            if req_type != 'communication':
                g_req = request_response.Request_response(req_time, robot_idx)
                g_req.set_type(req_type)
                prev_time = self.time
                valid, rsp_time, rsp = dm.respond(g_req, req_time, need_specific_time = True)
                if valid == False:
                    print('invalid responese')
                    break
                robot_state = robot_system.localization_update(rsp)
                state_recorder.record_state(rsp, robot_state)
            else:
                comm_rsp = request_response.Comm_req_resp(req_time, robot_idx)
                message = comm_rsp.get_message()
                obser_id = (robot_idx + 1) % (robot_system.num_robots)
                message['data'] = obser_id
                # fixed: call get_groundtruth(); guard the case where a comm event
                # fires before any odometry/measurement response exists
                message['groundtruth'] = rsp.get_groundtruth() if rsp is not None else None
                comm_rsp.set_message(message)

                # fixed: was localization_update(rsp) — stale (or unbound) response
                robot_state = robot_system.localization_update(comm_rsp)
                #state_recorder.record_state(comm_rsp, robot_state)

            self.time = next_possible_time_array[req_type_idx][robot_idx]
            if self.time < prev_time:
                print('current time:', self.time)
                print(next_possible_time_array)
                sys.exit('Time inconsistant!')  # fixed: sys.quit does not exist

            next_possible_time_array[req_type_idx][robot_idx] += 1 / freqs[req_type_idx][robot_idx]

        if simple_plot:
            state_recorder.simple_plot(plot_title=self.name)

        print('******** Simulation Process Finished! ********')
        #print('End time: ', self.time)

        return self.time
enumerate(self.dataset_labels): + self.recorded_data[label]=[] + self.loc_err_arr[label]=[] + self.trace_sigma_s_arr[label]=[] + self.updata_type_arr[label]=[] + + def set_starting_state(self, stating_states): + for i, label in enumerate(self.dataset_labels): + self.start_time = stating_states[label][0] + time = 0 + x_pos = stating_states[label][1] + y_pos = stating_states[label][2] + initial_trace_state_var = 0.0001 + loc_err = 0 + recorded_dataline = [time, label, x_pos, y_pos, initial_trace_state_var, x_pos, y_pos, loc_err] + self.data_in_time_order.append(recorded_dataline) + self.recorded_data[label].append(recorded_dataline) + self.loc_err_arr[label].append(0) + self.trace_sigma_s_arr[label].append(initial_trace_state_var) + self.updata_type_arr[label].append('ini') + self.updata_type_in_time_order.append('ini') + def get_dataset_labels(self): + return self.dataset_labels + + + def record_state(self, req, robot_state): + message = req.get_message() + robot_idx = message['robot_index'] + time = message['time']-self.start_time + gt = message['groundtruth'] + #print(robot_state['x_pos']) + est_x_pos = float(robot_state['x_pos']) + est_y_pos = float(robot_state['y_pos']) + trace_state_var = robot_state['trace of state variance'] + updata_type = robot_state['update_type'] + gt_x_pos = gt['x_pos'] + gt_y_pos = gt['y_pos'] + robot_label = self.dataset_labels[robot_idx] + loc_err = sqrt((est_x_pos-gt_x_pos)*(est_x_pos-gt_x_pos)+(est_y_pos-gt_y_pos)*(est_y_pos-gt_y_pos)) + + recorded_dataline = [time, robot_label, est_x_pos, est_y_pos, trace_state_var, gt_x_pos, gt_y_pos, loc_err] + + if(trace_state_var<0): + print('TIME: ', time+self.start_time) + print(updata_type) + print('neg trace: ', recorded_dataline) + + if(loc_err >= 1): + print(updata_type) + print('>1 m loc err: ',recorded_dataline) + print(req.get_message()) + + #print(recorded_dataline) + self.data_in_time_order.append(recorded_dataline) + self.updata_type_in_time_order.append(updata_type) + 
self.recorded_data[robot_label].append(recorded_dataline) + self.loc_err_arr[robot_label].append(loc_err) + self.trace_sigma_s_arr[robot_label].append(trace_state_var) + self.updata_type_arr[robot_label].append(updata_type) + + + def get_data_in_time_order(self): + return self.data_in_time_order + + def get_updata_type_in_time_order(self): + return self.updata_type_in_time_order + + def get_recorded_data(self): + return self.recorded_data + + def get_time_arr(self, robot_id): + time_arr = np.array(self.recorded_data[robot_id])[:,0] + return time_arr + + def get_loc_err_arr(self): + return self.loc_err_arr + + def get_trace_sigma_s_arr(self): + return self.trace_sigma_s_arr + + def get_update_type_arr(self): + return self.updata_type_arr + + def simple_plot(self, plot_title = ''): + fig = plt.figure() + plt.suptitle(plot_title + ' Correctness analysis') + fig1 = fig.add_subplot(211) + fig2 = fig.add_subplot(212) + loc_err_arr = self.get_loc_err_arr() + trace_sigma_s_arr = self.get_trace_sigma_s_arr() + for i, label in enumerate(self.dataset_labels): + time_arr = self.get_time_arr(label) + fig1.plot(time_arr, loc_err_arr[label], label= 'Robot %d' %label) + fig2.plot(time_arr, trace_sigma_s_arr[label], label= 'Robot %d' %label) + print('Robot',label, 'loc err: ', sum(loc_err_arr[label])/len(loc_err_arr[label])) + fig1.set_title('Estimation deviation error') + fig1.set_xlabel('Time[s]') + fig1.set_ylabel('RMS[m]') + #fig1.set_ylim(0, 6) + fig1.legend(loc='center left', bbox_to_anchor=(1, 0.5)) + + fig2.set_title('Trace of state variance') + fig2.set_xlabel('Time [s]') + fig2.set_ylabel('Sigma_s [m^2]') + #fig2.set_ylim(0, 0.08) + fig2.legend(loc='center left', bbox_to_anchor=(1, 0.5)) + + fig.subplots_adjust(hspace = 0.8) + plt.show() + + return True + + + + + + \ No newline at end of file diff --git a/CoLo-AT/simulation_process/test_recorder.py b/CoLo-AT/simulation_process/test_recorder.py new file mode 100644 index 
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 5 15:24:42 2018

@author: william

Smoke test: drive a StatesRecorder with responses from the MRCLAM dataset loader.
"""

import sys
import numpy as np
sys.path.insert(0, '/Users/william/Documents/SLAM_Simulation/Simulation-Environment-for-Cooperative-Localization/functions/requests')
import request_response
sys.path.insert(0, '/Users/william/Documents/SLAM_Simulation/Simulation-Environment-for-Cooperative-Localization/functions/dataset_manager')
from existing_dataset import Dataset
from state_recorder import StatesRecorder

dataset_path = '/Users/william/Documents/SLAM_Simulation/Simulation-Environment-for-Cooperative-Localization/datasets/MRCLAM_Dataset1/'
dataset_labels = [1, 2, 3, 4, 5]
testing_dataset = Dataset('testing', dataset_path, dataset_labels)
lm_map = testing_dataset.create_landmark_map()
print(lm_map)
st, staring_states, d, t_arr = testing_dataset.initialization_MRCLAMDatasets(20)
sr = StatesRecorder('sr', dataset_labels)
# fixed: the recorder's method is set_starting_state (was set_stating_states -> AttributeError)
sr.set_starting_state(staring_states)


print('*********************************')
g_req = request_response.Request_response(None, None)
# NOTE(review): the newer Dataset.respond returns (valid, time, rsp) — confirm this
# old two-value API before running against the current dataset manager.
c_t, rsp = testing_dataset.respond(g_req, st)
print(g_req.get_message())
print(g_req.get_type())

# fixed: record_state reads robot_state['update_type'], so the key must be present
robot_state = {'x_pos': 1, 'y_pos': 1, 'trace of state variance': 0.1, 'update_type': 'test'}
sr.record_state(rsp, robot_state)


print('*********************************')
g_req = request_response.Request_response(None, None)
c_t, rsp = testing_dataset.respond(g_req, st)
print(g_req.get_message())
print(g_req.get_type())

robot_state = {'x_pos': 2, 'y_pos': 1, 'trace of state variance': 0.2, 'update_type': 'test'}
sr.record_state(rsp, robot_state)

print('*********************************')
g_req = request_response.Request_response(None, None)
c_t, rsp = testing_dataset.respond(g_req, st)
print(g_req.get_message())
print(g_req.get_type())
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 8 17:55:06 2018

@author: william
"""

# Driver script: run one CoLo simulation (Centralized EKF on MRCLAM Dataset3)
# and push the recorded states through the analyzer.

import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), "."))
from dataset_manager.existing_dataset import Dataset
from simulation_process.sim_manager import SimulationManager
from robots.robot_system import RobotSystem
from simulation_process.state_recorder import StatesRecorder
from data_analysis.data_analyzer import Analyzer
from data_analysis.realtime_plot import animate_plot
from pprint import pprint

# load algorithms
sys.path.append(os.path.join(os.path.dirname(__file__), "localization_algos"))

from centralized_ekf import Centralized_EKF # works
from simple_ekf import Simple_EKF
from ekf_ls_bda import EKF_LS_BDA
from ekf_ls_ci import EKF_LS_CI
from ekf_gs_bound import EKF_GS_BOUND
from ekf_gs_ci import EKF_GS_CI

#need to verify these algo
from ekf_gs_sci2 import EKF_GS_SCI2
from ekf_ls_ci2 import EKF_LS_CI2
from ekf_gs_ci2 import EKF_GS_CI2
from centralized_ekf2 import Centralized_EKF2


# NOTE(review): absolute path — point this at your local MRCLAM checkout.
dataset_path = '/home/william/UTIAS-dataset/MRCLAM_Dataset3/'

dataset_labels = [1,2,3]
duration = 100 # duration for the simulation in sec
testing_dataset = Dataset('testing')
start_time, starting_states, dataset_data, time_arr = testing_dataset.load_MRCLAMDatasets(dataset_path, dataset_labels, duration)

# Schedule rates for sim_process_schedule: rows are
# [odometry, measurement, communication], columns are per-robot Hz.
freqs0 = [[10, 10, 10],[1, 1, 1],[0.5, 0.5, 0.5]]
freqs1 = [[10, 10, 10],[4, 4, 4],[0.5, 0.5, 0.5]]

# Build the robot system with the chosen localization algorithm
# (distr_sys=False -> centralized state).
loc_algo = Centralized_EKF('algo')
robot = RobotSystem('robot', dataset_labels, loc_algo, distr_sys = False)

sim = SimulationManager('sim Centralized_EKF')
state_recorder = StatesRecorder('recorder',dataset_labels)
#sim.sim_process_schedule(dataset_labels, testing_dataset, robot, state_recorder, freqs0, simple_plot = True)
sim.sim_process_naive(dataset_labels, testing_dataset, robot, state_recorder, simple_plot = True)

# Post-run analysis: per-run location error and covariance trace.
analyzer = Analyzer('analyzer', dataset_labels)
loc_err_per_run, trace_per_run, t_arr = analyzer.calculate_loc_err_and_trace_state_variance_per_run(state_recorder, plot_graphs = False)
robot_loc_time_unit = analyzer.robot_location_at_unit_time_interval(state_recorder)
data_in_time_order = state_recorder.get_data_in_time_order()
update_in_time_order = state_recorder.get_updata_type_in_time_order()

'''
##############################################################################
testing_dataset.dataset_reset()
robot = RobotSystem('robot gs ci', dataset_labels, loc_algo, distr_sys = True)

sim1 = SimulationManager('sim')
state_recorder1 = StatesRecorder('gs ci schedule freq1',dataset_labels)
sim1.sim_process_schedule(dataset_labels, testing_dataset, robot, state_recorder1, freqs1, simple_plot = True)

##############################################################################
testing_dataset.dataset_reset()
robot = RobotSystem('robot gs ci', dataset_labels, loc_algo, distr_sys = True)

sim_n = SimulationManager('sim')
state_recorder_n = StatesRecorder('gs ci naive ',dataset_labels)
sim_n.sim_process_naive(dataset_labels, testing_dataset, robot, state_recorder_n, simple_plot = True)

analyzer = Analyzer('analyzer', dataset_labels)
analyzer.algos_comparison([state_recorder, state_recorder1, state_recorder_n], only_trace=['gs ci bound'])
print('done!')
'''
'''
sim.sim_process_naive(dataset_labels, testing_dataset, robot, state_recorder, simple_plot = True)
loc_err_per_run, trace_per_run, t_arr = analyzer.calculate_loc_err_and_trace_state_variance_per_run(state_recorder)
#robot_location_at_unit_time_interval
robot_loc_time_unit = analyzer.robot_location_at_unit_time_interval(state_recorder)


# simulated data
data_sim = state_recorder.get_recorded_data()
le = state_recorder.get_loc_err_arr()
ud = state_recorder.get_update_type_arr()
t_S = state_recorder.get_trace_sigma_s_arr()
ud_t = state_recorder.get_updata_type_in_time_order()
data_sim_t = state_recorder.get_data_in_time_order()
get_robot_time_func = state_recorder.get_time_arr
# le, t_S is an array of 5 elements, each element has error, state variance
# based on time stamp
# print(len(t_S[1]), len(t_S[2]), len(t_S[3]), len(t_S[4]), len(t_S[5]))
# print(data_sim)
'''

#animate_plot(dataset_labels, state_recorder, analyzer, get_robot_time_func, le, t_S)