diff --git a/CoLo-AT/dataset_manager/data_generator.py b/CoLo-AT/dataset_manager/data_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac97eac9655df13137c626fc902b443d46a12be8
--- /dev/null
+++ b/CoLo-AT/dataset_manager/data_generator.py
@@ -0,0 +1,167 @@
+import numpy as np
+from math import pi, sqrt, atan2, hypot
+
+class DataGenerator():
+    def __init__(self, boundary, landmarks, duration, robot_labels, starting_states, start_time, delta_t,
+    velocity_noise = 0, angular_velocity_noise = 0, measurement_range_noise = 0, bearing_noise = 0,  communication_noise = 0, 
+    robot_fov = 2*pi/3):
+        
+        self.duration = duration
+        self.boundary = boundary
+        self.robot_labels = robot_labels
+        self.num_robots = len(robot_labels)
+        self.start_time = start_time
+        self.end_time = self.start_time + duration
+        self.delta_t = delta_t
+        
+        # {robot label: [time, x_pos, y_pos, orientation], ... }
+        self.starting_states = starting_states
+        
+        # angular width (radians) of robot's view in reference to x_axis
+        # ex: robot_fov = 2pi/3 means 60 degrees above and below x-axis are within vision
+        self.robot_fov = robot_fov
+         
+        self.velocity_noise = velocity_noise
+        self.angular_velocity_noise = angular_velocity_noise
+        self.measurement_range_noise = measurement_range_noise
+        self.bearing_noise = bearing_noise
+        self.communication_noise = communication_noise
+
+        # landmarks = {landmark ID: [x,y]}
+        self.landmark_map = landmarks
+        for landmark_id in landmarks:
+            if landmark_id < 5:
+                raise Exception("Invalid landmark ID: landmark IDs must be bigger than 5.")
+
+        self.time_arr = {'odometry': [[] for robot in robot_labels], 'measurement': [[] for robot in robot_labels], 'groundtruth': [[] for robot in robot_labels]}
+        for data_type in self.time_arr:
+            for i in range(self.num_robots):
+                self.time_arr[data_type][i] = np.arange(self.start_time, self.end_time)
+
+        self.groundtruth_data = [ [] for robot in range(self.num_robots)]
+        self.odometry_data = [[] for robot in range(self.num_robots)]
+        self.measurement_data = [[] for robot in range(self.num_robots)]
+        
+    # Inputs: None
+    # Outputs: time_arr, groundtruth_data, odometry_data, measurement_data, dataset_data
+    # Generates simulated data based upon simulation parameters passed from user to SimulatedDataSetManager
+    def generate_straight_line_data(self):
+
+        # Generate ground truth data
+        for i, label in enumerate(self.robot_labels, 0):
+            # append starting state
+            self.groundtruth_data[i].append({'time' : self.starting_states[label][0], 'x_pos': self.starting_states[label][1], 'y_pos': self.starting_states[label][2], 'orientation' : self.starting_states[label][3]})
+            for time_idx in range(1, len(self.time_arr['groundtruth'][i])):
+                curr_x = self.groundtruth_data[i][time_idx-1]['x_pos']
+                curr_y = self.groundtruth_data[i][time_idx-1]['y_pos']
+                curr_orientation = self.groundtruth_data[i][time_idx-1]['orientation']
+
+                next_x = curr_x
+                next_y = curr_y
+                next_orientation = curr_orientation
+
+                if (self.within_map([curr_x + 1, curr_y + 1])):
+                   next_x = curr_x + 1
+                   next_y = curr_y
+                
+                self.groundtruth_data[i].append({'time' : self.time_arr['groundtruth'][i][time_idx], 'x_pos': next_x, 'y_pos': next_y, 'orientation': next_orientation})
+       
+        # Generate Odometry Data
+        for i, label in enumerate(self.robot_labels, 0):
+            # Initial robot state
+            self.odometry_data[i].append({'time': self.start_time, 'velocity' : 0, 'angular velocity' : 0, 'orientation' : self.groundtruth_data[i][0]['orientation'],
+             'delta_t' : 0})
+            for time_idx in range(1, len(self.time_arr['odometry'][i])):
+                loc1 = [self.groundtruth_data[i][time_idx-1]['x_pos'], self.groundtruth_data[i][time_idx-1]['y_pos']]
+                loc2 = [self.groundtruth_data[i][time_idx]['x_pos'], self.groundtruth_data[i][time_idx]['y_pos']]
+                velocity = self.calc_distance(loc1, loc2)/self.delta_t + np.random.normal(loc=0.0,scale=self.velocity_noise)
+                
+                theta_1 = self.groundtruth_data[i][time_idx-1]['orientation']
+                theta_2 = self.groundtruth_data[i][time_idx]['orientation']
+                angular_velocity = (theta_2-theta_1)/self.delta_t + np.random.normal(loc=0.0,scale=self.angular_velocity_noise)
+
+                self.odometry_data[i].append({'time': self.time_arr['odometry'][i][time_idx], 'velocity' : velocity, 'angular velocity' : angular_velocity, 
+                'orientation' : self.groundtruth_data[i][time_idx]['orientation'], 'delta_t' : self.delta_t})
+
+        # Generate Measurement Data
+        for robot_idx, label in enumerate(self.robot_labels, 0):
+            # NOTE - time_arr is used to emplace a one-to-one index-to-time correspondence 
+            # between measurement_data and time_arr['measurement'] to account for varying number of landmark observations
+            time_arr = []
+            # iterate over all possible times and (potentially) observable landmarks
+            for time_idx in range(0, len(self.time_arr['measurement'][robot_idx])):
+               for landmarkID, landmark_loc in self.landmark_map.items():
+                    
+                    time = self.time_arr['measurement'][robot_idx][time_idx]
+                    robot_loc_x = self.groundtruth_data[robot_idx][time_idx]['x_pos']
+                    robot_loc_y = self.groundtruth_data[robot_idx][time_idx]['y_pos']
+                    robot_loc = [robot_loc_x, robot_loc_y]
+                    
+                    if (self.within_vision(self.robot_fov, robot_loc, landmark_loc)):
+                        measurement_range = self.calc_distance(robot_loc, landmark_loc) + np.random.normal(loc=0.0,scale=self.measurement_range_noise)
+                        bearing = atan2((landmark_loc[1]-robot_loc[1]), (landmark_loc[0]-robot_loc[0])) + np.random.normal(loc=0.0,scale=self.bearing_noise)
+                        self.measurement_data[robot_idx].append({'time': time, 'subject_ID': landmarkID, 'measurment_range':measurement_range, 'bearing':bearing})
+                        time_arr.append(time) # we expect repeats/skipped times in time array
+            self.time_arr['measurement'][robot_idx] = time_arr
+        self.dataset_data = {'odometry': self.odometry_data, 'measurement': self.measurement_data, 'groundtruth': self.groundtruth_data}
+        
+        return self.time_arr, self.groundtruth_data, self.odometry_data, self.measurement_data, self.dataset_data
+
+    # Input: loc=[x,y]
+    # Output: boolean if the point [x,y] is within the map
+    def within_map(self, loc): 
+        return hypot(loc[0], loc[1]) < self.boundary
+
+    # Input: loc1=[x1,y1], loc2=[x2,y2]
+    # Output: distance between two points
+    def calc_distance(self, loc1, loc2):
+
+        x1 = loc1[0]
+        y1 = loc1[1]
+
+        x2 = loc2[0]
+        y2 = loc2[1]
+
+        distance = sqrt((x1-x2)**2 + (y1-y2)**2)
+        return distance
+
+    # NOTE - add distance limitations to robot_vision
+    # Input: robot_loc, landmark_loc, [x,y]
+    # Output: boolean, a given point is with the field of view (sector of a circle) of a robot
+    def within_vision(self, robot_fov, robot_loc, landmark_loc):    
+        # between pi and -pi with respect to the x axis
+        bearing = atan2((landmark_loc[1]-robot_loc[1]),(landmark_loc[0]-robot_loc[0]))
+        return abs(bearing) <= robot_fov/2
+
+# NOTE - Data formatting
+'''
+Groundtruth format
+ORIENTATION DEFINED WITH RESPECT TO GLOBAL FRAME OF REFERENCE
+{
+    'time': 1536012433.141,
+    'x_pos': -0.690963,
+    'y_pos': -1.685781,
+    'orientation': 0.03251611228318508
+}
+
+Odometry Format
+{
+    'time': 1536012462.22,
+    'velocity': 0.0,
+    'angular velocity': -0.2,
+    'orientation': -0.4324032103332002,
+    'delta_t': 0.09999990463256836
+}
+
+
+Measurement Data Format
+subject ID = Landmark ID
+measurement_range = distance between robot and landmark
+bearing = angle between robot and landmark, defined with respect to ROBOT'S FRAME OF REFERENCE
+{
+    'time': 1536012482.625,
+    'subject_ID': 120,
+    'measurment_range': 2.8757099026371695,
+    'bearing': 0.29890824572449504
+}
+'''
\ No newline at end of file
diff --git a/CoLo-AT/dataset_manager/simulated_dataset_manager.py b/CoLo-AT/dataset_manager/simulated_dataset_manager.py
index 2638659ec56c159de6f9fd52b06baca20222fa79..bab766d1e00f3b864e98370d394b3227be360371 100644
--- a/CoLo-AT/dataset_manager/simulated_dataset_manager.py
+++ b/CoLo-AT/dataset_manager/simulated_dataset_manager.py
@@ -1,184 +1,74 @@
-from math import pi, sqrt, atan2
+from math import pi
 import numpy as np
 
-import IPython
-
-# If need to import modules from other folders
-'''
+# Import from other directories in repository
 import sys
 import os
 import inspect
 currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
 parentdir = os.path.dirname(currentdir)
 sys.path.insert(0,parentdir) 
-from requests.request_response import Request_response
-'''
 
-# TODO - incorporate communcation (LQI, RSS) as a threshold/cutoff for communication
+from dataset_manager.data_generator import DataGenerator
+
+
+# TODO 
+# - Circular path data in DataGenerator class
+# - communication
+# - animated plots
 class SimulatedDataSetManager:
 
     def __init__(self, dataset_name, boundary, landmarks, duration, robot_labels,
     velocity_noise = 0, angular_velocity_noise = 0, measurement_range_noise = 0, bearing_noise = 0,  communication_noise = 0, 
     robot_fov = 2*pi/3):
         
-
         self.name = dataset_name
         self.duration = duration
-        self.boundary = boundary
         self.robot_labels = robot_labels
         self.num_robots = len(robot_labels)
         self.start_time = 0.0
-        self.end_time = self.start_time + self.duration
+        self.end_time = self.start_time + duration
         self.start_moving_times = [self.start_time for label in robot_labels]
         
-        # angular width of robots's vision with respect to robot's x_axis (radians)
-        # robot_fov = angular width (radians) of robot's view in reference to x_axis
-        # ex: robot_fov = 2pi/3 means 60 degrees above and below x-axis are within vision
-        self.robot_fov = robot_fov
-        
-        # Alternates between 'odometry' and 'measurement'
+        # Used to alternate between odometry and measurement and cycle through multiple robots
         self.curr_request = 'odometry'
-        # cycles through indices of robot_labels
         self.curr_robot_idx = 0
         
-        # Internal time index/counter
-        self.time_idx = 0
+        # time step of the simulation (secs)
         self.delta_t = 1.0 
-        
-        # User-specified noises, if none given: defaulted to 0
-        self.velocity_noise = velocity_noise
-        self.angular_velocity_noise = angular_velocity_noise
-        self.measurement_range_noise = measurement_range_noise
-        self.bearing_noise = bearing_noise
-        self.communication_noise = communication_noise
-
-        # landmarks = {landmark ID: [x,y]}
+
+        # landmarks = {landmark ID: [x,y]} (m)
         self.landmark_map = landmarks
         for landmark_id in landmarks:
             if landmark_id < 5:
                 raise Exception("Invalid landmark ID: landmark IDs must be bigger than 5.")
 
         # starting_states =	{robot label: [time, x_pos, y_pos, orientation], ... }
-        # TODO - modify generation of starting tates
-        self.starting_states = {label : [self.start_time, float(i), float(i), 0.0] for i,label in enumerate(robot_labels)}
-
-        # NOTE - length of odometry and groundtruth is fixed to be duration
-        # measurment can be greater or equal length depending on number of measurments made
-        self.time_arr = {'odometry': [[] for robot in robot_labels], 'measurement': [[] for robot in robot_labels], 'groundtruth': [[] for robot in robot_labels]}
-        for data_type in self.time_arr:
-            for i in range(self.num_robots):
-                self.time_arr[data_type][i] = np.arange(self.start_time, self.end_time)
-
-
-        # Measurement Data Format
-        # subject ID = Landmark ID
-        # measurement_range = distance between robot and landmark
-        # bearing = angle betweeen robot and landmark, defined with respect to ROBOT'S FRAME OF REFERENCE
-        '''
-        {
-            'time': 1536012482.625,
-            'subject_ID': 120, 
-            'measurment_range': 2.8757099026371695,
-            'bearing': 0.29890824572449504
-        }
-        '''
-        self.measurement_data = [[] for robot in range(self.num_robots)]
-        
-        # Odometry Data Format
-        '''
-        {
-            'time': 1536012462.22,
-            'velocity': 0.0,
-            'angular velocity': -0.2,
-            'orientation': -0.4324032103332002,
-            'delta_t': 0.09999990463256836
-        }
-        '''
-        self.odometry_data = [[] for robot in range(self.num_robots)]
+        self.starting_states = {label : [self.start_time, float(i), float(i), 0.0] for i,label in enumerate(self.robot_labels)}
 
-        # Groundtruth format
-        # ORIENTATION DEFINED WITH RESPECT TO GLOBAL FRAME OF REFERENCE
-        '''
-        {
-            'time': 1536012433.141,
-            'x_pos': -0.690963,
-            'y_pos': -1.685781,
-            'orientation': 0.03251611228318508
-        }
-        '''
+        # groundtruth & odometry are of fixed length=duration
+        # measurement_data is of variable length
+        self.time_arr = {}
         self.groundtruth_data = [ [] for robot in range(self.num_robots)]
+        self.odometry_data = [[] for robot in range(self.num_robots)]
+        self.measurement_data = [[] for robot in range(self.num_robots)]
+        self.dataset_data = []
+
+        self.generator = DataGenerator(boundary, landmarks, duration, robot_labels, self.starting_states, self.start_time, self.delta_t,
+        velocity_noise, angular_velocity_noise, measurement_range_noise, bearing_noise, communication_noise, robot_fov)
 
+    # Input: robot_path (line, circle, etc.)
+    # Output: Generates simulated data (gt, odo, meas)
     def simulate_dataset(self, path='line'):
         if (path == 'line'):
-            return self.generate_straight_line_data()
+            self.time_arr, self.groundtruth_data, self.odometry_data, self.measurement_data, self.dataset_data = self.generator.generate_straight_line_data()
+            return self.start_time, self.starting_states, self.dataset_data, self.time_arr
         # TODO
         elif(path=='circle'):
             pass
         else:
             raise Exception("Unsupported robot path specified.")
 
-    # TODO - create a separate class file that does data generation
-    def generate_straight_line_data(self):
-        
-        # Generate ground truth data
-        for i, label in enumerate(self.robot_labels, 0):
-            # append starting state
-            self.groundtruth_data[i].append({'time' : self.starting_states[label][0], 'x_pos': self.starting_states[label][1], 'y_pos': self.starting_states[label][2], 'orientation' : self.starting_states[label][3]})
-            for time_idx in range(1, len(self.time_arr['groundtruth'][i])):
-                curr_x = self.groundtruth_data[i][time_idx-1]['x_pos']
-                curr_y = self.groundtruth_data[i][time_idx-1]['y_pos']
-                curr_orientation = self.groundtruth_data[i][time_idx-1]['orientation']
-
-                next_x = curr_x
-                next_y = curr_y
-                next_orientation = curr_orientation
-
-                if (self.within_map([curr_x + 1, curr_y + 1])):
-                   next_x = curr_x + 1
-                   next_y = curr_y
-                
-                self.groundtruth_data[i].append({'time' : self.time_arr['groundtruth'][i][time_idx], 'x_pos': next_x, 'y_pos': next_y, 'orientation': next_orientation})
-       
-        # Generate Odometery Data
-        for i, label in enumerate(self.robot_labels, 0):
-            # Initial robot state
-            self.odometry_data[i].append({'time': self.start_time, 'velocity' : 0, 'angular velocity' : 0, 'orientation' : self.groundtruth_data[i][0]['orientation'],
-             'delta_t' : 0})
-            for time_idx in range(1, len(self.time_arr['odometry'][i])):
-                loc1 = [self.groundtruth_data[i][time_idx-1]['x_pos'], self.groundtruth_data[i][time_idx-1]['y_pos']]
-                loc2 = [self.groundtruth_data[i][time_idx]['x_pos'], self.groundtruth_data[i][time_idx]['y_pos']]
-                velocity = self.calc_distance(loc1, loc2)/self.delta_t + np.random.normal(loc=0.0,scale=self.velocity_noise)
-                
-                theta_1 = self.groundtruth_data[i][time_idx-1]['orientation']
-                theta_2 = self.groundtruth_data[i][time_idx]['orientation']
-                angular_velocity = (theta_2-theta_1)/self.delta_t + np.random.normal(loc=0.0,scale=self.angular_velocity_noise)
-
-                self.odometry_data[i].append({'time': self.time_arr['odometry'][i][time_idx], 'velocity' : velocity, 'angular velocity' : angular_velocity, 
-                'orientation' : self.groundtruth_data[i][time_idx]['orientation'], 'delta_t' : self.delta_t})
-
-        # Generate Measurement Data
-        for robot_idx, label in enumerate(self.robot_labels, 0):
-            # NOTE - time_arr is used to emplace a one-to-one index-to-time correspondence 
-            # between measurement_data and time_arr['measurement'] to account for varing number of landmark observations
-            time_arr = []
-            # iterate over all possible times and (potentially) observable landmarks
-            for time_idx in range(0, len(self.time_arr['measurement'][i])):
-               for landmarkID, landmark_loc in self.landmark_map.items():
-                    
-                    time = self.time_arr['measurement'][robot_idx][time_idx]
-                    robot_loc_x = self.groundtruth_data[robot_idx][time_idx]['x_pos']
-                    robot_loc_y = self.groundtruth_data[robot_idx][time_idx]['y_pos']
-                    robot_loc = [robot_loc_x, robot_loc_y]
-                    
-                    if (self.within_vision(self.robot_fov, robot_loc, landmark_loc)):
-                        measurement_range = self.calc_distance(robot_loc, landmark_loc) + np.random.normal(loc=0.0,scale=self.measurement_range_noise)
-                        bearing = atan2((landmark_loc[1]-robot_loc[1]), (landmark_loc[0]-robot_loc[0])) + np.random.normal(loc=0.0,scale=self.bearing_noise)
-                        self.measurement_data[robot_idx].append({'time': time, 'subject_ID': landmarkID, 'measurment_range':measurement_range, 'bearing':bearing})
-                        time_arr.append(time) # we expect repeats/skipped times in time array
-            self.time_arr['measurement'][robot_idx] = time_arr
-        self.dataset_data = {'odometry': self.odometry_data, 'measurement': self.measurement_data, 'groundtruth': self.groundtruth_data}
-        return self.start_time, self.starting_states, self.dataset_data, self.time_arr
-
     def get_dataset_name(self):
         return self.name
 
@@ -210,6 +100,10 @@ class SimulatedDataSetManager:
             return True
         return False
 
+    # Inputs: request, current_time (from simulation manager)
+    # Outputs: valid_respond, current_time, req
+    # Calls get_dataline and sets appropriate message state before returning
+    # Manages incrementation of current_time to return sim manager
     def respond(self, req, current_time, need_specific_time = False):
 
         valid_respond, message, req_type, robot_idx = self.get_dataline(req, current_time)
@@ -220,14 +114,15 @@ class SimulatedDataSetManager:
             req.set_robot_idx(robot_idx)
             req.set_type(req_type)
 
-        # TODO - add in update parameters for multiple robots (and robot_ctr about to reset)
         if (req_type == 'measurement' and self.reset_robot_ctr()):
             current_time += self.delta_t
             self.reset_robot_idx = False
 
         return valid_respond, current_time, req
 
-    # Alternates odometry and measurement updates and cycles through multiple robots
+    # Inputs: current time, empty request (passed by respond)
+    # Outputs: valid_dataline, message, req_type, robot_idx
+    # Selects robot and request type, emplaces message data
     def get_dataline(self, req, current_time):
         message = req.get_message()
 
@@ -265,37 +160,8 @@ class SimulatedDataSetManager:
 
         gt_time_idx = np.where(self.time_arr['groundtruth'][robot_idx] == current_time)[0][0]
         message['groundtruth'] = self.groundtruth_data[robot_idx][gt_time_idx]
-        
-        return valid_dataline, message, req_type, robot_idx
-        
-    # Check if a given loc [x,y] is within map
-    def within_map(self, loc): 
-        center_point = [0,0]
-        if sqrt(pow(loc[0]-center_point[0], 2)+pow(loc[1]-center_point[1], 2)) > self.boundary:
-            return False
-        else:
-            return True
-
-    # Compute the distance betwen loc1 = [x1,y1] and loc2 = [x2,y2]
-    def calc_distance(self, loc1, loc2):
 
-        x1 = loc1[0]
-        y1 = loc1[1]
-
-        x2 = loc2[0]
-        y2 = loc2[1]
-
-        distance = sqrt((x1-x2)**2 + (y1-y2)**2)
-        return distance
-
-    # TODO - add distance limitations to robot_vision
-    # [x,y] for landmark and robot_loc
-    def within_vision(self, robot_fov, robot_loc, landmark_loc):
-        
-        # between pi and -pi with respect to the x axis
-        bearing = atan2((landmark_loc[1]-robot_loc[1]),(landmark_loc[0]-robot_loc[0]))
-        
-        return abs(bearing) <= robot_fov/2
+        return valid_dataline, message, req_type, robot_idx
 
     # Inputs: current time, robot_idx
     # Outputs:  closest landmark measurement for given time and robot, if available