raithunderjet
11/29/2017 - 1:13 PM

robots.py

# send a drive command to the robot
def _send_robot_commands( self ):
  ...
  v_l, v_r = self._uni_to_diff( v, omega )
  self.robot.set_wheel_drive_rates( v_l, v_r )

def _uni_to_diff( self, v, omega ):
  # v = translational velocity (m/s)
  # omega = angular velocity (rad/s)

  R = self.robot_wheel_radius
  L = self.robot_wheel_base_length

  v_l = ( (2.0 * v) - (omega*L) ) / (2.0 * R)
  v_r = ( (2.0 * v) + (omega*L) ) / (2.0 * R)

  return v_l, v_r
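
A quick standalone check of the transform above (just a sketch -- the default R and L values here are made up, not the robot's actual wheel radius and wheel base):

from math import isclose

def uni_to_diff( v, omega, R = 0.02, L = 0.09 ):
  v_l = ( (2.0 * v) - (omega*L) ) / (2.0 * R)
  v_r = ( (2.0 * v) + (omega*L) ) / (2.0 * R)
  return v_l, v_r

# pure translation: both wheels spin at the same rate
v_l, v_r = uni_to_diff( 0.2, 0.0 )
assert isclose( v_l, v_r )

# pure rotation in place: the wheels spin at equal and opposite rates
v_l, v_r = uni_to_diff( 0.0, 1.0 )
assert isclose( v_l, -v_r )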

# update the robot's position estimate
def _update_odometry( self ):
    R = self.robot_wheel_radius
    N = float( self.wheel_encoder_ticks_per_revolution )
    
    # read the wheel encoder values
    ticks_left, ticks_right = self.robot.read_wheel_encoders()
    
    # get the difference in ticks since the last iteration
    d_ticks_left = ticks_left - self.prev_ticks_left
    d_ticks_right = ticks_right - self.prev_ticks_right
    
    # estimate the wheel movements
    d_left_wheel = 2*pi*R*( d_ticks_left / N )
    d_right_wheel = 2*pi*R*( d_ticks_right / N )
    d_center = 0.5 * ( d_left_wheel + d_right_wheel )
    
    # calculate the new pose
    prev_x, prev_y, prev_theta = self.estimated_pose.scalar_unpack()
    new_x = prev_x + ( d_center * cos( prev_theta ) )
    new_y = prev_y + ( d_center * sin( prev_theta ) )
    new_theta = prev_theta + ( ( d_right_wheel - d_left_wheel ) / self.robot_wheel_base_length )
    
    # update the pose estimate with the new values
    self.estimated_pose.scalar_update( new_x, new_y, new_theta )
    
    # save the current tick count for the next iteration
    self.prev_ticks_left = ticks_left
    self.prev_ticks_right = ticks_right
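
To see the odometry math in isolation, here is a standalone sketch of one update step (the wheel radius, base length, and encoder resolution are made-up example values):

from math import pi, cos, sin

R = 0.02      # wheel radius (m) -- example value
L = 0.09      # wheel base length (m) -- example value
N = 2048.0    # encoder ticks per revolution -- example value

def odometry_step( pose, d_ticks_left, d_ticks_right ):
    x, y, theta = pose
    d_left_wheel = 2*pi*R*( d_ticks_left / N )
    d_right_wheel = 2*pi*R*( d_ticks_right / N )
    d_center = 0.5 * ( d_left_wheel + d_right_wheel )
    return ( x + d_center*cos( theta ),
             y + d_center*sin( theta ),
             theta + ( d_right_wheel - d_left_wheel ) / L )

# equal tick counts on both wheels: the robot moves straight and theta stays unchanged
print( odometry_step( (0.0, 0.0, 0.0), 100, 100 ) )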

# calculate the go-to-goal heading vector
def calculate_gtg_heading_vector( self ):
    # get the inverse of the robot's pose
    robot_inv_pos, robot_inv_theta = self.supervisor.estimated_pose().inverse().vector_unpack()
    
    # calculate the goal vector in the robot's reference frame
    goal = self.supervisor.goal()
    goal = linalg.rotate_and_translate_vector( goal, robot_inv_theta, robot_inv_pos )
    
    return goal
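
The linalg helper isn't shown here; a plausible sketch of what rotate_and_translate_vector is assumed to do (rotate a 2D vector by theta, then translate it by tvect) would be:

from math import cos, sin

def rotate_and_translate_vector( v, theta, tvect ):
    # rotate v by theta...
    rotated = [ v[0]*cos( theta ) - v[1]*sin( theta ),
                v[0]*sin( theta ) + v[1]*cos( theta ) ]
    # ...then translate by tvect
    return [ rotated[0] + tvect[0], rotated[1] + tvect[1] ]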

# calculate the error term (heading angle to the goal)
theta_d = atan2( self.gtg_heading_vector[1], self.gtg_heading_vector[0] )

# calculate the angular velocity
omega = self.kP * theta_d

# calculate translational velocity
# velocity is v_max when omega is 0,
# drops rapidly to zero as |omega| rises
v = self.supervisor.v_max() / ( abs( omega ) + 1 )**0.5
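
A quick numeric check of this control law (kP and v_max are hypothetical values here, just to show the shape of the behavior -- the larger the heading error, the larger omega and the smaller v):

from math import atan2

kP = 5.0       # example proportional gain
v_max = 0.3    # example max translational velocity (m/s)

for heading in ( [1.0, 0.0], [1.0, 1.0], [0.0, 1.0] ):   # goal ahead, 45 deg, 90 deg
    theta_d = atan2( heading[1], heading[0] )
    omega = kP * theta_d
    v = v_max / ( abs( omega ) + 1 )**0.5
    print( "theta_d = %+.2f  omega = %+.2f  v = %.3f" % ( theta_d, omega, v ) )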

# sensor gains (weights)
self.sensor_gains = [ 1.0+( (0.4*abs(p.theta)) / pi )
                      for p in supervisor.proximity_sensor_placements() ]
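
For instance, with a hypothetical ring of five sensors this formula gives the sensors mounted further from straight ahead a slightly larger weight:

from math import pi

placement_thetas = [ -pi/2, -pi/4, 0.0, pi/4, pi/2 ]   # hypothetical mounting angles
sensor_gains = [ 1.0+( (0.4*abs(theta)) / pi ) for theta in placement_thetas ]
print( sensor_gains )   # roughly [1.2, 1.1, 1.0, 1.1, 1.2]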
                  
...

# return an obstacle avoidance vector in the robot's reference frame
# also returns vectors to detected obstacles in the robot's reference frame
def calculate_ao_heading_vector( self ):
    # initialize the heading vector and the per-sensor obstacle vectors
    obstacle_vectors = [ [ 0.0, 0.0 ] ] * len( self.proximity_sensor_placements )
    ao_heading_vector = [ 0.0, 0.0 ]             
    
    # get the distances indicated by the robot's sensor readings
    sensor_distances = self.supervisor.proximity_sensor_distances()
    
    # calculate the position of detected obstacles and find an avoidance vector
    robot_pos, robot_theta = self.supervisor.estimated_pose().vector_unpack()
    
    for i in range( len( sensor_distances ) ):
        # calculate the position of the obstacle
        sensor_pos, sensor_theta = self.proximity_sensor_placements[i].vector_unpack()
        vector = [ sensor_distances[i], 0.0 ]
        vector = linalg.rotate_and_translate_vector( vector, sensor_theta, sensor_pos )
        obstacle_vectors[i] = vector   # store the obstacle vectors in the robot's reference frame
     
        # accumulate the heading vector within the robot's reference frame
        ao_heading_vector = linalg.add( ao_heading_vector,
                                     linalg.scale( vector, self.sensor_gains[i] ) )
                                   
    return ao_heading_vector, obstacle_vectors
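
A standalone sketch of the same accumulation, with hypothetical sensor placements (angle only, mounted at the robot's center) and readings. Since each obstacle vector's length equals the measured distance, a close obstacle contributes a short vector and the weighted sum tends to lean toward the more open side:

from math import pi, cos, sin

def rotate_vector( v, theta ):
    return [ v[0]*cos( theta ) - v[1]*sin( theta ),
             v[0]*sin( theta ) + v[1]*cos( theta ) ]

placement_thetas = [ -pi/4, 0.0, pi/4 ]       # hypothetical sensor angles
sensor_distances = [ 0.05, 0.30, 0.30 ]       # the sensor at -pi/4 sees a close obstacle
sensor_gains = [ 1.0+( (0.4*abs(t)) / pi ) for t in placement_thetas ]

ao_heading_vector = [ 0.0, 0.0 ]
for theta, d, gain in zip( placement_thetas, sensor_distances, sensor_gains ):
    obstacle_vector = rotate_vector( [ d, 0.0 ], theta )   # obstacle in the robot's frame
    ao_heading_vector[0] += gain * obstacle_vector[0]
    ao_heading_vector[1] += gain * obstacle_vector[1]

print( ao_heading_vector )   # skewed away from the side with the short reading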