# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# NOTE(review): the line above is a scraping artifact (vendor advertisement), not source code; it has been commented out so it no longer breaks parsing.
# NOTE(review): truncated fragment — indentation has been stripped from this
# file and the body of the trailing "for" loop is missing from this view.
def build_experiments(self):
"""
Creates the whole set of experiment objects,
The experiments created depend on the selected Town.
"""
# We set the camera
# This single RGB camera is used on every experiment
camera = Camera('rgb')
camera.set(FOV=100)
camera.set_image_size(800, 600)
camera.set_position(2.0, 0.0, 1.4)
# Tilted 15 degrees downward; presumably (pitch, yaw, roll) — confirm
# against the CARLA Camera API.
camera.set_rotation(-15.0, 0, 0)
# Start/goal pose pairs supplied by a town-specific helper.
poses_tasks = self._poses()
# Traffic density per task; index i pairs with task_names[i].
vehicles_tasks = [0, 15, 70]
pedestrians_tasks = [0, 50, 150]
task_names = ['empty', 'normal', 'cluttered']
experiments_vector = []
# Presumably one experiment is built per (weather, task) pair — the loop
# body is cut off in this fragment.
for weather in self.weathers:
# NOTE(review): second, near-duplicate truncated copy of build_experiments.
# It differs from the other copies only in traffic levels (no vehicles,
# 250 pedestrians, no named tasks) and is also cut off mid-loop.
def build_experiments(self):
"""
Creates the whole set of experiment objects,
The experiments created depend on the selected Town.
"""
# We set the camera
# This single RGB camera is used on every experiment
camera = Camera('rgb')
camera.set(FOV=100)
camera.set_image_size(800, 600)
camera.set_position(2.0, 0.0, 1.4)
camera.set_rotation(-15.0, 0, 0)
poses_tasks = self._poses()
# Single task configuration: no vehicles, 250 pedestrians.
vehicles_tasks = [0]
pedestrians_tasks = [250]
experiments_vector = []
for weather in self.weathers:
for iteration in range(len(poses_tasks)):
poses = poses_tasks[iteration]
# Fragment ends here; the remainder of the loop body is not visible.
vehicles = vehicles_tasks[iteration]
# NOTE(review): orphaned tail of an experiment-set builder — the function
# header and the opening of exp_set_dict (including the start of the
# 'Town01' entry these first keys belong to) are cut off above.
'weathers_train': [1],
'weathers_validation': []
},
# Town02 configuration: poses from a helper, 15 vehicles, 50 pedestrians,
# weather id 14 reserved for validation.
'Town02': {'poses': _poses_town02(),
'vehicles': [15],
'pedestrians': [50],
'weathers_train': [],
'weathers_validation': [14]
}
}
# We set the camera
# This single RGB camera is used on every experiment
camera = Camera('rgb')
camera.set(FOV=100)
camera.set_image_size(800, 600)
camera.set_position(2.0, 0.0, 1.4)
camera.set_rotation(-15.0, 0, 0)
sensor_set = [camera]
# Returns both the built experiment vector and the raw configuration dict.
return _build_experiments(exp_set_dict, sensor_set), exp_set_dict
# NOTE(review): fragment of a CARLA client script — the enclosing function
# and the definitions of `settings` and `client` are not visible here, and
# the episode-start call after the final comment is cut off.
# The default camera captures RGB images of the scene.
camera0 = Camera('CameraRGB')
# Set image resolution in pixels.
camera0.set_image_size(800, 600)
# Set its position relative to the car in centimeters.
camera0.set_position(30, 0, 130)
settings.add_sensor(camera0)
# Let's add another camera producing ground-truth depth.
camera1 = Camera('CameraDepth', PostProcessing='Depth')
camera1.set_image_size(800, 600)
camera1.set_position(30, 0, 130)
settings.add_sensor(camera1)
# Third camera: ground-truth semantic segmentation, same mount point.
camera2 = Camera('CameraSemanticSegmentation', PostProcessing='SemanticSegmentation')
camera2.set_image_size(800, 600)
camera2.set_position(30, 0, 130)
settings.add_sensor(camera2)
# Now we load these settings into the server. The server replies
# with a scene description containing the available start spots for
# the player. Here we can provide a CarlaSettings object or a
# CarlaSettings.ini file as string.
scene = client.load_settings(settings)
# Choose one player start at random.
# NOTE(review): the comment above is stale — the start is pinned to 0 and
# the random choice is commented out.
number_of_player_starts = len(scene.player_start_spots)
player_start = 0 # random.randint(0, max(0, number_of_player_starts - 1))
# Notify the server that we want to start the episode at the
# player_start index. This function blocks until the server is ready
# NOTE(review): tail of a sensor-configuration function — the creation of
# the camera whose position/rotation is set first below happens above this
# fragment's start, as do the definitions of `cameras`, `camera_width`,
# `camera_height`, and `settings`.
camera.set_position(2.0, 0, 1.4)
camera.set_rotation(-15.0, 30, 0)
settings.add_sensor(camera)
# add a front facing depth camera
if CameraTypes.DEPTH in cameras:
camera = Camera(CameraTypes.DEPTH.value)
camera.set_image_size(camera_width, camera_height)
camera.set_position(0.2, 0, 1.3)
camera.set_rotation(8, 30, 0)
# PostProcessing is assigned directly as an attribute here, unlike the
# constructor-keyword style used elsewhere in this file.
camera.PostProcessing = 'Depth'
settings.add_sensor(camera)
# add a front facing semantic segmentation camera
if CameraTypes.SEGMENTATION in cameras:
camera = Camera(CameraTypes.SEGMENTATION.value)
camera.set_image_size(camera_width, camera_height)
camera.set_position(0.2, 0, 1.3)
camera.set_rotation(8, 30, 0)
camera.PostProcessing = 'SemanticSegmentation'
settings.add_sensor(camera)
return settings
def default_settings():
    """Build the default CarlaSettings for an episode.

    The episode runs synchronously with non-player agent info enabled, no
    traffic (0 vehicles, 0 pedestrians), fixed weather id 1, a Mustang as
    the player vehicle, and 'Epic' rendering quality.  Two 256x256 sensors
    are attached at position (1, 0, 2.50): an RGB camera and a semantic
    segmentation camera.

    Returns:
        CarlaSettings: the fully configured settings object.
    """
    settings = CarlaSettings()
    settings.set(
        SynchronousMode=True,
        SendNonPlayerAgentsInfo=True,
        NumberOfVehicles=0,
        NumberOfPedestrians=0,
        WeatherId=1,  # random.choice([1, 3, 7, 8, 14]),
        PlayerVehicle='/Game/Blueprints/Vehicles/Mustang/Mustang.Mustang_C',
        QualityLevel='Epic')
    settings.randomize_seeds()
    # Both sensors share the same resolution and mounting point; only the
    # name and post-processing mode differ.
    for sensor_name, post_processing in (('CameraRGB', None),
                                         ('CameraSegmentation', 'SemanticSegmentation')):
        if post_processing is None:
            sensor = Camera(sensor_name)
        else:
            sensor = Camera(sensor_name, PostProcessing=post_processing)
        sensor.set_image_size(256, 256)
        sensor.set_position(1, 0, 2.50)
        settings.add_sensor(sensor)
    return settings
# NOTE(review): fragment of yet another experiment builder — the enclosing
# def is not visible above and the block is cut off mid-call at
# `conditions.set(` below.
# We check the town, based on that we define the town related parameters
# The size of the vector is related to the number of tasks, inside each
# task there is also multiple poses ( start end, positions )
if self._city_name == 'Town01':
poses_tasks = [[[7, 3]], [[138, 17]], [[140, 134]], [[140, 134]]]
# Only the last task adds traffic (20 vehicles / 50 pedestrians).
vehicles_tasks = [0, 0, 0, 20]
pedestrians_tasks = [0, 0, 0, 50]
else:
poses_tasks = [[[4, 2]], [[37, 76]], [[19, 66]], [[19, 66]]]
vehicles_tasks = [0, 0, 0, 15]
pedestrians_tasks = [0, 0, 0, 50]
# We set the camera
# This single RGB camera is used on every experiment
camera = Camera('CameraRGB')
camera.set(FOV=100)
camera.set_image_size(800, 600)
camera.set_position(2.0, 0.0, 1.4)
camera.set_rotation(-15.0, 0, 0)
# Based on the parameters, creates a vector with experiment objects.
experiments_vector = []
for weather in self.weathers:
for iteration in range(len(poses_tasks)):
poses = poses_tasks[iteration]
vehicles = vehicles_tasks[iteration]
pedestrians = pedestrians_tasks[iteration]
conditions = CarlaSettings()
conditions.set(
# NOTE(review): fragment of a CARLA client run — the connection setup that
# precedes this print (and defines `client`) is not visible here.
print('CarlaClient connected')
settings = CarlaSettings()
settings.set(
SynchronousMode=True,
SendNonPlayerAgentsInfo=True,
NumberOfVehicles=20,
NumberOfPedestrians=0,
WeatherId= 1) # random.choice([1, 3, 7, 8, 14]))
settings.randomize_seeds()
# RGB, depth, and semantic-segmentation cameras, all 800x600 and mounted
# at the same point.
camera0 = Camera('CameraRGB')
camera0.set_image_size(800, 600)
camera0.set_position(30, 0, 130)
settings.add_sensor(camera0)
camera1 = Camera('CameraDepth', PostProcessing='Depth')
camera1.set_image_size(800, 600)
camera1.set_position(30, 0, 130)
settings.add_sensor(camera1)
camera2 = Camera('CameraSemanticSegmentation', PostProcessing='SemanticSegmentation')
camera2.set_image_size(800, 600)
camera2.set_position(30, 0, 130)
settings.add_sensor(camera2)
scene = client.load_settings(settings)
# Unlike the other client fragments in this file, this one actually picks
# a random start spot.
number_of_player_starts = len(scene.player_start_spots)
player_start = random.randint(0, max(0, number_of_player_starts - 1))
print('Starting...')
client.start_episode(player_start)
# NOTE(review): tail of a camera-rig builder — the creation of the camera
# positioned first below happens above this fragment's start, and the block
# is spliced off after the final comment.
camera.set_position(2.0, 0, 1.4)
camera.set_rotation(-15.0, 0, 0)
settings.add_sensor(camera)
# add a left facing camera
if CameraTypes.LEFT in cameras:
camera = Camera(CameraTypes.LEFT.value)
camera.set(FOV=100)
camera.set_image_size(camera_width, camera_height)
camera.set_position(2.0, 0, 1.4)
# Mirrored rotation vs the right camera (-30 here, +30 below); presumably
# (pitch, yaw, roll) — confirm against the CARLA Camera API.
camera.set_rotation(-15.0, -30, 0)
settings.add_sensor(camera)
# add a right facing camera
if CameraTypes.RIGHT in cameras:
camera = Camera(CameraTypes.RIGHT.value)
camera.set(FOV=100)
camera.set_image_size(camera_width, camera_height)
camera.set_position(2.0, 0, 1.4)
camera.set_rotation(-15.0, 30, 0)
settings.add_sensor(camera)
# add a front facing depth camera
if CameraTypes.DEPTH in cameras:
camera = Camera(CameraTypes.DEPTH.value)
camera.set_image_size(camera_width, camera_height)
camera.set_position(0.2, 0, 1.3)
camera.set_rotation(8, 30, 0)
camera.PostProcessing = 'Depth'
settings.add_sensor(camera)
# add a front facing semantic segmentation camera
# NOTE(review): tail of another client-run fragment — the `settings.set(`
# call that these first keyword arguments belong to is cut off above, as
# are the definitions of `settings`, `client`, and `show_camera`.
NumberOfVehicles=0,
NumberOfPedestrians=0,
WeatherId=1) # random.choice([1, 3, 7, 8, 14]))
settings.randomize_seeds()
camera0 = Camera('CameraRGB')
camera0.set_image_size(800, 600)
camera0.set_position(30, 0, 130)
settings.add_sensor(camera0)
camera1 = Camera('CameraDepth', PostProcessing='Depth')
camera1.set_image_size(800, 600)
camera1.set_position(30, 0, 130)
settings.add_sensor(camera1)
camera2 = Camera('CameraSemanticSegmentation', PostProcessing='SemanticSegmentation')
camera2.set_image_size(800, 600)
camera2.set_position(30, 0, 130)
settings.add_sensor(camera2)
scene = client.load_settings(settings)
number_of_player_starts = len(scene.player_start_spots)
# Start spot pinned to index 1; the random choice is commented out.
player_start = 1 # random.randint(0, max(0, number_of_player_starts - 1))
print('Starting...')
client.start_episode(player_start)
# Interactive matplotlib display, enabled only on request.
if show_camera:
plt.ion()
plt.show()