How to use the pybullet.resetDebugVisualizerCamera function in pybullet

To help you get started, we’ve selected a few pybullet examples that show how resetDebugVisualizerCamera is used in popular public projects.

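Before the project examples, here is a minimal, self-contained sketch of the call itself (a toy scene of our own, not taken from the projects below). resetDebugVisualizerCamera moves the GUI debug camera: it takes a distance in meters, a yaw and pitch in degrees, a target position, and an optional physicsClientId, and it only has a visible effect when connected in GUI mode.

import pybullet as p
import pybullet_data

# The debug visualizer (and its camera) only exists for GUI connections.
client = p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.loadURDF("plane.urdf")
p.loadURDF("r2d2.urdf", [0, 0, 0.5])

# Distance in meters, yaw/pitch in degrees, target as [x, y, z] in world coordinates.
p.resetDebugVisualizerCamera(cameraDistance=3.0,
                             cameraYaw=50,
                             cameraPitch=-35,
                             cameraTargetPosition=[0, 0, 0.5],
                             physicsClientId=client)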

Healthcare-Robotics / assistive-gym / assistive_gym / envs / feeding.py
def reset(self):
        super(FeedingEnv, self).reset()
        self.build_assistive_env('wheelchair')
        if self.robot.wheelchair_mounted:
            wheelchair_pos, wheelchair_orient = self.furniture.get_base_pos_orient()
            self.robot.set_base_pos_orient(wheelchair_pos + np.array(self.robot.toc_base_pos_offset[self.task]), [0, 0, -np.pi/2.0])

        joints_positions = [(self.human.j_right_elbow, -90), (self.human.j_left_elbow, -90), (self.human.j_right_hip_x, -90), (self.human.j_right_knee, 80), (self.human.j_left_hip_x, -90), (self.human.j_left_knee, 80)]
        joints_positions += [(self.human.j_head_x, self.np_random.uniform(-30, 30)), (self.human.j_head_y, self.np_random.uniform(-30, 30)), (self.human.j_head_z, self.np_random.uniform(-30, 30))]
        self.human.setup_joints(joints_positions, use_static_joints=True, reactive_force=None)

        # Create a table
        self.table = Furniture()
        self.table.init('table', self.directory, self.id, self.np_random)

        self.generate_target()

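        # Frame the visualizer for the feeding task: 1.1 m from the target, yaw 40°, pitch -45°, scoped to this physics client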
        p.resetDebugVisualizerCamera(cameraDistance=1.10, cameraYaw=40, cameraPitch=-45, cameraTargetPosition=[-0.2, 0, 0.75], physicsClientId=self.id)

        target_ee_pos = np.array([-0.15, -0.65, 1.15]) + self.np_random.uniform(-0.05, 0.05, size=3)
        target_ee_orient = np.array(p.getQuaternionFromEuler(np.array(self.robot.toc_ee_orient_rpy[self.task]), physicsClientId=self.id))
        if self.robot.wheelchair_mounted:
            # Use IK to find starting joint angles for mounted robots
            self.robot.ik_random_restarts(right=True, target_pos=target_ee_pos, target_orient=target_ee_orient, max_iterations=1000, max_ik_random_restarts=40, success_threshold=0.01, step_sim=True, check_env_collisions=True)
        else:
            # Use TOC with JLWKI to find an optimal base position for the robot near the person
            self.robot.position_robot_toc(self.task, 'right', [(target_ee_pos, target_ee_orient), (self.target_pos, None)], [(self.target_pos, target_ee_orient)], self.human, step_sim=True, check_env_collisions=False)
        # Open gripper to hold the tool
        self.robot.set_gripper_open_position(self.robot.right_gripper_indices, self.robot.gripper_pos[self.task], set_instantly=True)
        # Initialize the tool in the robot's gripper
        self.tool.init(self.robot, self.task, self.directory, self.id, self.np_random, right=True, mesh_scale=[0.08]*3)

        # Place a bowl on a table
        self.bowl = Furniture()
Healthcare-Robotics / assistive-gym / assistive_gym / envs / drinking.py
def reset(self):
        super(DrinkingEnv, self).reset()
        self.build_assistive_env('wheelchair')
        if self.robot.wheelchair_mounted:
            wheelchair_pos, wheelchair_orient = self.furniture.get_base_pos_orient()
            self.robot.set_base_pos_orient(wheelchair_pos + np.array(self.robot.toc_base_pos_offset[self.task]), [0, 0, -np.pi/2.0])

        joints_positions = [(self.human.j_right_elbow, -90), (self.human.j_left_elbow, -90), (self.human.j_right_hip_x, -90), (self.human.j_right_knee, 80), (self.human.j_left_hip_x, -90), (self.human.j_left_knee, 80)]
        joints_positions += [(self.human.j_head_x, self.np_random.uniform(-30, 30)), (self.human.j_head_y, self.np_random.uniform(-30, 30)), (self.human.j_head_z, self.np_random.uniform(-30, 30))]
        self.human.setup_joints(joints_positions, use_static_joints=True, reactive_force=None)

        self.generate_target()

        p.resetDebugVisualizerCamera(cameraDistance=1.10, cameraYaw=55, cameraPitch=-45, cameraTargetPosition=[-0.2, 0, 0.75], physicsClientId=self.id)

        target_ee_pos = np.array([-0.2, -0.5, 1]) + self.np_random.uniform(-0.05, 0.05, size=3)
        target_ee_orient = np.array(p.getQuaternionFromEuler(np.array(self.robot.toc_ee_orient_rpy[self.task]), physicsClientId=self.id))
        if self.robot.wheelchair_mounted:
            # Use IK to find starting joint angles for mounted robots
            self.robot.ik_random_restarts(right=True, target_pos=target_ee_pos, target_orient=target_ee_orient, max_iterations=1000, max_ik_random_restarts=40, success_threshold=0.01, step_sim=True, check_env_collisions=True)
        else:
            # Use TOC with JLWKI to find an optimal base position for the robot near the person
            self.robot.position_robot_toc(self.task, 'right', [(target_ee_pos, target_ee_orient), (self.target_pos, None)], [(self.target_pos, target_ee_orient)], self.human, step_sim=True, check_env_collisions=False)
        # Open gripper to hold the tool
        self.robot.set_gripper_open_position(self.robot.right_gripper_indices, self.robot.gripper_pos[self.task], set_instantly=True)
        # Initialize the tool in the robot's gripper
        self.tool.init(self.robot, self.task, self.directory, self.id, self.np_random, right=True, mesh_scale=[0.045]*3, alpha=0.75)
        self.cup_top_center_offset = np.array([0, 0, -0.055])
        self.cup_bottom_center_offset = np.array([0, 0, 0.07])
StanfordVL / GibsonEnvV2 / realenv / core / physics / drivers / baselines / baseline_husky.py
        frame = 0
        score = 0
        restart_delay = 0
        obs = env.reset()
       
        while 1:
            time.sleep(0.01)
            #a = pi.act(obs)
            a = [1] * env.action_space.shape[0]
            obs, r, done, _ = env.step(a)
            print("observations", len(obs))
            score += r
            frame += 1
            distance=5
            yaw = 0
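            # Follow the robot: re-aim the visualizer at the husky's base every step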
            huskyPos, huskyOrn = p.getBasePositionAndOrientation(huskyId)
            p.resetDebugVisualizerCamera(distance, yaw, -20, huskyPos)

            still_open = env.render("human")
            if still_open == False:
                return
            if not done:
                continue
            if restart_delay == 0:
                print("score=%0.2f in %i frames" % (score, frame))
                restart_delay = 60*2  # 2 sec at 60 fps
            else:
                restart_delay -= 1
                if restart_delay == 0:
                    break
StanfordVL / GibsonEnvV2 / realenv / core / physics / physics_env.py
            self.debug_sliders = {
                'dist':  cameraDistSlider,
                'yaw':   cameraYawSlider,
                'pitch': cameraPitchSlider
            }
            self.viewMatrix = p.computeViewMatrixFromYawPitchRoll([0, 0, 0], 10, 0, 90, 0, 2)
            self.projMatrix = p.computeProjectionMatrix(-0.01, 0.01, -0.01, 0.01, 0.01, 128)
            p.getCameraImage(256, 256, viewMatrix = self.viewMatrix, projectionMatrix = self.projMatrix)

        else:
            cameraDist = p.readUserDebugParameter(self.debug_sliders['dist'])
            cameraYaw  = p.readUserDebugParameter(self.debug_sliders['yaw'])
            cameraPitch = p.readUserDebugParameter(self.debug_sliders['pitch'])

            pos_xyz, quat_wxyz = self.robot.getViewPosAndOrientation()
            p.getCameraImage(256, 256, viewMatrix = self.viewMatrix, projectionMatrix = self.projMatrix)
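            # Apply the current slider values, keeping the camera aimed at the robot's view position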
            p.resetDebugVisualizerCamera(cameraDist, cameraYaw, cameraPitch, pos_xyz)
benelot / pybullet-gym / pybulletgym / examples / roboschool-weights / enjoy_TF_Walker2DPyBulletEnv_v0_2017may.py
while 1:
        frame = 0
        score = 0
        restart_delay = 0
        obs = env.reset()
        print("frame")
        while 1:
            time.sleep(0.05)
            a = pi.act(obs)
            obs, r, done, _ = env.step(a)
            score += r
            frame += 1
            distance = 5
            yaw = 0
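            # Follow the walker: aim the camera at the torso link's world position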
            humanPos = p.getLinkState(torsoId, 4)[0]
            p.resetDebugVisualizerCamera(distance, yaw, -20, humanPos)
            still_open = env.render("human")
            if still_open is None:
                return
            if not done:
                continue
            if restart_delay == 0:
                print("score=%0.2f in %i frames" % (score, frame))
                restart_delay = 60*2  # 2 sec at 60 fps
            else:
                restart_delay -= 1
                if restart_delay == 0:
                    break
Rhoban / onshape-to-robot / onshape_to_robot / simulation.py
def lookAt(self, target):
        """Control the look of the visualizer camera

        Arguments:
            target {tuple} -- target as (x,y,z) tuple
        """
        if self.gui:
            params = p.getDebugVisualizerCamera()
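            # getDebugVisualizerCamera() returns a 12-tuple: index 8 is yaw, 9 is pitch, 10 is distance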
            p.resetDebugVisualizerCamera(params[10], params[8], params[9], target)
caelan / ss-pybullet / pybullet_tools / utils.py
def set_camera(yaw, pitch, distance, target_position=np.zeros(3)):
    p.resetDebugVisualizerCamera(distance, yaw, pitch, target_position, physicsClientId=CLIENT)
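The positional order here matches the API signature: distance, yaw, pitch, then the target position; CLIENT is the module's global physics client id.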
smearle / gym-city / enjoy.py
    player_act = None
    if infos[0]:
        if 'player_move' in infos[0].keys():
            player_act = infos[0]['player_move']

    num_step += 1

    masks.fill_(0.0 if done else 1.0)

    if args.env_name.find('Bullet') > -1:
        if torsoId > -1:
            distance = 5
            yaw = 0
            humanPos, humanOrn = p.getBasePositionAndOrientation(torsoId)
            p.resetDebugVisualizerCamera(distance, yaw, -20, humanPos)
erwincoumans / pybullet_robots / atlas.py
if 1:
	objs = p.loadSDF("botlab/botlab.sdf", globalScaling=2.0)
	zero=[0,0,0]
	p.configureDebugVisualizer(p.COV_ENABLE_RENDERING,1)
	print("converting y to z axis")
	for o in objs:
		pos,orn = p.getBasePositionAndOrientation(o)
		y2x = p.getQuaternionFromEuler([3.14/2.,0,3.14/2])
		newpos,neworn = p.multiplyTransforms(zero,y2x,pos,orn)
		p.resetBasePositionAndOrientation(o,newpos,neworn)
else:
	p.loadURDF("plane.urdf",[0,0,-3])
	
p.loadURDF("boston_box.urdf",[-2,3,-2], useFixedBase=True)

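# Hand-tuned view of the botlab scene: 1 m out, yaw 148°, pitch -9°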
p.resetDebugVisualizerCamera(cameraDistance=1, cameraYaw=148, cameraPitch=-9, cameraTargetPosition=[0.36, 5.3, -0.62])

p.loadURDF("boston_box.urdf",[0,3,-2],useFixedBase=True)

p.configureDebugVisualizer(p.COV_ENABLE_RENDERING,1)

p.getCameraImage(320,200)#, renderer=p.ER_BULLET_HARDWARE_OPENGL )


t=0
p.setRealTimeSimulation(1)
while (1):
	p.setGravity(0,0,-10)
	time.sleep(0.01)
	t+=0.01
	keys = p.getKeyboardEvents()
	for k in keys: