How to use the pybullet.getBasePositionAndOrientation function in pybullet

To help you get started, we’ve selected a few pybullet examples based on popular ways getBasePositionAndOrientation is used in public projects.

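Before the project snippets below, here is a minimal usage sketch (not taken from any of these repositories): it loads the r2d2.urdf model that ships with pybullet_data and queries its base pose. getBasePositionAndOrientation returns the base position as an (x, y, z) tuple and the base orientation as an (x, y, z, w) quaternion, both in world coordinates.

import pybullet as p
import pybullet_data

# connect to a headless physics server; use p.GUI instead for a visual window
p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())

# load a sample model bundled with pybullet_data
body_id = p.loadURDF("r2d2.urdf", basePosition=[0, 0, 0.5])

# query the base pose in world coordinates
pos, orn = p.getBasePositionAndOrientation(body_id)
print("position (x, y, z):", pos)
print("orientation quaternion (x, y, z, w):", orn)

p.disconnect()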

github araffin / robotics-rl-srl / environments / KukaGymEnv.py (View on GitHub)
def _reward(self):

        # reward is the height of the target object
        blockPos, blockOrn = p.getBasePositionAndOrientation(self.blockUid)
        closestPoints = p.getClosestPoints(self.blockUid, self._kuka.kukaUid, 1000, -1, self._kuka.kukaEndEffectorIndex)

        reward = -1000

        numPt = len(closestPoints)
        # print(numPt)
        if numPt > 0:
            # print("reward:")
            reward = -closestPoints[0][8] * 10
        if blockPos[2] > 0.2:
            reward = reward + 10000
            print("successfully grasped a block!!!")
            # print("self._envStepCounter")
            # print(self._envStepCounter)
            # print("self._envStepCounter")
            # print(self._envStepCounter)

github StanfordVL / GibsonEnvV2 / gibson2 / core / physics / drivers / minitaur.py (View on GitHub)
def GetBaseOrientation(self):
        """Get the orientation of minitaur's base, represented as quaternion.

        Returns:
          The orientation of minitaur's base.
        """
        _, orientation = (p.getBasePositionAndOrientation(self.minitaur))
        return orientation
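
As the docstring notes, the orientation comes back as an (x, y, z, w) quaternion. When roll/pitch/yaw angles are more convenient, pybullet can convert it; a small self-contained sketch (the r2d2.urdf asset is just an example body):

import pybullet as p
import pybullet_data

p.connect(p.DIRECT)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
robot_id = p.loadURDF("r2d2.urdf")

# getBasePositionAndOrientation returns (position, quaternion); keep the quaternion
_, orientation = p.getBasePositionAndOrientation(robot_id)
roll, pitch, yaw = p.getEulerFromQuaternion(orientation)  # radians

p.disconnect()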

github maximecb / gym-miniworld / pytorch-a2c-ppo-acktr / enjoy.py (View on GitHub)
while True:
    with torch.no_grad():
        value, action, _, recurrent_hidden_states = actor_critic.act(
            obs, recurrent_hidden_states, masks, deterministic=False)

    # Observe reward and next obs
    obs, reward, done, _ = env.step(action)

    masks.fill_(0.0 if done else 1.0)

    if args.env_name.find('Bullet') > -1:
        if torsoId > -1:
            distance = 5
            yaw = 0
            humanPos, humanOrn = p.getBasePositionAndOrientation(torsoId)
            p.resetDebugVisualizerCamera(distance, yaw, -20, humanPos)

    if render_func is not None:
        render_func('human')

github jhu-lcsr / costar_plan / costar_task_plan / python / costar_task_plan / simulation / world.py (View on GitHub)
def GetObjectState(handle):
    '''
    Look up the handle and get its base position and eventually other
    information, if we find that necessary.
    '''
    pos, rot = pb.getBasePositionAndOrientation(handle)
    return SimulationObjectState(handle,
                                 base_pos=pos,
                                 base_rot=rot)

github araffin / robotics-rl-srl / environments / KukaGymEnv.py (View on GitHub)
def getExtendedObservation(self):
        self._observation = self._kuka.getObservation()
        gripperState = p.getLinkState(self._kuka.kukaUid, self._kuka.kukaGripperIndex)
        gripperPos = gripperState[0]
        gripperOrn = gripperState[1]
        blockPos, blockOrn = p.getBasePositionAndOrientation(self.blockUid)

        invGripperPos, invGripperOrn = p.invertTransform(gripperPos, gripperOrn)
        gripperMat = p.getMatrixFromQuaternion(gripperOrn)
        dir0 = [gripperMat[0], gripperMat[3], gripperMat[6]]
        dir1 = [gripperMat[1], gripperMat[4], gripperMat[7]]
        dir2 = [gripperMat[2], gripperMat[5], gripperMat[8]]

        gripperEul = p.getEulerFromQuaternion(gripperOrn)
        # print("gripperEul")
        # print(gripperEul)
        blockPosInGripper, blockOrnInGripper = p.multiplyTransforms(invGripperPos, invGripperOrn, blockPos, blockOrn)
        projectedBlockPos2D = [blockPosInGripper[0], blockPosInGripper[1]]
        blockEulerInGripper = p.getEulerFromQuaternion(blockOrnInGripper)
        # print("projectedBlockPos2D")
        # print(projectedBlockPos2D)
        # print("blockEulerInGripper")

github benelot / pybullet-gym / pybulletgym / examples / roboschool-weights / enjoy_TF_AntPyBulletEnv_v0_2017may.py (View on GitHub)
while 1:
        frame = 0
        score = 0
        restart_delay = 0
        obs = env.reset()
        print("frame")
        while 1:
            time.sleep(0.02)
            a = pi.act(obs)
            obs, r, done, _ = env.step(a)
            score += r
            frame += 1
            distance = 5
            yaw = 0
            humanPos, humanOrn = p.getBasePositionAndOrientation(torsoId)
            p.resetDebugVisualizerCamera(distance, yaw, -20, humanPos)

            still_open = env.render("human")
            if still_open is None:
                return
            if not done:
                continue
            if restart_delay == 0:
                print("score=%0.2f in %i frames" % (score, frame))
                restart_delay = 60*2  # 2 sec at 60 fps
            else:
                restart_delay -= 1
                if restart_delay == 0:
                    break

github benelot / bullet-gym / pybulletgym / envs / gym_mujoco_xml_env.py (View on GitHub)
def state_fields_of_pose_of(self, body_id, link_id=-1):  # a method you will most probably need a lot to get pose and orientation
		if link_id == -1:
			(x, y, z), (a, b, c, d) = p.getBasePositionAndOrientation(body_id)
		else:
			(x, y, z), (a, b, c, d), _, _, _, _ = p.getLinkState(body_id, link_id)
		return np.array([x, y, z, a, b, c, d])

github Healthcare-Robotics / assistive-gym / assistive_gym / envs / agents / agent.py (View on GitHub)
def get_pos_orient(self, link, center_of_mass=False, convert_to_realworld=False):
        # Get the 3D position and orientation (4D quaternion) of a specific link on the body
        if link == self.base:
            pos, orient = p.getBasePositionAndOrientation(self.body, physicsClientId=self.id)
        else:
            if not center_of_mass:
                pos, orient = p.getLinkState(self.body, link, computeForwardKinematics=True, physicsClientId=self.id)[4:6]
            else:
                pos, orient = p.getLinkState(self.body, link, computeForwardKinematics=True, physicsClientId=self.id)[:2]
        if convert_to_realworld:
            return self.convert_to_realworld(pos, orient)
        else:
            return np.array(pos), np.array(orient)
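
The assistive-gym example passes physicsClientId explicitly, which matters once more than one physics server is connected. A minimal sketch of that pattern (the cube_small.urdf asset and the variable names are only illustrative):

import pybullet as p
import pybullet_data

client_a = p.connect(p.DIRECT)
client_b = p.connect(p.DIRECT)

p.setAdditionalSearchPath(pybullet_data.getDataPath(), physicsClientId=client_a)
cube_id = p.loadURDF("cube_small.urdf", basePosition=[0, 0, 1], physicsClientId=client_a)

# without physicsClientId the call defaults to client 0, so pass the id
# explicitly to make sure the intended simulation is queried
pos, orn = p.getBasePositionAndOrientation(cube_id, physicsClientId=client_a)

p.disconnect(physicsClientId=client_a)
p.disconnect(physicsClientId=client_b)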