How to use the pybullet.connect function in pybullet

To help you get started, we've selected a few examples showing how pybullet.connect is used in popular open source projects.
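
pybullet.connect attaches the Python client to a physics server and returns a client id, which is negative when the connection fails. Most of the snippets below follow the same pattern: try p.SHARED_MEMORY to reuse a server that is already running, then fall back to p.GUI (with the built-in visualizer) or p.DIRECT (headless). A minimal sketch of that pattern, not tied to any one project:

import pybullet as p

# Try to attach to an already running physics server via shared memory.
cid = p.connect(p.SHARED_MEMORY)
if cid < 0:
    # No server found, so start our own; p.GUI opens the visualizer,
    # p.DIRECT would run headless and is faster for training.
    cid = p.connect(p.GUI)

p.setGravity(0, 0, -9.81)
p.stepSimulation()
p.disconnect()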

FlorianWilk / SpotMicroAI / Core / spotmicroai.py (view on GitHub)
        self.fixedTimeStep = 1. / 550
        self.numSolverIterations = 200
        self.useFixedBase = useFixedBase
        self.useStairs = useStairs

        self.init_oritentation = p.getQuaternionFromEuler([0, 0, 90.0])
        self.init_position = [0, 0, 0.3]

        self.reflection = False
        self.state = RobotState.OFF
        # Parameters for Servos - still wrong
        self.kp = 0.045  # 0.012
        self.kd = .4  # .2
        self.maxForce = 12.5

        self.physId = p.connect(p.SHARED_MEMORY)
        if (self.physId < 0):
            p.connect(p.GUI)
        self.angle = 90

        self.oldTextId=0
        self.textId=0
        self.oldDebugInfo=[]
        self.rot=(0,0,0)
        self.pos=(0,0,0)
        self.t=0
        if self.reflection:
            p.configureDebugVisualizer(p.COV_ENABLE_PLANAR_REFLECTION, 1)
        p.configureDebugVisualizer(p.COV_ENABLE_TINY_RENDERER, 1)
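        # GUI sliders for tuning the servo gains and force limit at runtime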
        self.IDkp = p.addUserDebugParameter("Kp", 0, 0.05, self.kp) # 0.05
        self.IDkd = p.addUserDebugParameter("Kd", 0, 1, self.kd) # 0.5
        self.IDmaxForce = p.addUserDebugParameter("MaxForce", 0, 50, 12.5)

StanfordVL / GibsonEnvV2 / gibson2 / core / simulator.py (view on GitHub)
        if self.render_to_tensor:
            self.renderer = MeshRendererG2G(width=self.resolution,
                                            height=self.resolution,
                                            fov=self.fov,
                                            device_idx=self.device_idx,
                                            use_fisheye=self.use_fisheye)
        else:
            self.renderer = MeshRenderer(width=self.resolution,
                                         height=self.resolution,
                                         fov=self.fov,
                                         device_idx=self.device_idx,
                                         use_fisheye=self.use_fisheye)

        # GUI mode connects with the built-in visualizer; any other mode runs headless.
        if self.mode == 'gui':
            self.cid = p.connect(p.GUI)
        else:
            self.cid = p.connect(p.DIRECT)
        p.setTimeStep(self.timestep)
        p.setGravity(0, 0, -self.gravity)

        if self.mode == 'gui' and not self.render_to_tensor:
            self.add_viewer()

        self.visual_objects = {}
        self.robots = []
        self.scene = None
        self.objects = []

StanfordVL / NTP-vat-release / vat / simulation / bullet / bullet_world.py (view on GitHub)
        self.view_matrix = p.computeViewMatrix(*view_matrix)
        self.projection_matrix = p.computeProjectionMatrixFOV(
            fov, aspect, near, far)

        self.video_log_key = 0

        # Connect to the simulation
        # TODO(Kuan): If VR
        #     p.connect(p.SHARED_MEMORY)
        if self._display:
            p.connect(p.GUI)
        else:
            if key is None:
                p.connect(p.DIRECT)
            else:
                p.connect(p.DIRECT, key=key)

FlorianWilk / SpotMicroAI / Simulation / quadru.py (view on GitHub)
import pybullet as p
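# Attach to an already running physics server via shared memory; if that fails
# (negative id), start a new GUI instance instead.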
cin = p.connect(p.SHARED_MEMORY)
if (cin < 0):
    cin = p.connect(p.GUI)
objects = [p.loadURDF("plane.urdf", 0.000000,0.000000,0.000000,0.000000,0.000000,0.000000,1.000000)]
objects = [p.loadURDF("../urdf/spotmicroai_gen.urdf.xml", 1.019864,-0.976667,0.129847,0.041700,-0.031063,0.154790,0.986578)]
objects = p.loadMJCF("../urdf/spotmicroai_gen.urdf.xml")
ob = objects[0]
p.resetBasePositionAndOrientation(ob,[1.019864,-0.976667,0.129847],[0.041700,-0.031063,0.154790,0.986578])
jointPositions=[ 0.000000, 0.000000, -0.268521, -1.499458, 2.470289, 0.000000, 0.049039, -1.499396, 2.469628, 0.000000, -0.036335, -1.499316, 2.472407, 0.000000, -0.249185, -1.498936, 2.475876, 0.000000 ]
for jointIndex in range (p.getNumJoints(ob)):
	p.resetJointState(ob,jointIndex,jointPositions[jointIndex])

p.setGravity(0.000000,0.000000,-9.810000)
p.stepSimulation()
p.disconnect()

FlorianWilk / SpotMicroAI / Simulation / simulation.py (view on GitHub)
import pybullet as p
import pybullet_data

fixedTimeStep = 1. / 100
numSolverIterations = 50


speed = 10
amplitude = 0.8
jump_amp = 0.5
maxForce = 3.5
kneeFrictionForce = 0
kp = 1
kd = .5
maxKneeForce = 1000

physId = p.connect(p.SHARED_MEMORY)
if (physId < 0):
  p.connect(p.GUI)
#p.resetSimulation()

angle = 0  # pick in range 0..0.2 radians
orn = p.getQuaternionFromEuler([0, angle, 0])
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.loadURDF("plane.urdf", [0, 0, 0], orn)
p.setPhysicsEngineParameter(numSolverIterations=numSolverIterations)
p.startStateLogging(p.STATE_LOGGING_GENERIC_ROBOT,
                    "genericlogdata.bin",
                    maxLogDof=16,
                    logFlags=p.STATE_LOG_JOINT_TORQUES)
p.setTimeOut(4000000)

p.setGravity(0, 0, -9.81)
p.setTimeStep(fixedTimeStep)

benelot / bullet-gym / pybulletgym / Trainer.py (view on GitHub)
	def __init__(self):
		# initialize random seed
		np.random.seed(int(time.time()))

		cid = p.connect(p.SHARED_MEMORY) # only show graphics if the browser is already running....
		self.visualize = (cid >= 0)
		if cid < 0:
			cid = p.connect(p.DIRECT)  # If no shared memory browser is active, we switch to headless mode (DIRECT is much faster)

StanfordVL / GibsonEnvV2 / realenv / core / physics / render_physics.py (view on GitHub)
    def __init__(self, obj_path, framePerSec, debug, human):
        context = zmq.Context()
        self.visn_socket = context.socket(zmq.REQ)
        self.visn_socket.bind("tcp://*:5556")
        
        self.debug_sliders = {}

        if debug:
            p.connect(p.GUI)
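            # Turn off the GUI's built-in keyboard shortcuts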
            p.configureDebugVisualizer(p.COV_ENABLE_KEYBOARD_SHORTCUTS, 0)
            self._startDebugRoomMap()
        else:
            # Headless training mode
            p.connect(p.DIRECT)

        p.setRealTimeSimulation(0)

        collisionId = p.createCollisionShape(p.GEOM_MESH, fileName=obj_path, meshScale=[1, 1, 1], flags=p.GEOM_FORCE_CONCAVE_TRIMESH)

        if debug:
            visualId = p.createVisualShape(p.GEOM_MESH, fileName=obj_path, meshScale=[1, 1, 1], rgbaColor = [1, 0.2, 0.2, 0.3], specularColor=[0.4, 4.0])
            boundaryUid = p.createMultiBody(baseCollisionShapeIndex = collisionId, baseVisualShapeIndex = visualId)
            print("Exterior boundary", boundaryUid)
            p.changeVisualShape(boundaryUid, -1, rgbaColor=[1, 0.2, 0.2, 0.3], specularColor=[1, 1, 1])
            #p.changeVisualShape(visualId, -1, rgbaColor=[1, 0.2, 0.2, 0.3])
        else:
            visualId = 0

        #p.setGravity(0,0,-10)
        p.setRealTimeSimulation(0)

araffin / robotics-rl-srl / environments / KukaObjects.py (view on GitHub)
        self._cam_yaw = 180
        self._cam_pitch = -40
        self._dv = dv
        self._p = p
        self._removeHeightHack = removeHeightHack
        self._blockRandom = blockRandom
        self._cameraRandom = cameraRandom
        self._width = width
        self._height = height
        self._numObjects = numObjects
        self._isTest = isTest

        if self._renders:
            # Reuse an existing shared-memory server if one is running; otherwise open a GUI window.
            self.cid = p.connect(p.SHARED_MEMORY)
            if self.cid < 0:
                self.cid = p.connect(p.GUI)
            p.resetDebugVisualizerCamera(1.3, 180, -41, [0.52, -0.2, -0.33])
        else:
            # Headless physics server, no visualization.
            self.cid = p.connect(p.DIRECT)
        self._seed()

        if self._isDiscrete:
            if self._removeHeightHack:
                self.action_space = spaces.Discrete(9)
            else:
                self.action_space = spaces.Discrete(7)
        else:
            self.action_space = spaces.Box(low=-1, high=1, shape=(3,))  # dx, dy, da
            if self._removeHeightHack:
                self.action_space = spaces.Box(low=-1,
                                               high=1,
                                               shape=(4,))  # dx, dy, dz, da
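
Most of the examples above keep the client id returned by p.connect. When one process talks to several physics servers, that id can be passed back through the physicsClientId argument accepted by most pybullet calls. A minimal sketch, not taken from any of the projects above:

import pybullet as p

# Two independent headless physics servers in one process.
sim_a = p.connect(p.DIRECT)
sim_b = p.connect(p.DIRECT)

# Route each call to the intended server via physicsClientId.
p.setGravity(0, 0, -9.81, physicsClientId=sim_a)
p.setGravity(0, 0, 0, physicsClientId=sim_b)

p.disconnect(sim_a)
p.disconnect(sim_b)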