Basic test


In [1]:
import sys
sys.path.append('../../build')
import libry as ry
import numpy as np
import time

In [2]:
#-- REAL WORLD configuration, which is attached to the physics engine
# accessing this directly would be cheating!
RealWorld = ry.Config()
RealWorld.addFile("../../scenarios/challenge.g")

In [3]:
S = RealWorld.simulation(ry.SimulatorEngine.physx, True)
S.addSensor("camera")

In [4]:
#-- MODEL WORLD configuration, this is the data structure on which you represent
# what you know about the world and compute things (controls, contacts, etc)
C = ry.Config()
#D = C.view() #rather use the ConfigurationViewer below
C.addFile("../../scenarios/pandasTable.g")

In [5]:
#-- using the viewer, you can view configurations or paths
V = ry.ConfigurationViewer()
V.setConfiguration(C)

In [6]:
#-- the following is the simulation loop
tau = .01

for t in range(300):
    time.sleep(0.01)

    #grab sensor readings from the simulation
    q = S.get_q()
    if t%10 == 0:
        [rgb, depth] = S.getImageAndDepth()  #we don't need images at 100Hz; rendering is slow

    #some good old fashioned IK
    C.setJointState(q) #set your robot model to match the real q
    V.setConfiguration(C) #to update your model display
    [y,J] = C.evalFeature(ry.FS.position, ["R_gripper"])
    vel = J.T @ np.linalg.inv(J@J.T + 1e-2*np.eye(y.shape[0])) @ [0.,0.,-1e-1]  #damped pseudoinverse step, constant downward velocity

    #send velocity controls to the simulation
    S.step(vel, tau, ry.ControlMode.velocity)
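
The update rule above is a damped least-squares (Levenberg-Marquardt) step: it minimizes |J qdot - ydot|^2 + damping*|qdot|^2, which stays well-behaved near singular configurations where a plain pseudoinverse would blow up. A minimal sketch that factors this into a helper; the name dls_step is ours (not part of libry), and it uses np.linalg.solve rather than forming the inverse explicitly:

In [ ]:
def dls_step(J, err, damping=1e-2):
    #solve  min_qdot |J @ qdot - err|^2 + damping*|qdot|^2
    JJt = J @ J.T
    return J.T @ np.linalg.solve(JJt + damping*np.eye(JJt.shape[0]), err)

#equivalent to the line in the loop above:
#vel = dls_step(J, np.array([0.,0.,-1e-1]))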

Doing things relative to an object


In [7]:
# add a new frame to the MODEL configuration
# (Perception will later have to do exactly this: add perceived objects to the model)
obj = C.addFrame("object")

In [8]:
# set frame parameters, associate a shape with the frame
obj.setPosition([.8,0,1.5])
obj.setQuaternion([1,0,.5,0])
obj.setShape(ry.ST.capsule, [.2,.02])
obj.setColor([1,0,1])
V.setConfiguration(C)
V.recopyMeshes(C) #this is rarely necessary, only when you change meshes within C
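
Note that [1,0,.5,0] is not a unit quaternion. We have not checked whether libry normalizes quaternions internally, so if you compute orientations yourself it is safest to normalize explicitly. A minimal numpy sketch, assuming the (w,x,y,z) convention used above and that setQuaternion accepts any array-like:

In [ ]:
quat = np.array([1., 0., .5, 0.])   #(w, x, y, z)
quat = quat / np.linalg.norm(quat)  #unit quaternion, approx [.894, 0, .447, 0]
obj.setQuaternion(quat)
V.setConfiguration(C)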

In [9]:
#-- the following is the simulation loop
tau = .01

for t in range(300):
    time.sleep(0.01)

    #grab sensor readings from the simulation
    q = S.get_q()
    if t%10 == 0:
        [rgb, depth] = S.getImageAndDepth()  #we don't need images at 100Hz; rendering is slow

    #some good old fashioned IK
    C.setJointState(q) #set your robot model to match the real q
    V.setConfiguration(C) #to update your model display
    [y,J] = C.evalFeature(ry.FS.positionDiff, ["R_gripperCenter", "object"])
    vel = J.T @ np.linalg.inv(J@J.T + 1e-2*np.eye(y.shape[0])) @ (-y)  #step towards zero position error

    #send velocity controls to the simulation
    S.step(vel, tau, ry.ControlMode.velocity)

How could you align the gripper for a proper grasp?
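
One possible direction (a sketch, not the only answer): stack an orientation feature on top of positionDiff, exactly as the multi-objective section below does. Which scalar-product feature to use depends on the gripper's frame convention, so treat the axis choice here as an assumption:

In [ ]:
#inside the loop body, replacing the single positionDiff step:
[y1,J1] = C.evalFeature(ry.FS.positionDiff, ["R_gripperCenter", "object"])
[y2,J2] = C.evalFeature(ry.FS.scalarProductXZ, ["R_gripperCenter", "object"])
y = np.block([-y1, -y2])  #drive both residuals to zero
J = np.block([[J1],[J2]])
vel = J.T @ np.linalg.inv(J@J.T + 1e-2*np.eye(y.shape[0])) @ y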


In [10]:
#-- drop the references; this destroys the simulation, the viewer, and the configurations
S=0
V=0
C=0
RealWorld=0

Direct inverse kinematics with multiple objectives


In [ ]:
import os
os._exit(0)  #hard-exit the notebook kernel, so the next section starts from a clean state

In [1]:
import sys
sys.path.append('../../build')
import libry as ry
import numpy as np
import time

In [2]:
RealWorld = ry.Config()
RealWorld.addFile("../../scenarios/challenge.g")

S = RealWorld.simulation(ry.SimulatorEngine.physx, True)
S.addSensor("camera")


Out[2]:
<libry.CameraViewSensor at 0x7f327898aae8>

In [3]:
C = ry.Config()
C.addFile("../../scenarios/pandasTable.g")

V = ry.ConfigurationViewer()
V.setConfiguration(C)

In [5]:
tau = .01

for t in range(300):
    time.sleep(0.01)

    q = S.get_q()
    if t%10 == 0:
        [rgb, depth] = S.getImageAndDepth()  #we don't need images at 100Hz; rendering is slow

    C.setJointState(q) #set your robot model to match the real q
    V.setConfiguration(C) #to update your model display

    #evaluate a first feature
    [y1,J1] = C.evalFeature(ry.FS.position, ["R_gripper"])
    #redefine y1 to become the desired change-of-value ("error" or "residual"); here just a constant velocity
    y1 = np.array([0.,0.,-1e-1])
    #you can multiply y1 and J1 here with some number, to increase the importance of the first feature
    
    #evaluate a second feature
    [y2,J2] = C.evalFeature(ry.FS.scalarProductYZ, ["R_gripper","world"])
    #redefine y2 to become the desired change-of-value ("error" or "residual"); here by subtracting the target
    y2 = [1.] - y2
    #you can multiply y2 and J2 here with some number, to increase the importance of the second feature

    #stack all tasks
    y = np.block([y1, y2])
    J = np.block([[J1],[J2]])
    
    vel = J.T @ np.linalg.inv(J@J.T + 1e-2*np.eye(y.shape[0])) @ y

    #send velocity controls to the simulation
    S.step(vel, tau, ry.ControlMode.velocity)
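
On the weighting hinted at in the comments above: the stacked step solves a least-squares problem, so scaling a feature's residual and Jacobian by the same factor w increases that feature's relative importance quadratically (by w^2). A minimal sketch with purely illustrative weights:

In [ ]:
w1, w2 = 1., 10.  #hypothetical weights: make the alignment objective dominate
y = np.block([w1*y1, w2*y2])
J = np.block([[w1*J1],[w2*J2]])
vel = J.T @ np.linalg.inv(J@J.T + 1e-2*np.eye(y.shape[0])) @ y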

