In [1]:
import sys
sys.path.append('../../build')
import cv2 as cv
import numpy as np
import libry as ry
import time
print(cv.__version__)


4.2.0

In [2]:
#Let's edit the real world before we create the simulation
# RealWorld is the ground-truth configuration that the physics engine will
# drive; later cells only observe it through simulated sensors.
RealWorld = ry.Config()
RealWorld.addFile("../../scenarios/challenge.g")
# open a viewer window and show the freshly loaded configuration
V = ry.ConfigurationViewer()
V.setConfiguration(RealWorld)

In [3]:
#change some colors
# One color table instead of one hard-coded call per object.
# NOTE: the original code set obj0's color twice (green, then red) — only the
# red assignment took effect, so the redundant first call is dropped here.
objectColors = {
    "obj0": [1., 0, 0],
    "obj1": [1, 0, 0],
    "obj2": [1, 1, 0],
    "obj3": [1, 0, 1],
    "obj4": [0, 1, 1],
}
for objName, col in objectColors.items():
    RealWorld.getFrame(objName).setColor(col)

#you can also change the shape & size
RealWorld.getFrame("obj0").setShape(ry.ST.sphere, [.03])
#RealWorld.getFrame("obj0").setShape(ry.ST.ssBox, [.05, .05, .2, .01])
RealWorld.getFrame("obj0").setPosition([0., .2, 2.])

#remove some objects — keep only obj0..obj4 (the cell output confirms
#obj5..obj29 exist in the scenario and are deleted successfully)
for o in range(5, 30):
    name = "obj%i" % o
    print("deleting", name)
    RealWorld.delFrame(name)

# shapes were edited and frames deleted, so the viewer must recopy the
# meshes before redisplaying
V.recopyMeshes(RealWorld)
V.setConfiguration(RealWorld)


deleting obj5
deleting obj6
deleting obj7
deleting obj8
deleting obj9
deleting obj10
deleting obj11
deleting obj12
deleting obj13
deleting obj14
deleting obj15
deleting obj16
deleting obj17
deleting obj18
deleting obj19
deleting obj20
deleting obj21
deleting obj22
deleting obj23
deleting obj24
deleting obj25
deleting obj26
deleting obj27
deleting obj28
deleting obj29

In [4]:
# instantiate the simulation
# PhysX-backed simulation of the RealWorld configuration.
# NOTE(review): presumably the boolean flag enables the simulation's own
# display — confirm against the libry Simulation API.
S = RealWorld.simulation(ry.SimulatorEngine.physx, True)
# attach a camera sensor named "camera"; the cell output shows this returns
# a CameraViewSensor handle
S.addSensor("camera")


Out[4]:
<libry.CameraViewSensor at 0x7f0b2bb81c00>

In [5]:
# we're adding an "imp" to the simulation, which is a little process that can inject perturbations
# here the imp applies object impulses targeting 'obj0'; the trailing empty
# list carries no extra parameters in this call — see the libry ImpType docs
S.addImp(ry.ImpType.objectImpulses, ['obj0'], [])

In [6]:
# create your model world
# C is the agent's internal model — presumably the same table/robot scene
# without the obj* frames, which must instead be perceived from camera data
# (TODO confirm against pandasTable.g).
C = ry.Config()
C.addFile('../../scenarios/pandasTable.g')
#V = ry.ConfigurationViewer()
# reuse the existing viewer window to display the model world
V.setConfiguration(C)
# handle to the model world's camera frame; used later to anchor the
# perceived point cloud
cameraFrame = C.frame("camera")

#the focal length
# Camera intrinsics [fx, fy, px, py] used to convert depth images into point
# clouds. The principal point (320, 180) is the center of a 640x360 image and
# the focal length is specified relative to the image height — TODO confirm
# against the sensor resolution configured in the scenario. Named constants
# replace the previous magic numbers and the redundant two-step
# reassignment of f; the resulting values are unchanged.
IMAGE_WIDTH = 640.
IMAGE_HEIGHT = 360.
f = 0.895 * IMAGE_HEIGHT
fxfypxpy = [f, f, IMAGE_WIDTH / 2., IMAGE_HEIGHT / 2.]

In [ ]:
# Perception loop: step the physics simulation at 100Hz; every 10th step
# (10Hz) grab an RGB-D image, convert it to a point cloud, attach it to the
# model world's camera frame, and display both viewer and raw images.
# NOTE: the original loop also called S.get_q() each step but never used the
# result — that dead read has been removed, along with the dead
# `points = []` initialization (points is assigned at t=0 before any use).
tau = .01  # simulation time step [s]

for t in range(300):
    time.sleep(0.01)  # pace the loop at roughly real time

    if t % 10 == 0:
        [rgb, depth] = S.getImageAndDepth()  #we don't need images with 100Hz, rendering is slow
        points = S.depthData2pointCloud(depth, fxfypxpy)
        cameraFrame.setPointCloud(points, rgb)
        # the point cloud changes the camera frame's mesh, so recopy meshes
        V.recopyMeshes(C)
        V.setConfiguration(C)

        if len(rgb)>0: cv.imshow('OPENCV - rgb', rgb)
        if len(depth)>0: cv.imshow('OPENCV - depth', 0.5* depth)  # scaled for visibility

        # press 'q' in an OpenCV window to stop early
        if cv.waitKey(1) & 0xFF == ord('q'):
            break

    # advance physics by tau with no control input
    S.step([], tau, ry.ControlMode.none)

In [10]:
# close all OpenCV image windows opened during the perception loop
cv.destroyAllWindows()

In [ ]: