# Example One, getting frames off a webcam.
# basic_webcam.py
import numpy as np
import cv2

# Open the default system camera (device index 0).
cap = cv2.VideoCapture(0)
while True:
    # read() returns (success_flag, frame); frame is None on failure.
    ret, frame = cap.read()
    # Skip frames that failed to arrive -- imshow(None) would crash.
    if not ret:
        continue
    cv2.imshow('Basic Web Cam', frame)
    # Mask to the low byte; waitKey can carry modifier bits on some platforms.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# Release the device and tear down the display window.
cap.release()
cv2.destroyAllWindows()
# Example two, advance camera properties.
import numpy as np
import cv2
import time
# Map friendly names to the capture-property IDs.
# (cv2.cv.CV_CAP_PROP_* is the OpenCV 2.x constant namespace;
# OpenCV 3+ renamed these to cv2.CAP_PROP_*.)
prop_map = {
    "pos_msec": cv2.cv.CV_CAP_PROP_POS_MSEC,
    "pos_frame": cv2.cv.CV_CAP_PROP_POS_FRAMES,
    "avi_ratio": cv2.cv.CV_CAP_PROP_POS_AVI_RATIO,
    "width": cv2.cv.CV_CAP_PROP_FRAME_WIDTH,
    "height": cv2.cv.CV_CAP_PROP_FRAME_HEIGHT,
    "fps": cv2.cv.CV_CAP_PROP_FPS,
    "fourcc": cv2.cv.CV_CAP_PROP_FOURCC,
    "frame_count": cv2.cv.CV_CAP_PROP_FRAME_COUNT,
    "format": cv2.cv.CV_CAP_PROP_FORMAT,
    "mode": cv2.cv.CV_CAP_PROP_MODE,
    "brightness": cv2.cv.CV_CAP_PROP_BRIGHTNESS,
    "contrast": cv2.cv.CV_CAP_PROP_CONTRAST,
    "saturation": cv2.cv.CV_CAP_PROP_SATURATION,
    "hue": cv2.cv.CV_CAP_PROP_HUE,
    "gain": cv2.cv.CV_CAP_PROP_GAIN,
    "exposure": cv2.cv.CV_CAP_PROP_EXPOSURE,
    "convert_rgb": cv2.cv.CV_CAP_PROP_CONVERT_RGB,
    # "white_balance": cv2.cv.CV_CAP_PROP_WHITE_BALANCE,
    "rectification": cv2.cv.CV_CAP_PROP_RECTIFICATION,
}
# Read a camera property by its friendly name.
def get_prop(cam, name, prop_map):
    """Look up `name` in prop_map and read that property from the camera."""
    prop_id = prop_map[name]
    return cam.get(prop_id)
# Write a camera property by its friendly name.
def set_prop(cam, name, prop_map, value):
    """Look up `name` in prop_map and set that camera property to `value`."""
    prop_id = prop_map[name]
    cam.set(prop_id, value)
# Collect every property the camera supports into a plain dict.
def poll_props(cam, prop_map):
    """Query each property in prop_map; unsupported ones come back as None."""
    results = {}
    for prop_name, prop_id in prop_map.items():
        value = cam.get(prop_id)
        # OpenCV reports -1.0 for properties the driver doesn't support.
        results[prop_name] = None if value == -1.0 else value
    return results
# Create a camera and poll all of its properties.
cam = cv2.VideoCapture(0)
properties = poll_props(cam, prop_map)
# List the properties. print(...) with a single argument behaves
# identically on Python 2 and 3; the original bare `print` statement
# is a syntax error on Python 3.
for k, v in properties.items():
    print("{0:<12}\t:{1:<12}".format(k, v))
while True:
    # Toggle saturation between low and high each pass so the effect
    # is visible in the preview window.
    sat = get_prop(cam, "saturation", prop_map)
    if sat > 0.5:
        set_prop(cam, "saturation", prop_map, 0.1)
    else:
        set_prop(cam, "saturation", prop_map, 1.0)
    # Give the driver a moment to apply the setting.
    time.sleep(0.05)
    ret, frame = cam.read()
    # Skip failed reads -- imshow(None) would crash.
    if not ret:
        continue
    cv2.imshow('Basic Web Cam', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# put our toys back on the shelf
cam.release()
cv2.destroyAllWindows()
# In [1]:  (IPython notebook cell marker, kept as a comment)
from goprohero import GoProHero # helper lib
import urllib # used to "wget" files
import bs4 # beautiful soup, to parse file
import time
from IPython.display import Image
def download_and_save(name, route="http://10.5.5.9:8080/videos/DCIM/100GOPRO/"):
    """
    Download a specific file from a connected GoPro camera.

    name  -- file name on the camera's web file server; also used as the
             local output file name.
    route -- base URL of the GoPro's file listing.

    Uses Python 2's urllib.urlopen; no return value.
    """
    grab = route + name
    result = urllib.urlopen(grab)  # basically wget
    try:
        if result.code == 200:  # 200 means OK
            with open(name, 'wb') as fp:
                fp.write(result.read())  # write to file
    finally:
        # Close the HTTP response on every path -- the original leaked
        # the connection when the request did not return 200.
        result.close()
def get_new(last=None, url="http://10.5.5.9:8080/videos/DCIM/100GOPRO/"):
    """
    Find files recently added to the GoPro.

    last -- iterable of previously-seen file names (default: none seen).
    url  -- the camera's file-listing URL.

    Returns a list of file names not present in `last`, or None when the
    camera could not be reached (non-200 response).
    """
    # None default instead of a mutable [] default (shared across calls).
    if last is None:
        last = []
    unique = None
    known = set(last)
    out = urllib.urlopen(url)
    try:
        if out.code == 200:
            soup = bs4.BeautifulSoup(out.read())  # parse the file tree
            # collect every <a> tag's contents from the listing
            fresh = set([row.renderContents() for row in soup.findAll('a')])
            # set difference against what we had already seen
            unique = list(fresh.difference(known))
    finally:
        # Always close the HTTP response (the original never did).
        out.close()
    return unique
# In [5]:  (IPython notebook cell marker, kept as a comment)
# Snapshot the camera's file system before shooting.
out = get_new()
# Set up our controller.
cam = GoProHero(password="herpderp")
# Put the camera in burst mode.
cam.command('mode', 'burst')
# Tell it to take a shot.
cam.command('record', 'on')
# Wait for the burst to finish.
time.sleep(3)
# Diff against the earlier snapshot to find the new files.
out = get_new(last=out)
# get_new returns None when the camera was unreachable -- guard that,
# and use print(...) which works on Python 2 and 3.
for o in (out or []):
    print(o)
    download_and_save(o)
# In [ ]:  (IPython notebook cell marker, kept as a comment)
# Display the first newly-downloaded file inline (IPython Image helper).
Image(out[0])
"""
Read and display a simple MJPEG stream given a URL
"""
import cv2
import urllib
import numpy as np
# open our url stream
stream=urllib.urlopen('http://192.168.1.228:8080/video')
# create a buffer of bytes
bytes=''
invert = False
while True:
# read some bytes from the stream
# how much to read is a matter of taste
bytes+=stream.read(1024*8)
# \xff\xd8 is the start of a jpg
# \xff\xd9 is the end of a jpg
a = bytes.find('\xff\xd8')
b = bytes.find('\xff\xd9')
# if we find them
if a!=-1 and b!=-1:
jpg = bytes[a:b+2] # create a buffer
bytes= bytes[b+2:] # save the remaining bytes
#numpy can handle strings as image data
img = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.CV_LOAD_IMAGE_COLOR)
# Add some bling to show we can process
if invert:
img = 255-img
# show our image
cv2.imshow("live_and_direct",img)
# MVP keyboard input
my_key = cv2.waitKey(1)
if my_key & 0xFF == ord('q'):
break
if my_key & 0xFF == ord('i'):
invert = not invert
# put our toys away when done.
stream.close()
cv2.destroyAllWindows()
import cv2
import numpy as np
import time
import livestreamer

# Use livestreamer to resolve the stream info for the video page.
streams = livestreamer.streams("http://www.youtube.com/watch?v=-JcaDowz2G8&ab_channel=HopeCollegeAthletics")
stream = streams['best']
# Open our output file; OpenCV will decode from this file as we fill it.
fname = "test.mpg"
vid_file = open(fname, "wb")
# Dump from the stream into the mpg file -- build up an initial buffer.
fd = stream.open()
for i in range(0, 1048):
    if i % 256 == 0:
        print("Buffering...")
    new_bytes = fd.read(1024)
    vid_file.write(new_bytes)
# Open the video file from the beginning.
print("Done buffering.")
cam = cv2.VideoCapture(fname)
while True:
    ret, img = cam.read()
    # Bail out when the decoder runs dry. The original tested
    # `img.size` even when img was None, which raised an uncaught
    # AttributeError on buffer underrun.
    if not ret or img is None or img.size == 0:
        break
    img = 255 - img  # invert the colors
    cv2.imshow('live_img', img)
    # Parenthesized explicitly: `==` binds tighter than `&`, so the
    # original `0xFF & cv2.waitKey(5) == 27` never compared the key code.
    if (cv2.waitKey(5) & 0xFF) == 27:  # Esc quits
        break
    time.sleep(0.05)
    # Dump some more data into the file so the decoder doesn't run out.
    new_bytes = fd.read(1024 * 8)
    vid_file.write(new_bytes)
# Close the file and the network stream.
vid_file.close()
fd.close()
cv2.destroyAllWindows()
import logging
import os
import subprocess
import sys
import gphoto2 as gp
import time
def setup():
    """
    Attempt to attach to a gphoto2 device and grab the camera and context.

    Returns:
        (camera, context) tuple from libgphoto2.
    """
    logging.basicConfig(
        format='%(levelname)s: %(name)s: %(message)s', level=logging.WARNING)
    # Route libgphoto2's internal logging through Python's logging module.
    gp.check_result(gp.use_python_logging())
    context = gp.gp_context_new()
    camera = gp.check_result(gp.gp_camera_new())
    gp.check_result(gp.gp_camera_init(camera, context))
    # Show the camera's self-description so the user can see what attached.
    # print(...) works on Python 2 and 3; the bare statement form did not.
    text = gp.check_result(gp.gp_camera_get_summary(camera, context))
    print(text.text)
    return camera, context
def recurse_config(child, params=None):
    """
    Flatten gphoto2's SWIG config tree into a plain Python dict.

    child  -- a gphoto2 config widget (has count_children/get_child, and
              on leaves count_choices/get_choice).
    params -- accumulator dict for this level; default None means "start
              fresh". (The original used a mutable default `params={}`,
              which leaked results between separate top-level calls.)

    Returns a dict of {name: subtree-or-choice-list}; leaves become lists
    of their available choices.
    """
    if params is None:
        params = {}
    if child.count_children() <= 0:
        # Leaf widget: gather its choice list, if it has one.
        choices = []
        try:
            n = child.count_choices()
            for k in range(n):
                choices.append(child.get_choice(int(k)))
        except Exception:
            # Some widgets (e.g. free-text fields) raise when asked for
            # choices -- return whatever we collected so far.
            return choices
        return choices
    else:
        for i in range(child.count_children()):
            node = child.get_child(i)
            # Each child subtree gets its own fresh accumulator.
            params[node.get_name()] = recurse_config(node, {})
        return params
def print_config_dict(cdict, lvl=""):
    """
    Pretty-print the nested config dict produced by recurse_config.

    cdict -- a dict (subtree) or list (leaf choices).
    lvl   -- current indentation prefix, one tab added per level.

    Prints to stdout; returns None. Uses print(...) which behaves
    identically on Python 2 and 3 for a single argument (the original
    bare `print` statements were Python-2-only syntax).
    """
    if isinstance(cdict, dict):
        for key, val in cdict.items():
            print("{0}{1}".format(lvl, key))
            print_config_dict(val, lvl + "\t")
    elif isinstance(cdict, list):
        for item in cdict:
            print("{0}{1}".format(lvl, item))
    return
def set_config(camera, context, config, path, value):
    """
    Set one camera parameter through gphoto2's config tree.

    camera, context -- the attached gphoto2 camera and its context.
    config          -- the config tree fetched via gp_camera_get_config.
    path            -- sequence of node names to walk from the root to the
                      target widget (e.g. ["capturesettings", "aperture"]).
    value           -- the value to assign to the target widget.

    After modifying the widget we must push the whole config object back
    to the camera for the change to take effect.
    """
    current = config
    for p in path:
        current = current.get_child_by_name(p)
    current.acquire()
    current.set_value(value)
    current.disown()
    gp.check_result(gp.gp_camera_set_config(camera, config, context))
    # print(...) works on Python 2 and 3; the bare statement form did not.
    print("Set {0} to {1}".format(current.get_name(), current.get_value()))
def capture_image(camera, context, name):
    """
    Use gphoto2 to capture an image and retrieve it from the camera.

    The file is saved to /tmp/<name>. Note the camera session is exited
    after each capture (matching the original flow -- callers re-init via
    later gp calls).
    """
    file_path = gp.check_result(gp.gp_camera_capture(
        camera, gp.GP_CAPTURE_IMAGE, context))
    target = os.path.join('/tmp', name)
    # print(...) works on Python 2 and 3; the bare statement form did not.
    print('Copying image to {0}'.format(target))
    camera_file = gp.check_result(gp.gp_camera_file_get(
        camera, file_path.folder, file_path.name,
        gp.GP_FILE_TYPE_NORMAL, context))
    gp.check_result(gp.gp_file_save(camera_file, target))
    gp.check_result(gp.gp_camera_exit(camera, context))
def main():
    """Sweep every available aperture on an attached gphoto2 camera."""
    # Attach to the camera.
    camera, context = setup()
    # Grab a single test image first.
    capture_image(camera, context, "crap.jpg")
    # Fetch the camera's configuration tree, pythonify it, and print it
    # so we can see which parameters are available to play with.
    config = gp.check_result(gp.gp_camera_get_config(camera, context))
    pconfig = recurse_config(config)
    print_config_dict(pconfig)
    # Aperture priority (AV) mode is required before touching aperture.
    set_config(camera, context, config, ["capturesettings", "autoexposuremode"], "AV")
    # For each available aperture setting, apply it and capture a frame.
    for count, aperture in enumerate(pconfig["capturesettings"]["aperture"]):
        # Re-fetch the configuration so we modify a fresh tree.
        config = gp.check_result(gp.gp_camera_get_config(camera, context))
        set_config(camera, context, config, ["capturesettings", "aperture"], aperture)
        capture_image(camera, context, "Capture{:0>5}.jpg".format(count))


if __name__ == "__main__":
    sys.exit(main())
#!/usr/bin/env python
import rospy
# This is the tool that marshals images into OpenCV
from cv_bridge import CvBridge, CvBridgeError
# Import some stock ROS message types.
from sensor_msgs.msg import Image
# import some utils.
import numpy as np
import cv
import SimpleCV as scv
import copy as copy
class ProcessDepth:
    """
    ROS node: subscribe to a depth-image topic, select a depth band by
    thresholding, draw the resulting blobs, and republish the annotated
    image on an output topic.
    """

    def __init__(self):
        # Allows conversion between numpy arrays and ROS sensor_msgs/Image.
        self.bridge = CvBridge()
        # Allow our topics to be set via ROS params.
        self.input_camera_topic = rospy.get_param('~input_camera_topic', '/camera/depth/image_rect')
        self.output_camera_topic = rospy.get_param('~output_camera_topic', '/processed')
        # We publish a debug image as each frame comes in.
        self.pub = rospy.Publisher(self.output_camera_topic, Image, queue_size=10)
        rospy.Subscriber(self.input_camera_topic, Image, self._process_depth_img)
        # Run the node.
        self._run()

    def _run(self):
        # Keep the node alive until shutdown.
        rospy.spin()

    def _process_depth_img(self, msg):
        """Callback: convert, rescale to uint8, band-threshold, draw, publish.

        (Parameter renamed from `input`, which shadowed the builtin.)
        """
        # Convert the ROS image message to a cv2/numpy array.
        latest_image = self.bridge.imgmsg_to_cv2(msg)
        if latest_image is not None:
            try:
                # The input image is single-channel float and full of NaNs.
                # Rescale [min, max] -> [0, 255], ignoring the NaNs.
                dmin = np.nanmin(latest_image)
                dmax = np.nanmax(latest_image)
                sf = 255.0 / (dmax - dmin)
                scaled = sf * (latest_image - dmin)
                # Convert to uint8 and move into SimpleCV RGB.
                temp = scaled.astype(np.uint8)
                img = scv.Image(temp, cv2image=True, colorSpace=scv.ColorSpace.RGB)
                # Values less than 128.
                lt = img.threshold(128).invert()
                # Values greater than 64.
                gt = img.threshold(64)
                # Logical AND of the two masks selects the mid-depth band.
                # (Renamed from `range`, which shadowed the builtin.)
                band = lt * gt
                # Find blobs inside the masked region and draw them.
                blobs = img.findBlobsFromMask(mask=band)
                if blobs:
                    blobs.draw(color=scv.Color.RED, width=-1)
                    img = img.applyLayers()
                # Convert SimpleCV -> cv2 numpy -> ROS exactly once.
                # (The original converted twice and discarded the first.)
                cv2img = img.getNumpyCv2()
                img_msg = self.bridge.cv2_to_imgmsg(cv2img, "bgr8")
                self.pub.publish(img_msg)
            # `except X as e` works on Python 2.6+ and 3;
            # the original `except X, e` is Python-2-only syntax.
            except CvBridgeError as e:
                rospy.logwarn("PROCESSING EXCEPTION {0}".format(e))
# Boilerplate node spin-up: init ROS, construct the node (which spins),
# and swallow the interrupt raised on shutdown.
if __name__ == '__main__':
    try:
        rospy.init_node('ProcessDepth')
        node = ProcessDepth()
    except rospy.ROSInterruptException:
        pass
# In [ ]:  (IPython notebook cell marker, kept as a comment)