Commit 612a7d34 authored by hazaa

added mjpeg-server

parent d447d4ac
#!/usr/bin/env python
from __future__ import division
from __future__ import print_function
import cv2 # OpenCV camera
import time # sleep
import platform # determine linux or darwin (OSX)
import os # check for travis.ci environment
# travis-ci has a fit ... trying to get around it
# if platform.system().lower() == 'linux' and 'TRAVIS' not in os.environ:
# import picamera
# import picamera.array
# else:
# # from .fake_hw import picamera
# from fake_rpi import picamera
class VideoError(Exception):
pass
class SaveVideo(object):
"""
Simple class to save frames to video (mp4v)
macOS: avc1
windows: h264?
"""
def __init__(self):
self.out = None
self.encoder('avc1')
def open(self, filename, width, height, fps=30):
self.out = cv2.VideoWriter()
self.out.open(filename, self.mpg4, fps, (width,height))
def encoder(self, fourcc):
try:
self.mpg4 = cv2.VideoWriter_fourcc(*fourcc)
except Exception as err:
print(err)
print('Please select another encoder for your platform')
raise
def __del__(self):
self.release()
def write(self, image):
self.out.write(image)
def release(self):
if self.out:
self.out.release()
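# Example usage of SaveVideo (an illustrative sketch, not part of the original
# module; 'out.mp4' and the 640x480 frame size are made-up values):
#
#   sv = SaveVideo()                      # defaults to the 'avc1' fourcc
#   sv.open('out.mp4', 640, 480, fps=30)
#   sv.write(frame)                       # frame: a BGR numpy array from a camera
#   sv.release()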
# class VideoPublisher(object):
# """
# """
# def __init__(self):
# pass
CAMERA_PI = 0
CAMERA_CV = 1
CAMERA_VIDEO = 2 # useful??
kind = {
0: 'pi',
1: 'cv',
2: 'video'
}
class CameraPi(object):
ctype = CAMERA_PI
def __init__(self):
# if platform.system().lower() == 'linux' and 'TRAVIS' not in os.environ:
        try:
            import picamera
            import picamera.array
        except ImportError:
            from fake_rpi import picamera
        self.picamera = picamera  # keep a module reference; init() uses picamera.array
        self.camera = picamera.PiCamera()
def __del__(self):
# the red light should shut off
self.close()
def close(self):
self.camera.close()
print('exiting CameraPi ... bye!')
def init(self, win, cameraNumber, fileName, calibration):
# self.camera.vflip = True # camera is mounted upside down
self.camera.resolution = win
        self.bgr = self.picamera.array.PiRGBArray(self.camera, size=win)
if calibration:
self.cal = calibration
def read(self):
self.camera.capture(self.bgr, format='bgr', use_video_port=True)
tmp = self.bgr.array
img = tmp.copy()
self.bgr.truncate(0) # clear stream
return True, img
def isOpen(self):
return True # FIXME 2016-05-15
def type(self):
return self.ctype
# def get(self, display=True):
# if display:
# print('-----------------')
# print('Pi Camera')
# print('-----------------')
#
# return {'type': 'PiCamera'}
class CameraCV(object):
ctype = CAMERA_CV
def __init__(self):
self.camera = cv2.VideoCapture()
def __del__(self):
self.close()
def close(self):
self.camera.release()
print('exiting CameraCV ... bye!')
def init(self, win=None, cameraNumber=None, fileName=None, calibration=None):
# print('win', win)
        if cameraNumber is not None and not fileName:
live = True
port = cameraNumber
elif fileName:
live = False
port = fileName
else:
raise VideoError('CameraCV::init() must set cameraNumber OR fileName')
self.camera.open(port)
time.sleep(1) # let camera warm-up
if live:
# print('setting win size to:', win)
            # properties 3 and 4 are CAP_PROP_FRAME_WIDTH and CAP_PROP_FRAME_HEIGHT
            self.camera.set(3, win[0])
            self.camera.set(4, win[1])
if calibration:
self.cal = calibration
def read(self):
ret, img = self.camera.read()
if not ret:
return False, None
return True, img
def isOpen(self):
return self.camera.isOpened()
def type(self):
return self.ctype
# def get(self, display=True):
# if display:
# print('-----------------')
# print('OpenCV Camera')
# print('-----------------')
#
# return {'type': 'OpenCV', 'number': 0, 'size': (0, 0)}
class Camera(object):
"""
Generic camera object that can switch between OpenCV in PiCamera. This can
also handle reading mp4's using OpenCV too.
"""
camera = None
gray = False
def __init__(self, cam='cv'):
"""
Constructor
Sets up the camera either for OpenCV camera or PiCamera. If nothing is
passed in, then it determines the operating system and picks which
camera to use.
types:
pi - PiCamera
cv - an OpenCV camera
video - an mjpeg video clip to read from
default:
linux: PiCamera
OSX: OpenCV
in: type: cv video, or pi
out: None
"""
self.cal = None
# sys = platform.system().lower() # grab OS name and make lower case
if cam == 'pi':
self.camera = CameraPi()
elif cam == 'cv' or cam == 'video':
self.camera = CameraCV()
else:
raise VideoError('Error, {0!s} not supported'.format((cam)))
# print('[+] Camera type {}'.format(self.camera.type()))
def __del__(self):
self.close()
def close(self):
self.camera.close()
def init(self, win=(640, 480), cameraNumber=None, fileName=None, calibration=None):
"""
Initialize the camera and set the image size
in: image size (tuple (width,height), cameraNumber, calibration)
out: None
"""
self.camera.init(win=win, cameraNumber=cameraNumber, fileName=fileName, calibration=calibration)
def read(self):
"""
Reads a gray scale image
in: None
out: cv image (numpy array) in grayscale
"""
ret, img = self.camera.read()
if self.cal and ret: # FIXME 2016-05-15
print('do calibration correction ... not done yet')
if self.gray and ret:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
return ret, img
def isOpen(self):
"""
Determines if the camera is opened or not
in: None
out: True/False
"""
return self.camera.isOpen()
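# A minimal self-test (an illustrative sketch, not part of the original module).
# Camera number 0 and the 640x480 window are assumptions; use Camera('pi') on a
# Raspberry Pi with the camera module instead.
if __name__ == '__main__':
    cam = Camera('cv')
    cam.init(win=(640, 480), cameraNumber=0)
    ok, frame = cam.read()
    if ok:
        print('grabbed frame:', frame.shape)
    else:
        print('no frame grabbed')
    cam.close()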
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# https://walchko.github.io/blog/Vision/Python-MJpeg-Streamer.html
# MIT License
# (c) 2017 Kevin J. Walchko
from __future__ import print_function
import cv2
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer  # Python 2 stdlib (http.server in Python 3)
import time
import argparse
#from opencvutils import Camera #AH
from Camera import Camera #AH
import socket as Socket
VERSION = 0.9 #AH
#from opencvutils import __version__ as VERSION
# import errno
import os
import re
# I used to bind to 0.0.0.0 (all interfaces), but that seemed to be really
# slow. Feeding it the correct ip address seems to greatly speed things up.
camera = None
def getIP(iface):
#search_str = 'ip addr show wlan0'.format(iface) # AH
search_str = 'ip addr show '+iface #AH
ipv4 = re.search(re.compile(r'(?<=inet )(.*)(?=\/)', re.M), os.popen(search_str).read()).groups()[0]
ipv6 = '' # AH
#ipv6 = re.search(re.compile(r'(?<=inet6 )(.*)(?=\/)', re.M), os.popen(search_str).read()).groups()[0]
return (ipv4, ipv6)
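# Example (illustrative addresses): 'ip addr show eth0' typically contains a
# line such as
#     inet 192.168.1.42/24 brd 192.168.1.255 scope global eth0
# from which getIP('eth0') returns ('192.168.1.42', ''); the ipv6 lookup is
# currently disabled, so the second element is always an empty string.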
def setUpCameraPi(win=(320, 240)):
global camera
camera = Camera('pi')
camera.init(win=win)
def setUpCameraCV(win=(320, 240), cv=0):
global camera
camera = Camera('cv')
camera.init(cameraNumber=cv, win=win)
def compress(orig, comp):
return float(orig) / float(comp)
class mjpgServer(BaseHTTPRequestHandler):
"""
A simple mjpeg server that either publishes images directly from a camera
or republishes images from another pygecko process.
"""
ip = None
hostname = None
def do_GET(self):
global camera
print('connection from:', self.address_string())
if self.ip is None or self.hostname is None:
self.ip, _ = getIP('wlan0')
self.hostname = Socket.gethostname()
if self.path == '/mjpg':
self.send_response(200)
self.send_header(
'Content-type',
'multipart/x-mixed-replace; boundary=--jpgboundary'
)
self.end_headers()
while True:
if camera:
# print('cam')
ret, img = camera.read()
else:
raise Exception('Error, camera not setup')
if not ret:
print('no image from camera')
time.sleep(1)
continue
ret, jpg = cv2.imencode('.jpg', img)
# print 'Compression ratio: %d4.0:1'%(compress(img.size,jpg.size))
self.wfile.write("--jpgboundary")
self.send_header('Content-type', 'image/jpeg')
# self.send_header('Content-length',str(tmpFile.len))
self.send_header('Content-length', str(jpg.size))
self.end_headers()
self.wfile.write(jpg.tostring())
# time.sleep(0.05)
elif self.path == '/':
# hn = self.server.server_address[0]
port = self.server.server_address[1]
ip = self.ip
hostname = self.hostname
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><head></head><body>')
self.wfile.write('<h1>{0!s}[{1!s}]:{2!s}</h1>'.format(hostname, ip, port))
self.wfile.write('<img src="http://{}:{}/mjpg"/>'.format(ip, port))
self.wfile.write('<p>{0!s}</p>'.format((self.version_string())))
# self.wfile.write('<p>The mjpg stream can be accessed directly at:<ul>')
# self.wfile.write('<li>http://{0!s}:{1!s}/mjpg</li>'.format(ip, port))
# self.wfile.write('<li><a href="http://{0!s}:{1!s}/mjpg"/>http://{0!s}:{1!s}/mjpg</a></li>'.format(hostname, port))
# self.wfile.write('</p></ul>')
self.wfile.write('<p>This only handles one connection at a time</p>')
self.wfile.write('</body></html>')
else:
print('error', self.path)
self.send_response(404)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><head></head><body>')
self.wfile.write('<h1>{0!s} not found</h1>'.format(self.path))
self.wfile.write('</body></html>')
def handleArgs():
    parser = argparse.ArgumentParser(description='A simple mjpeg server Example: mjpeg-server -p 8080 --camera 4')
    parser.add_argument('--version', action='version', version=str(VERSION))
parser.add_argument('-p', '--port', help='mjpeg publisher port, default is 9000', type=int, default=9000)
parser.add_argument('-c', '--camera', help='set opencv camera number, ex. -c 1', type=int, default=0)
parser.add_argument('-t', '--type', help='set camera type, either pi or cv, ex. -t pi', default='cv')
parser.add_argument('-s', '--size', help='set size', nargs=2, type=int, default=(320, 240))
args = vars(parser.parse_args())
args['size'] = (args['size'][0], args['size'][1])
return args
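# Example (illustrative): 'mjpeg-server -p 8080 -t pi -s 320 240' produces
#     {'port': 8080, 'camera': 0, 'type': 'pi', 'size': (320, 240)}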
def main():
args = handleArgs()
#iface = 'wlan0' #AH
iface = 'eth0' #AH
try:
win = args['size']
        if args['type'] == 'cv':
cv = args['camera']
setUpCameraCV(cv=cv, win=win)
else:
setUpCameraPi(win=win)
# server = HTTPServer(('0.0.0.0', args['port']), mjpgServer)
ipv4, ipv6 = getIP(iface)
print(iface, ipv4)
mjpgServer.ip = ipv4
mjpgServer.hostname = Socket.gethostname()
server = HTTPServer((ipv4, args['port']), mjpgServer)
print("server started on {}:{}".format(Socket.gethostname(), args['port']))
server.serve_forever()
except KeyboardInterrupt:
print('KeyboardInterrupt')
server.socket.close()
if __name__ == '__main__':
main()
@@ -25,11 +25,13 @@ They are almost always made up of the following elements, as in the figure
Topics 2018-2019:
* TP [Raspberry Pi](docs/tp-raspi.md)
* Review: TP [Arduino](tp-arduino.md)
* Telepresence robot: from a browser, we want to drive the robot, see what its camera films, and display the rotation speed of the wheels (BONUS: and the current through each motor).
* DC motor control with a [quadrature encoder v2](docs/moteur_cc_encodeur_quadrature_2.md)
* video streaming server [reefwingrobotics](http://reefwingrobotics.blogspot.com/2017/03/streaming-video-from-raspberry-pi-camera.html), streaming + command server [reefwingrobotics](http://reefwingrobotics.blogspot.com/2017/03/controlling-raspberry-pi-via-web-browser.html), [blog.miguelgrinberg](https://blog.miguelgrinberg.com/post/stream-video-from-the-raspberry-pi-camera-to-web-browsers-even-on-ios-and-android). BONUS: [real-time-object-detection](https://hackernoon.com/tensorflow-js-real-time-object-detection-in-10-lines-of-code-baf15dfb95b2)
* TP [Raspberry Pi](docs/tp-raspi.md)
* Review: [TP Arduino](tp-arduino.md)
* Review: [TP Python](tp-python.md)
* Telepresence robot: from a browser, we want to drive the robot, see what its camera films, and display the rotation speed of the wheels (BONUS: and the current through each motor).
* video streaming server: [TP telepresence](tp-telepresence-streaming.md)
* (DC motor control with a [quadrature encoder v2](docs/moteur_cc_encodeur_quadrature_2.md))
Topics 2017-2018:
@@ -148,9 +148,11 @@ Prerequisites: [TP Arduino](tp-arduino.md)
In this part we want to control the Arduino from the Raspberry Pi.
Serial dialogue: do the corresponding part in the TP [Arduino](tp-arduino.md)
Solution 1: do the "Serial dialogue" part in the TP [Arduino](tp-arduino.md)
Another solution is to use the Arduino in "slave" mode with a library such as nanpy:
Solution 2: use the Arduino in "slave" mode with a library such as nanpy.
The Nanpy.ino program is installed on the Arduino and must not be modified.
Nanpy.ino lets Python "take control" of the Arduino in a transparent way.
1. Flash nanpy-firmware. To do this, from the Arduino programming software, click File/Open and select Nanpy.ino, which is located in /home/mctr/local/bin/nanpy-firmware/Nanpy.
1. On the Raspberry, open a terminal and start python in interactive mode. Type:
Telepresence robot
======
A telepresence robot is operated remotely. It is fitted with a camera that
lets the operator see the environment.
Streaming server
------
On the Raspberry Pi we run a web server, written in Python.
This web server displays the video stream captured by the Raspberry Pi camera.
The operator is on another machine (a desktop machine) on the same network
as the Raspberry Pi.
They connect to the server.
1. Copy the files mjpeg_server_base.py and Camera.py from [code/UI/mjpeg-server](../code/UI/mjpeg-server) onto your Raspberry Pi
1. Connect to the Raspberry Pi and launch the mjpeg_server_base.py server (see the sketch below)
1. On a desktop machine, connect to the server.
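
To make the steps concrete, here is a minimal sketch of what launching the server amounts to, assuming the server file shown above is saved as `mjpeg_server_base.py` next to `Camera.py`; the interface name `eth0`, the 320x240 window and port 9000 simply mirror the defaults in its `main()` and may need adjusting:

```python
# Programmatic equivalent of "python mjpeg_server_base.py -t pi -p 9000" (sketch)
import socket as Socket
from BaseHTTPServer import HTTPServer            # Python 2 (http.server in Python 3)
from mjpeg_server_base import mjpgServer, setUpCameraPi, getIP

setUpCameraPi(win=(320, 240))                    # or setUpCameraCV(cv=0, win=(320, 240))
ipv4, _ = getIP('eth0')                          # address the server binds to
mjpgServer.ip = ipv4
mjpgServer.hostname = Socket.gethostname()

server = HTTPServer((ipv4, 9000), mjpgServer)
server.serve_forever()                           # then browse to http://<raspi-ip>:9000/
```

Once the server is running, opening `http://<raspi-ip>:9000/` in a browser shows a page that embeds the `/mjpg` stream; note that the server handles one connection at a time.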
Links:
* [reefwingrobotics](http://reefwingrobotics.blogspot.com/2017/03/streaming-video-from-raspberry-pi-camera.html),
Remote-control server for the robot
------
In this part we build a server that lets us command the robot.
Modify the server-side code in [code/UI/cgi](../code/UI/cgi) to add to the web page (a minimal sketch is given after this list):
1. a button that triggers a print on the server side.
1. a button that lights an LED on the Arduino.
1. a button that spins a motor on the Arduino (rotation in one direction only).
1. buttons that drive the robot's two motors (each motor rotating in one direction only).
1. some information about the robot's state: the state of a digital pin (0/1), the state of an analog pin.
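
As a starting point, here is a minimal sketch of a CGI script that lights an LED through nanpy when a button on the page sends `led=on`. The serial device `/dev/ttyACM0`, pin 13 and the `led` parameter name are assumptions, and this is not the actual code in [code/UI/cgi](../code/UI/cgi):

```python
#!/usr/bin/env python
# Sketch of a CGI handler driving an Arduino LED via nanpy (illustrative only)
from __future__ import print_function
import cgi
import sys
from nanpy import ArduinoApi, SerialManager

form = cgi.FieldStorage()                       # parameters sent by the button
led = form.getvalue('led')                      # e.g. the button requests ?led=on
print('button pressed, led={}'.format(led), file=sys.stderr)  # visible in the web server log

a = ArduinoApi(connection=SerialManager(device='/dev/ttyACM0'))
a.pinMode(13, a.OUTPUT)                         # LED pin (assumption)
a.digitalWrite(13, a.HIGH if led == 'on' else a.LOW)

print('Content-type: text/html')
print()                                         # blank line ends the CGI headers
print('<html><body><p>LED is {}</p></body></html>'.format(led))
```

The same pattern extends to the motor and robot-state items: read a request parameter, call the corresponding nanpy function (digitalWrite, analogRead, ...), and echo the result back in the HTML response.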
Links:
* [reefwingrobotics](http://reefwingrobotics.blogspot.com/2017/03/controlling-raspberry-pi-via-web-browser.html)
* ([blog.miguelgrinberg](https://blog.miguelgrinberg.com/post/stream-video-from-the-raspberry-pi-camera-to-web-browsers-even-on-ios-and-android))
Streaming + remote-control server for the robot
------
Using the above, write a server that both streams the video and commands the robot.
Links
------
* [mjpeg_streamer.py](https://walchko.github.io/blog/Vision/Python-MJpeg-Streamer.html): comparison of BaseHTTPServer/Flask/bottle. Problem: bottle depends on gevent
* https://github.com/MomsFriendlyRobotCompany/opencvutils
* https://bottlepy.org/docs/dev/ https://github.com/bottlepy/bottle/
BONUS:
------
* tensorflow.js in the browser: [real-time-object-detection](https://hackernoon.com/tensorflow-js-real-time-object-detection-in-10-lines-of-code-baf15dfb95b2)