...
 
Commits (5)
# Entry point for the Smiling Robot: wires together the camera, the buttons,
# and the feature provider, either against real Raspberry Pi hardware or in
# a local "dry run" over previously captured images.
import argparse

import tflib_lite
from rpi import devel, prepare

parser = argparse.ArgumentParser(description='Run the Smiling Robot on device or in test mode')
parser.add_argument('--dry_run', action='store_true', help='evaluate the smile bot on the images in the captures/ folder')
parser.add_argument('--use_vj_detector', action='store_true', help='use Viola-Jones face detector to prepare')
args = parser.parse_args()

provider = prepare.Provider(args.use_vj_detector)

if args.dry_run:
    # No GPIO / picamera in dry-run mode: a fake camera serves images from
    # disk and EvaluateFolder "presses" the button once per image.
    camera = devel.FakeCamera()
    buttons = devel.EvaluateFolder('captures/', camera)
else:
    # pilib is imported lazily on purpose: it pulls in RPi.GPIO and
    # picamera, which are only available on the device itself. Importing it
    # unconditionally at the top would break --dry_run on a dev machine.
    from rpi import pilib
    pin_config = pilib.DEFAULT_PINS
    buttons = pilib.Buttons(pin_config)
    camera = pilib.Camera(debug=True)

#inference = tflib.Inference('', on_true=buttons.yes(), on_false=buttons.no(), on_error=buttons.err())
#inference = pilib.FakeInference('', on_true=buttons.yes(), on_false=buttons.no(), on_error=buttons.err())
......
absl-py==0.7.1
alabaster==0.7.12
albumentations==0.2.3
anaconda-client==1.7.2
anaconda-project==0.8.2
asn1crypto==0.24.0
astor==0.7.1
astroid==2.2.5
astropy==3.1.2
atomicwrites==1.3.0
attrs==19.1.0
Babel==2.6.0
backcall==0.1.0
backports.os==0.1.1
backports.shutil-get-terminal-size==1.0.0
beautifulsoup4==4.7.1
bitarray==0.8.3
bkcharts==0.2
bleach==3.1.0
bokeh==1.0.4
boto==2.49.0
Bottleneck==1.2.1
certifi==2019.3.9
cffi==1.12.2
chardet==3.0.4
Click==7.0
cloudpickle==0.8.0
clyent==1.2.2
colorama==0.4.1
contextlib2==0.5.5
cryptography==2.6.1
cycler==0.10.0
Cython==0.29.6
cytoolz==0.9.0.1
dask==1.1.4
decorator==4.4.0
defusedxml==0.5.0
distributed==1.26.0
docutils==0.14
entrypoints==0.3
et-xmlfile==1.0.1
fastcache==1.0.2
filelock==3.0.10
Flask==1.0.2
future==0.17.1
gast==0.2.2
gevent==1.4.0
glob2==0.6
gmpy2==2.0.8
google==2.0.2
google-pasta==0.1.5
greenlet==0.4.15
grpcio==1.20.1
h5py==2.9.0
heapdict==1.0.0
html5lib==1.0.1
idna==2.8
imageio==2.5.0
imagesize==1.1.0
imgaug==0.2.6
importlib-metadata==0.0.0
ipdb==0.12
ipykernel==5.1.0
ipython==7.4.0
ipython-genutils==0.2.0
ipywidgets==7.4.2
isort==4.3.16
itsdangerous==1.1.0
jdcal==1.4
jedi==0.13.3
jeepney==0.4
Jinja2==2.10
jsonschema==3.0.1
jupyter==1.0.0
jupyter-client==5.2.4
jupyter-console==6.0.0
jupyter-core==4.4.0
jupyterlab==0.35.4
jupyterlab-server==0.2.0
kaggle==1.5.3
Keras-Applications==1.0.7
Keras-Preprocessing==1.0.9
keyring==18.0.0
kiwisolver==1.0.1
lazy-object-proxy==1.3.1
libarchive-c==2.8
lief==0.9.0
llvmlite==0.28.0
locket==0.2.0
lxml==4.3.2
Markdown==3.1
MarkupSafe==1.1.1
matplotlib==3.0.3
mccabe==0.6.1
mistune==0.8.4
mkl-fft==1.0.10
mkl-random==1.0.2
more-itertools==6.0.0
mpmath==1.1.0
msgpack==0.6.1
multipledispatch==0.6.0
nbconvert==5.4.1
nbformat==4.4.0
networkx==2.2
nltk==3.4
nose==1.3.7
notebook==5.7.8
numba==0.43.1
numexpr==2.6.9
numpy==1.16.2
numpydoc==0.8.0
olefile==0.46
opencv-python-headless==4.1.0.25
openpyxl==2.6.1
packaging==19.0
pandas==0.24.2
pandocfilters==1.4.2
parso==0.3.4
partd==0.3.10
path.py==11.5.0
pathlib2==2.3.3
patsy==0.5.1
pep8==1.7.1
pexpect==4.6.0
pickleshare==0.7.5
Pillow==5.4.1
pkginfo==1.5.0.1
pluggy==0.9.0
ply==3.11
prometheus-client==0.6.0
prompt-toolkit==2.0.9
protobuf==3.7.1
psutil==5.6.1
ptyprocess==0.6.0
py==1.8.0
pycodestyle==2.5.0
pycosat==0.6.3
pycparser==2.19
pycrypto==2.6.1
pycurl==7.43.0.2
pyflakes==2.1.1
Pygments==2.3.1
pylint==2.3.1
pyodbc==4.0.26
pyOpenSSL==19.0.0
pyparsing==2.3.1
pyrsistent==0.14.11
PySocks==1.6.8
pytest==4.3.1
pytest-arraydiff==0.3
pytest-astropy==0.5.0
pytest-doctestplus==0.3.0
pytest-openfiles==0.3.2
pytest-remotedata==0.3.1
python-dateutil==2.8.0
python-slugify==3.0.2
pytz==2018.9
PyWavelets==1.0.2
PyYAML==5.1
pyzmq==18.0.0
QtAwesome==0.5.7
qtconsole==4.4.3
QtPy==1.7.0
requests==2.21.0
rope==0.12.0
ruamel-yaml==0.15.46
scikit-image==0.14.2
scikit-learn==0.20.3
scipy==1.2.1
seaborn==0.9.0
SecretStorage==3.1.1
Send2Trash==1.5.0
simplegeneric==0.8.1
singledispatch==3.4.0.3
six==1.12.0
snowballstemmer==1.2.1
sortedcollections==1.1.2
sortedcontainers==2.1.0
soupsieve==1.8
Sphinx==1.8.5
sphinxcontrib-websupport==1.1.0
spyder==3.3.3
spyder-kernels==0.4.2
SQLAlchemy==1.3.1
statsmodels==0.9.0
sympy==1.3
tables==3.5.1
tb-nightly==1.14.0a20190301
tblib==1.3.2
tensorflow-gpu==2.0.0a0
termcolor==1.1.0
terminado==0.8.1
testpath==0.4.2
text-unidecode==1.2
tf-estimator-nightly==1.14.0.dev2019030115
toolz==0.9.0
torch==1.0.1
tornado==6.0.2
tqdm==4.31.1
traitlets==4.3.2
unicodecsv==0.14.1
urllib3==1.24.1
wcwidth==0.1.7
webencodings==0.5.1
Werkzeug==0.14.1
widgetsnbextension==3.4.2
wrapt==1.11.1
wurlitzer==1.0.2
xlrd==1.2.0
XlsxWriter==1.1.5
xlwt==1.3.0
zict==0.1.4
zipp==0.3.3
"""Base classes for Smile Bot"""
from abc import ABC, abstractmethod
def _do_sleep_a_bit():
    """Default on_press handler: pause briefly (presumably a button
    debounce / pacing delay -- TODO confirm)."""
    # BUG FIX: this module only imports from `abc` at the top, so `time`
    # was an unresolved name here; import it locally to avoid a NameError
    # without touching the module header.
    import time
    time.sleep(0.44)
class ButtonsBase(ABC):
    """Abstract base for the bot's button/feedback surface.

    yes()/no()/err() hand back callbacks that print the inference outcome;
    on_press holds a callable run per press (defaults to a short sleep).
    Subclasses must implement Run().
    """

    def __init__(self):
        super().__init__()
        # Default press handler; callers may replace it.
        self.on_press = _do_sleep_a_bit

    def yes(self):
        """Return a zero-argument callback for a positive result."""
        return lambda: print("Yes!")

    def no(self):
        """Return a zero-argument callback for a negative result."""
        return lambda: print("No!")

    def err(self):
        """Return a one-argument callback invoked with the exception."""
        return lambda err: print("Err!")

    @abstractmethod
    def Run(self):
        """Drive the main loop; implemented by concrete subclasses."""
"""Bindings for running the smile bot inference in non-prod environment."""
import os
import imageio
from rpi import base
# This class must follow the interface of base.ButtonsBase: it replaces the
# hardware Buttons when running with --dry_run.
class EvaluateFolder(base.ButtonsBase):
    """Fake button driver that "presses" once per image found on disk."""

    def __init__(self, folder, fake_camera):
        # BUG FIX: the base __init__ was never called, so self.on_press was
        # never set and Run() crashed with AttributeError on the first file.
        super().__init__()
        self.folder = folder
        self.fake_camera = fake_camera

    def err(self):
        """Return an error callback that re-raises instead of printing --
        a dry run should fail loudly."""
        def do_err(err):
            raise err
        return do_err

    def Run(self):
        """Walk self.folder, feed each file to the fake camera, and trigger
        on_press for it (in sorted order within each directory)."""
        print("Running fake eval in {}".format(self.folder))
        for dp, dn, files in os.walk(self.folder):
            for f in sorted(files):
                fn = os.path.join(dp, f)
                print("walking {}".format(f))
                self.fake_camera.set_next_image(fn)
                self.on_press()
class FakeCamera(object):
    """In-memory camera stand-in: serves images pre-loaded from disk."""

    def __init__(self):
        # The frame and its debug label handed to the next capture callback.
        self.next_image = None
        self.next_debuginfo = None

    def set_next_image(self, file_name):
        """Read file_name from disk; it becomes the next "captured" frame,
        with the file name kept as the debug info."""
        self.next_image = imageio.imread(file_name)
        self.next_debuginfo = file_name

    def capture_and(self, on_start):
        """Return a zero-argument trigger that calls
        on_start(next_image, next_debuginfo) and forwards its result."""
        return lambda: on_start(self.next_image, self.next_debuginfo)
#!/bin/bash
# Strip the Raspberry-Pi-only `picamera` pin from requirements.txt so the
# remaining dependencies can be installed on a development machine.
IN=rpi/requirements.txt
OUT=rpi/non-pi-requirements.txt
# Quote the expansions so the script survives paths containing spaces.
grep -v picamera <"$IN" >"$OUT"
......@@ -2,6 +2,8 @@ import RPi.GPIO as GPIO
import time
import picamera
import picamera.array
from rpi import base
from smilebot import imageops
from io import BytesIO
from PIL import Image
......@@ -15,13 +17,10 @@ DEFAULT_PINS = {
KEEP_SMILE_SECONDS = 3
KEEP_CAPTURES = 20
def _do_sleep_a_bit():
time.sleep(0.44)
class Buttons(object):
class Buttons(base.ButtonsBase):
def __init__(self, pins):
super().__init__()
self.pins = pins
self.on_press = _do_sleep_a_bit
self.test = False
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.pins['take'], GPIO.IN, pull_up_down=GPIO.PUD_UP)
......@@ -47,7 +46,7 @@ class Buttons(object):
return do_no
def err(self):
def do_err():
def do_err(err):
print("Err!")
return do_err
......@@ -105,8 +104,11 @@ class Camera(object):
def do_nothing():
pass
def do_nothing_arg(ignore_argument):
pass
class FakeInference(object):
def __init__(self, modelfile, threshold=0.5, on_true=do_nothing, on_false=do_nothing, on_error=do_nothing):
def __init__(self, modelfile, threshold=0.5, on_true=do_nothing, on_false=do_nothing, on_error=do_nothing_arg):
self.fns = {}
self.on_true = on_true
self.on_false = on_false
......@@ -127,7 +129,7 @@ class FakeInference(object):
else:
self.on_false()
except Exception as e:
self.on_error()
self.on_error(e)
if self.test:
raise(e)
......
# Prepare input camera image for inference using raspberry pi.
import os
from os.path import realpath, normpath, dirname
from smilebot import imageops
class Provider(object):
    """Prepares a camera frame for smile inference.

    Two pipelines: a plain rescale-to-crop, or Viola-Jones face detection
    first (crop to an enlarged box around the face, then rescale).
    """

    def __init__(self, use_vj, classifier_path='data/haarcascade_frontalface_default.xml', vj_scale=1.2):
        # use_vj: when True, run the Viola-Jones cascade before rescaling.
        # classifier_path: cascade XML path, relative to the cv2 package dir.
        # vj_scale: factor by which the detected face box is enlarged.
        self.use_vj = use_vj
        self.vj_scale = vj_scale
        if self.use_vj:
            # cv2 is imported lazily so the non-VJ path needs no OpenCV.
            import cv2 as cv
            # The cascade file ships inside the installed cv2 package tree.
            model_fname = os.path.join(normpath(dirname(cv.__file__)), classifier_path)
            print("Loading VJ model from {}...".format(model_fname))
            self.face_cascade = cv.CascadeClassifier(model_fname)

    def for_pi(self, img, norm_shape):
        """Dispatch img to the pipeline selected at construction time."""
        if self.use_vj:
            return self.viola_jones_first(img, norm_shape)
        else:
            return self.regular(img, norm_shape)

    def regular(self, img, norm_shape, force_noresize=True):
        """Crop/rescale img to norm_shape and scale pixel values by 1/255.

        Assumes pixel values are in 0..255 -- TODO confirm at the caller.
        """
        print("Rescaling...")
        img = imageops.rescale_to_crop(img, norm_shape, force_noresize=force_noresize)
        print(img.shape)
        print(img.dtype)
        print("Inferring...")
        img = img / 255.0
        return img

    def viola_jones_first(self, img, norm_shape):
        """Detect a face, crop to an enlarged box around it, then rescale.

        Falls back to regular() when no face is found. NOTE(review): when
        several faces are detected, only one detection box ends up used for
        the crop -- confirm this is intended.
        """
        print("Img size: {} {} ".format(*img.shape))
        import cv2 as cv
        # Detect face. If a face was detected, crop to the face. Otherwise, return the original image.
        # NOTE(review): assumes img is in BGR channel order -- confirm
        # against what the camera actually delivers.
        grayscale_image = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        detected_faces = self.face_cascade.detectMultiScale(grayscale_image)
        if not len(detected_faces):
            return self.regular(img, norm_shape)
        for (c,r,w,h) in detected_faces:
            print("Detected: {} {} , {} {}".format(c,r,c+w,r+h))
        # Let's zoom out of the face a bit. We don't want to accidentally crop it out.
        mwh = max(w,h) # Resize to fit.
        mwh *= self.vj_scale # Enlarge the box by vj_scale (default 1.2, i.e. +20%).
        c -= (mwh-w) / 2
        w += (mwh-w) / 2
        r -= (mwh-h) / 2
        h += (mwh-h) / 2
        c = int(max(0, c))
        r = int(max(0, r))
        # NOTE(review): this clamps w/h to the image dimensions rather than
        # clamping c+w / r+h -- confirm the bound is intended.
        w = int(min(img.shape[1], w))
        h = int(min(img.shape[0], h))
        print("After resize: {} {} , {} {}".format(c,r,c+w,r+h))
        # These are transposed: r (OpenCV y) indexes axis 0, c (x) axis 1.
        img = img[r:r+h, c:c+w, :]
        print("Face size: {} {} ".format(*img.shape))
        # The face is usually smaller; we have to resize
        return self.regular(img, norm_shape, force_noresize=False)
import numpy as np
from PIL import Image
def rescale_to_crop(img, target_shape, force_noresize=False):
#import ipdb; ipdb.set_trace()
Y, X, c = img.shape
tY, tX = target_shape
......
# This particular file isn't really used; instead, use tflib_lite.py
import tensorflow.keras.backend as K
import tensorflow as tf
import numpy as np
......
......@@ -2,7 +2,7 @@ import tensorflow.keras.backend as K
import tensorflow as tf
import numpy as np
from smilebot import imageops
from rpi import prepare
# Load the needed model from the modelfile and prepare it for inference.
......@@ -10,13 +10,18 @@ from smilebot import imageops
def do_nothing():
pass
def do_nothing_arg(ignored):
pass
class Inference(object):
def __init__(self, modelfile, threshold=0.5, on_true=do_nothing, on_false=do_nothing, on_error=do_nothing):
def __init__(self, modelfile, feature_provider, threshold=0.5, on_true=do_nothing, on_false=do_nothing, on_error=do_nothing_arg):
self.fns = {}
self.on_true = on_true
self.on_false = on_false
self.on_error = on_error
self.provider = feature_provider
self.threshold = threshold
self.interpreter = tf.lite.Interpreter(model_path=modelfile)
......@@ -35,23 +40,16 @@ class Inference(object):
self.on_true()
else:
self.on_false()
except:
self.on_error()
except Exception as e:
self.on_error(e)
return do_start
def infer(self, img, debug):
print("Rescaling...")
norm_shape = (224, 224)
img = imageops.rescale_to_crop(img, norm_shape, force_noresize=True)
print(img.shape)
print(img.dtype)
print("Inferring...")
img = img / 255.0
img = self.provider.for_pi(img, norm_shape)
print(img[0][0])
img = np.expand_dims(img, axis=0)
#img = (img * 255).astype(np.uint8)
......