Skip to content

The Python application is now deployable from ground up #81

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 11 commits into from
May 18, 2018
Merged
10 changes: 9 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -65,4 +65,12 @@ ehthumbs.db
Thumbs.db

# Swap files
*.swp
*.swp


# Python3 Virtual Environment folders

bin/
lib/
share/
pyvenv.cfg
56 changes: 28 additions & 28 deletions audio.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,9 +42,9 @@
import re
import sys

from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
#from google.cloud import speech
#from google.cloud.speech import enums
#from google.cloud.speech import types
import pyaudio
from six.moves import queue
# [END import_libraries]
Expand Down Expand Up @@ -79,7 +79,7 @@ def __init__(self):
except Exception as e:
logging.info("Audio: input stream not available")

self._google_speech_client = speech.SpeechClient()
#self._google_speech_client = speech.SpeechClient()

def exit(self):
pass
Expand Down Expand Up @@ -201,32 +201,32 @@ def speech_recog(self, model):
logging.info("recog text: " + recog_text)
return recog_text

def speech_recog_google(self, locale):
config = types.RecognitionConfig(
encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=RATE,
language_code=locale)
streaming_config = types.StreamingRecognitionConfig(
config=config,
interim_results=False,
single_utterance=True)

t1 = time.time()
with self.stream_in as stream:
audio_generator = stream.generator()
requests = (types.StreamingRecognizeRequest(audio_content=content)
for content in audio_generator)

responses = self._google_speech_client.streaming_recognize(streaming_config, requests)
# def speech_recog_google(self, locale):
# config = types.RecognitionConfig(
# encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
# sample_rate_hertz=RATE,
# language_code=locale)
# streaming_config = types.StreamingRecognitionConfig(
# config=config,
# interim_results=False,
# single_utterance=True)
#
# t1 = time.time()
# with self.stream_in as stream:
# audio_generator = stream.generator()
# requests = (types.StreamingRecognizeRequest(audio_content=content)
# for content in audio_generator)
#
# responses = self._google_speech_client.streaming_recognize(streaming_config, requests)

# Now, put the transcription responses to use.
for response in responses:
if time.time() - t1 > 10:
return ""
if response.results:
result = response.results[0]
if result.is_final:
return result.alternatives[0].transcript
# for response in responses:
# if time.time() - t1 > 10:
# return ""
# if response.results:
# result = response.results[0]
# if result.is_final:
# return result.alternatives[0].transcript

class MicrophoneStream(object):
"""Opens a recording stream as a generator yielding the audio chunks."""
Expand Down
10 changes: 5 additions & 5 deletions camera.py
Original file line number Diff line number Diff line change
Expand Up @@ -85,11 +85,11 @@ def __init__(self):
self._photos.append({'name': filename})
self.save_photo_metadata()

self._cnn_classifiers = {}
cnn_model = config.Config.get().get("cnn_default_model", "")
if cnn_model != "":
self._cnn_classifiers[cnn_model] = CNNManager.get_instance().load_model(cnn_model)
self._cnn_classifier_default = self._cnn_classifiers[cnn_model]
#self._cnn_classifiers = {}
#cnn_model = config.Config.get().get("cnn_default_model", "")
#if cnn_model != "":
# self._cnn_classifiers[cnn_model] = CNNManager.get_instance().load_model(cnn_model)
# self._cnn_classifier_default = self._cnn_classifiers[cnn_model]

self._camera.grab_start()
self._image_cv = self.get_image()
Expand Down
2 changes: 1 addition & 1 deletion cv/image.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@
}

# Best-effort OCR initialisation: the OpenCV text module / Tesseract data may
# not be installed on the target device, so a failure here only disables OCR
# instead of breaking module import.
try:
    # Pass None as the datapath so OpenCV resolves the default tessdata
    # location. (The previous value `Null` was a NameError: it was silently
    # caught below and permanently disabled OCR.)
    ocr = cv2.text.OCRTesseract_create(None, "eng", tesseract_whitelists['unspec'], 0, cv2.text.OCR_LEVEL_TEXTLINE)
except:
    # Deliberate best-effort: any failure (missing cv2.text, missing tessdata)
    # leaves `ocr` undefined and is only logged.
    logging.info("tesseract not available")

Expand Down
41 changes: 37 additions & 4 deletions main.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@

sh = logging.StreamHandler()
# add a rotating handler
fh = logging.handlers.RotatingFileHandler('/home/pi/coderbot/logs/coderbot.log', maxBytes=1000000, backupCount=5)
fh = logging.handlers.RotatingFileHandler('./logs/coderbot.log', maxBytes=1000000, backupCount=5)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
sh.setFormatter(formatter)
fh.setFormatter(formatter)
Expand Down Expand Up @@ -92,8 +92,7 @@ def handle_home():
config=app.bot_config,
program_level=app.bot_config.get("prog_level", "std"),
cam=cam != None,
cnn_model_names=json.dumps([[name] for name in cnn.get_models().keys()]))

cnn_model_names = json.dumps({}))
@app.route("/config", methods=["POST"])
def handle_config():
Config.write(request.form)
Expand Down Expand Up @@ -187,6 +186,21 @@ def video_stream(a_cam):
yield frame
yield "\r\n"

@app.route("/video")
def handle_video():
    """Serve a minimal HTML page that shows the live camera stream.

    The MJPEG endpoint at /video/stream is used as a fixed, centred CSS
    background image, so the browser keeps the multipart connection open
    while the page itself stays empty.
    """
    page = """
<html>
<head>
<style type=text/css>
body { background-image: url(/video/stream); background-repeat:no-repeat; background-position:center top; background-attachment:fixed; height:100% }
</style>
</head>
<body>
&nbsp;
</body>
</html>
"""
    return page

@app.route("/video/stream")
def handle_video_stream():
try:
Expand All @@ -198,6 +212,25 @@ def handle_video_stream():
except:
pass

def video_stream_cv(a_cam):
    """Generator yielding the OpenCV-processed camera feed as MJPEG parts.

    Each yielded chunk is one complete multipart/x-mixed-replace part
    (boundary, JPEG content headers, frame payload) for use as a Flask
    streaming response body. Runs until app.shutdown_requested is set.
    """
    while not app.shutdown_requested:
        # NOTE(review): concatenating frame with str literals assumes
        # get_image_cv_jpeg() returns a str (Python 2). Under Python 3 a
        # bytes frame would raise TypeError here — TODO confirm runtime.
        frame = a_cam.get_image_cv_jpeg()
        yield ("--BOUNDARYSTRING\r\n" +
               "Content-type: image/jpeg\r\n" +
               "Content-Length: " + str(len(frame)) + "\r\n\r\n" +
               frame + "\r\n")

@app.route("/video/stream/cv")
def handle_video_stream_cv():
    """Stream the OpenCV-processed camera frames as a multipart MJPEG response.

    Sends cache-busting headers so browsers always render the live feed.
    Any failure (e.g. camera unavailable) is deliberately swallowed,
    matching the plain /video/stream endpoint's best-effort behaviour.
    """
    try:
        response_headers = Headers()
        for header_name, header_value in (('Age', 0),
                                          ('Cache-Control', 'no-cache, private'),
                                          ('Pragma', 'no-cache')):
            response_headers.add(header_name, header_value)
        return Response(video_stream_cv(cam),
                        headers=response_headers,
                        mimetype="multipart/x-mixed-replace; boundary=--BOUNDARYSTRING")
    except:
        pass

@app.route("/photos", methods=["GET"])
def handle_photos():
logging.info("photos")
Expand Down Expand Up @@ -352,7 +385,7 @@ def run_server():
except picamera.exc.PiCameraError:
logging.error("Camera not present")

cnn = CNNManager.get_instance()
#cnn = CNNManager.get_instance()
event = EventManager.get_instance("coderbot")
conv = Conversation.get_instance()

Expand Down
24 changes: 24 additions & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
apiai==1.2.3
Babel==2.5.3
click==6.7
Flask==1.0.2
Flask-Babel==0.11.2
Flask-Cors==3.0.4
itsdangerous==0.24
Jinja2==2.10
MarkupSafe==1.0
numpy==1.14.3
opencv-contrib-python==3.4.0.12
picamera==1.13
pigpio==1.40.post1
Pillow==5.1.0
pkg-resources==0.0.0
protobuf==3.0.0
PyAudio==0.2.11
pycairo==1.17.0
Pypubsub==4.0.0
pytz==2018.4
six==1.11.0
smbus2==0.2.0
tensorflow==0.11.0
Werkzeug==0.14.1