# NOTE: GitHub diff-export artifact — "Large diffs are not rendered by default."

@@ -3,6 +3,8 @@
import libardrone
import pygame
import move
import time
import cv2

# Append-mode log file for session events.
# Fix: the original mode string was 'a\n', which is not a valid open() mode
# (raises ValueError on CPython 3); append mode is plain 'a'.
f = open('log.txt', 'a')

@@ -11,11 +13,21 @@
# --- pygame window + drone session setup ---
# W and H are defined in a hunk elided from this export — confirm upstream.
screen = pygame.display.set_mode((W, H))
clock = pygame.time.Clock()


# drone = move.Drone(clock)
drone = move.Drone(clock)
# drone.reset()
# drone.takeoff()

drone.useDemoMode(True) # Set 15 basic dataset/sec

##### Mainprogram begin #####
drone.setConfigAllID() # Go to multiconfiguration-mode
drone.sdVideo() # Choose lower resolution (try hdVideo())
drone.frontCam() # Choose front view
# Busy-wait (1 ms polls) until the drone acknowledges the new configuration:
# ConfigDataCount changes once the resync completes.
CDC = drone.ConfigDataCount
while CDC==drone.ConfigDataCount: time.sleep(0.001) # Wait until it is done (after resync)
drone.startVideo() # Start video-function # Display the video

def move_in_m(drone, count, clock):
i = 0
@@ -26,6 +38,7 @@ def move_in_m(drone, count, clock):


# NOTE(review): head of the main event loop — body indentation was lost in
# this diff export.
running = True
# NOTE(review): `iter` shadows the builtin and is never referenced in the
# visible lines; the K_p handler further down increments `i` instead —
# confirm which counter was intended.
iter = 0
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
@@ -89,6 +102,12 @@ def move_in_m(drone, count, clock):
# NOTE(review): fragment of the KEYDOWN handler chain — indentation lost in
# this export.
if event.key == pygame.K_b:
print('rotation')
drone.back_circle()
if event.key == pygame.K_p:
print('image has been saved')
frame = drone.getImage()
img_process(frame)
# NOTE(review): `i` is not defined in the visible code (only `iter` is), and
# 'test'+ i raises TypeError (int + str) — should be 'test' + str(i) + '.jpeg'.
i+=1
cv2.imwrite('test'+ i +'.jpeg', frame)

# <<<<<<< Updated upstream:python-ardrone-master/main.py  (leftover merge-conflict marker — resolve and remove)
# try:
@@ -0,0 +1,29 @@
"""Minimal keyboard teleop for the AR.Drone.

Keys: SPACE = land, UP = climb, DOWN = descend, ESC = quit.
Fix: this script's indentation had been stripped in the export; structure
restored so the file is valid Python again. Behavior is unchanged.
"""
import libardrone
import pygame

pygame.init()
W, H = 320, 240
screen = pygame.display.set_mode((W, H))

drone = libardrone.ARDrone()
drone.reset()
drone.takeoff()
running = True
while running:
    # print('hello')
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        elif event.type == pygame.KEYDOWN:
            if event.key == pygame.K_SPACE:
                print('hello')
                drone.land()
            if event.key == pygame.K_UP:
                drone.move_up()
                print('move_up')
            if event.key == pygame.K_ESCAPE:
                running = False
            if event.key == pygame.K_DOWN:
                drone.move_down()

# Always land and shut the drone link down on exit.
drone.land()
drone.halt()
@@ -0,0 +1,37 @@
import os
import sys
import cv2
import numpy as np
import logging

MODEL_FILE = "model.mdl"

def detect(img, cascade):
    """Run a Haar cascade over *img*; return detected rectangles.

    Returns an empty list when nothing is found, otherwise the raw
    detectMultiScale result (rects as (x, y, w, h)).
    """
    equalized = to_grayscale(img)
    found = cascade.detectMultiScale(
        equalized,
        scaleFactor=1.3,
        minNeighbors=2,
        minSize=(15, 15),
        flags=cv2.CASCADE_SCALE_IMAGE,
    )
    # Normalize the "nothing detected" case to a plain empty list.
    return [] if len(found) == 0 else found
#haarcascade_frontalface_alt.xml
#haarcascade_fullbody.xml
def detect_faces(img):
    """Detect frontal faces in *img* with the bundled Haar cascade.

    Alternative cascade files: haarcascade_frontalface_alt.xml,
    haarcascade_fullbody.xml.
    """
    classifier = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
    return detect(img, classifier)

def to_grayscale(img):
    """Convert a BGR image to histogram-equalized grayscale."""
    return cv2.equalizeHist(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))

def contains_face(img):
    """Return True when at least one face is detected in *img*."""
    faces = detect_faces(img)
    return len(faces) > 0

def save(path, img):
    """Write *img* to *path* (format chosen by cv2 from the extension)."""
    cv2.imwrite(path, img)

def crop_faces(img, faces):
    """Return the crop of *img* covering the first rectangle in *faces*.

    *faces* holds OpenCV-style rectangles (x, y, w, h) as returned by
    detectMultiScale. Returns None when *faces* is empty.

    Fix: the original unpacked each rect as ``x, y, h, w`` — swapping width
    and height relative to OpenCV's (x, y, w, h) layout — so non-square
    detections were cropped with transposed dimensions.
    """
    for x, y, w, h in faces:
        # Rows are indexed by y (height), columns by x (width).
        return img[y:y + h, x:x + w]
    return None

@@ -93,8 +93,16 @@ def __init__(self):
# NOTE(review): interior of __init__ — ANSI terminal escape codes used for
# colored console output.
self.__pYellowStr = "\033[93m"
self.__pBlueStr = "\033[94m"
self.__pPurpleStr = "\033[95m"
self.__pLineUpStr = "\033[1A"  # cursor-up-one-line escape
# NOTE(review): same escape under a second name — looks like a rename that
# kept both sides of the diff; confirm which attribute callers use and drop
# the other.
self._show_pLineUpStr = "\033[1A"

def getImage(self):
    """Grab and return a single decoded frame from the drone video pipe.

    Opens ``self.__VidPipePath`` with cv2.VideoCapture, reads one frame and
    returns it. Returns None when the read fails (VideoCapture.read()
    reports success=False) — callers should handle that case.

    Fix: the original never released the VideoCapture handle, leaking a
    capture (and its underlying file/pipe descriptor) on every call.
    """
    capture = cv2.VideoCapture(self.__VidPipePath)
    try:
        success, image = capture.read()
    finally:
        capture.release()
    return image

###### Connect to the drone and start all procedures
def startup(self):
# Check for drone in the network and wake it up
@@ -1007,12 +1015,14 @@ def vCapture(VidPipePath, parent_pipe):
# NOTE(review): interior of vCapture() — leading indentation was lost in this
# diff export.
codecOK = False
lastKey = ""
cc=0

# NOTE(review): the capture/decode loop below is disabled — it is wrapped in
# an unassigned triple-quoted string literal, so it never executes. Decide
# whether to delete it or restore it properly.
'''
while not commitsuicideV:
decTimeRev = time.time()
receiveWatchdog = threading.Timer(2.0, VideoReceiveWatchdog, [parent_pipe,"vCapture", debugV]) # Resets video if something hangs
receiveWatchdog.start()
success, image = capture.read()
##################################
#img_process(image)
cc+=1
receiveWatchdog.cancel()
decTime = decTimeRev-time.time()
@@ -1058,7 +1068,7 @@ def vCapture(VidPipePath, parent_pipe):
cv2.destroyAllWindows()
capture.release()
if debugV: print "vCapture-Thread : committed suicide"

'''
### Process to decode the videostream in the FIFO-Pipe, stored there from main-loop.
# Storing and decoding must not be processed in the same process, that's why decoding is external.
# vDecode controls the vCapture-thread which captures and finally decodes the videostream.
@@ -0,0 +1,16 @@
import sys
import cv2
import numpy

# Compare a captured frame ('5.jpg') against a template ('4.jpg') and print
# the mean normalized cross-correlation score (1.0 = perfect match).
img = cv2.imread('5.jpg')
template = cv2.imread('4.jpg')
th, tw = template.shape[:2]  # template height/width

result = cv2.matchTemplate(img, template, cv2.TM_CCORR_NORMED)
# Fix: the original averaged each row with a Python-level reduce() and then
# averaged the per-row means. All rows of `result` have equal length, so that
# equals the overall mean — computed here in one vectorized call.
num = float(result.mean())
print(num)
@@ -20,8 +20,10 @@ def img_process(frame):
# NOTE(review): tail of img_process(frame) — indentation was lost in this
# diff export.
sum_sqr_rect += w*h
cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

# NOTE(review): merge artifact — this unguarded division duplicates the
# guarded version just below and raises ZeroDivisionError when no faces were
# found; it should be deleted in favour of the if/else that follows.
main_rect = (main_rect[0]/len(faces),main_rect[1]/len(faces))

if 0 != len(faces):
main_rect = (main_rect[0]/len(faces),main_rect[1]/len(faces))
else:
return 0
w, h = frame.shape[:2]

# Offsets of the mean face centre from the frame centre, plus total face area.
return main_rect[0]-h/2, main_rect[1]-w/2, sum_sqr_rect