6 | 6 | import tensorflow as tf
7 | 7 | import zipfile
8 | 8 | import time
  | 9 | +from Xlib import display
9 | 10 | import cv2
10 | 11 | import yaml
11 | 12 |
12 |    | -from Xlib import display, X
13 | 13 |
14 | 14 | from collections import defaultdict
15 | 15 | from io import StringIO
16 |    | -from PIL import Image
17 |    | -
18 |    | -#cap = cv2.VideoCapture(0)
19 |    | -#cap = cv2.VideoCapture('../opencv_extra/testdata/highgui/video/big_buck_bunny.mp4')
   | 16 | +#from PIL import Image
20 | 17 |
21 | 18 | sys.path.append('../tensorflow_models/research')
22 | 19 | sys.path.append('../tensorflow_models/research/slim')

25 | 22 | from utils import label_map_util
26 | 23 | from utils import visualization_utils as vis_util
27 | 24 |
   | 25 | +from stuff.helper import FPS
   | 26 | +from stuff.input import ScreenInput, VideoInput
28 | 27 |
|
29 | 28 | # Load config values from config.obj_detect.sample.yml (as default values), updated by an optional user-specific config.obj_detect.yml
30 | 29 | ## see also http://treyhunner.com/2016/02/how-to-merge-dictionaries-in-python/

35 | 34 | #for section in cfg:
36 | 35 | # print(section, ":", cfg[section])
37 | 36 |
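The merge itself happens in the lines collapsed out of this hunk. As a minimal sketch of what the two comments describe (file names taken from the comment above, `dict.update` as the merge strategy from the linked article; the actual code may differ):

    import yaml

    with open('config.obj_detect.sample.yml', 'r') as f:
        cfg = yaml.safe_load(f)              # shipped defaults
    try:
        with open('config.obj_detect.yml', 'r') as f:
            cfg.update(yaml.safe_load(f))    # user-specific values override the defaults
    except IOError:
        pass                                 # the user-specific config is optional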
38 |    | -
   | 37 | +# Define input
   | 38 | +screen = display.Display().screen().root.get_geometry()
   | 39 | +if cfg['input_type'] == 'screen':
   | 40 | +    input = ScreenInput(0, 0, int(screen.width/2), int(screen.height/2))
   | 41 | +elif cfg['input_type'] == 'video':
   | 42 | +    input = VideoInput(cfg['input_video'])
   | 43 | +else:
   | 44 | +    print('No valid input type given. Exit.')
   | 45 | +    sys.exit()
39 | 46 |
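`stuff/input.py` is not part of this diff. Judging from the calls the new code makes (`isActive()`, `getImage()` returning a `(ret, frame)` pair like `cv2.VideoCapture.read()`, and `cleanup()` at the end of the script), the two classes plausibly look like the following sketch; the X11 grab mirrors the code this commit removes from the main loop below, and every detail is an assumption:

    import cv2
    import numpy as np
    from Xlib import display, X

    class VideoInput:
        def __init__(self, path):
            self.cap = cv2.VideoCapture(path)
        def isActive(self):
            return self.cap.isOpened()
        def getImage(self):
            return self.cap.read()        # (ret, frame), frame already BGR for cv2
        def cleanup(self):
            self.cap.release()

    class ScreenInput:
        def __init__(self, x, y, width, height):
            self.x, self.y, self.width, self.height = x, y, width, height
            self.root = display.Display().screen().root
        def isActive(self):
            return True                   # a screen capture has no natural end
        def getImage(self):
            raw = self.root.get_image(self.x, self.y, self.width, self.height,
                                      X.ZPixmap, 0xffffffff)
            img = np.frombuffer(raw.data, dtype=np.uint8)
            img = img.reshape(self.height, self.width, 4)[:, :, :3]  # drop the X channel, keep BGR
            return True, img
        def cleanup(self):
            pass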
40 | 47 | # Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT` to point to a new .pb file.
41 | 48 | # See the [detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
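The collapsed lines after these comments are where `PATH_TO_CKPT` is consumed. In the TensorFlow Object Detection tutorial this script is based on, the frozen graph is loaded with the standard TF 1.x pattern, shown here only for context:

    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            od_graph_def.ParseFromString(fid.read())
            tf.import_graph_def(od_graph_def, name='')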
97 | 104 |     detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
98 | 105 |     num_detections = detection_graph.get_tensor_by_name('num_detections:0')
99 | 106 |
100 |     | -    # for frame rate calculation
101 |     | -    start_time = time.time()
102 |     | -    x = 3 # displays the frame rate every x seconds
103 |     | -    counter = 0
    | 107 | +    # TODO: Usually FPS calculation lives in a separate thread. As is now, the interval is a minimum value for each iteration.
    | 108 | +    fps = FPS(cfg['fps_interval']).start()
104 | 109 |
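`stuff/helper.py` is not shown either. Given the calls used here and at the end of the file (`start()`, `update()`, `stop()`, `elapsed()`, `fps()`), the helper is presumably an imutils-style FPS counter, roughly like this sketch; how it uses the `interval` argument from `cfg['fps_interval']` is a guess:

    import datetime

    class FPS:
        def __init__(self, interval):
            self._interval = interval            # assumed: seconds between (optional) progress prints
            self._start = None
            self._end = None
            self._numFrames = 0
        def start(self):
            self._start = datetime.datetime.now()
            return self                          # allows the FPS(...).start() chaining used above
        def update(self):
            self._numFrames += 1
        def stop(self):
            self._end = datetime.datetime.now()
        def elapsed(self):
            return (self._end - self._start).total_seconds()
        def fps(self):
            return self._numFrames / self.elapsed()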
105 | 110 |     windowPlacedYet = False
106 | 111 |
107 |     | -# while(cap.isOpened()):
108 |     | -    while(True):
109 |     | -
110 |     | -        dsp = display.Display()
111 |     | -        root = dsp.screen().root
112 |     | -        reso = root.get_geometry()
113 |     | -        W,H = int(reso.width/2),int(reso.height/2)
114 |     | -        #W,H = 600,600
115 |     | -        raw = root.get_image(0, 0, W, H, X.ZPixmap, 0xffffffff)
116 |     | -        image = Image.frombytes("RGB", (W, H), raw.data, "raw", "RGBX")
117 |     | -        image_np = np.array(image);
    | 112 | +    while(input.isActive()):
    | 113 | +        ret, image_np = input.getImage()
    | 114 | +        if not ret:
    | 115 | +            print("No frames grabbed from input (anymore)! Exit.")
    | 116 | +            break
118 | 117 |
119 | 118 |         # image_np_bgr = np.array(ImageGrab.grab(bbox=(0,0,600,600))) # grab(bbox=(10,10,500,500)) or just grab()
120 | 119 |         # image_np = cv2.cvtColor(image_np_bgr, cv2.COLOR_BGR2RGB)
121 | 120 |
122 |     | -# ret, image_np = cap.read()
123 |     | -# if not ret:
124 |     | -# print("Video finished!")
125 |     | -# break
126 |     | -
127 | 121 |         # for image_path in TEST_IMAGE_PATHS:
128 | 122 |         # image = Image.open(image_path)
129 | 123 |         # the array based representation of the image will be used later in order to prepare the

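The detection step itself sits in the region collapsed before the next hunk. In the tutorial code this file derives from, it looks roughly like the following, assuming the usual `image_tensor`, `detection_boxes` and `detection_scores` handles plus a `category_index` built with `label_map_util` earlier in the file:

    image_np_expanded = np.expand_dims(image_np, axis=0)
    (boxes, scores, classes, num) = sess.run(
        [detection_boxes, detection_scores, detection_classes, num_detections],
        feed_dict={image_tensor: image_np_expanded})
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=8)
    cv2.imshow('object detection', cv2.resize(image_np, (800, 600)))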
150 | 144 |         if cv2.waitKey(1) & 0xFF == ord('q'):
151 | 145 |             break
152 | 146 |         if not windowPlacedYet:
153 |     | -            cv2.moveWindow('object detection', (int)(reso.width/3), (int)(reso.height/3))
    | 147 | +            cv2.moveWindow('object detection', (int)(screen.width/3), (int)(screen.height/3))
154 | 148 |             windowPlacedYet = True
155 | 149 |
156 |     | -        counter+=1
157 |     | -        if (time.time() - start_time) > x :
158 |     | -            print("FPS: ", counter / (time.time() - start_time))
159 |     | -            counter = 0
160 |     | -            start_time = time.time()
    | 150 | +        fps.update()
    | 151 | +
    | 152 | +fps.stop()
    | 153 | +print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
    | 154 | +print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))
161 | 155 |
162 |     | -#cap.release()
    | 156 | +input.cleanup()
163 | 157 | cv2.destroyAllWindows()