@@ -5,7 +5,7 @@
 import tarfile
 import tensorflow as tf
 import zipfile
-from datetime import datetime
+from datetime import datetime, timedelta
 from Xlib import display
 import cv2
 import yaml
@@ -19,35 +19,32 @@
 sys.path.append('../tensorflow_models/research/object_detection')

 from stuff.helper import FPS, Visualizer
-from stuff.input import ScreenInput, VideoInput
+from stuff.input import ScreenInput, ScreenPyInput, VideoInput

 # Load config values from config.obj_detect.sample.yml (as default values) updated by optional user-specific config.obj_detect.yml
 ## see also http://treyhunner.com/2016/02/how-to-merge-dictionaries-in-python/
 cfg = yaml.load(open("config/config.obj_detect.sample.yml", 'r'))
 if os.path.isfile("config/config.obj_detect.yml"):
     cfg_user = yaml.load(open("config/config.obj_detect.yml", 'r'))
     cfg.update(cfg_user)
-#for section in cfg:
-#    print(section, ":", cfg[section])

 # Define input
 screen = display.Display().screen().root.get_geometry()
 if cfg['input_type'] == 'screen':
     input = ScreenInput(0, 0, int(screen.width/2), int(screen.height/2))
+elif cfg['input_type'] == 'screenpy':
+    input = ScreenPyInput(0, 0, int(screen.width/2), int(screen.height/2))
 elif cfg['input_type'] == 'video':
     input = VideoInput(cfg['input_video'])
 else:
     print('No valid input type given. Exit.')
     sys.exit()

 # Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT` to point to a new .pb file.
-# See the [detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
-
+# See the detection model zoo(object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
 # Path to frozen detection graph. This is the actual model that is used for the object detection.
 PATH_TO_CKPT = '../' + cfg['model_name'] + '/frozen_inference_graph.pb'

-
-
 # ## Download Model
 MODEL_FILE = cfg['model_name'] + cfg['model_dl_file_format']
 if not os.path.isfile(PATH_TO_CKPT):
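Aside, not part of the commit: the hunk above keeps config.obj_detect.sample.yml as the defaults and overrides its top-level keys with an optional user config via dict.update(). A minimal sketch of that merge semantics, with hypothetical values standing in for the repo's real YAML contents:

defaults = {'input_type': 'screen', 'visualizer_enabled': True}   # stands in for config.obj_detect.sample.yml
user     = {'input_type': 'video', 'input_video': 'input.mp4'}    # stands in for config.obj_detect.yml

cfg = dict(defaults)      # start from the sample defaults
cfg.update(user)          # user keys win; keys absent from the user file keep their defaults
print(cfg)
# {'input_type': 'video', 'visualizer_enabled': True, 'input_video': 'input.mp4'}

Note that update() replaces whole top-level values; nested sections would not be merged recursively.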
@@ -72,13 +69,6 @@
         od_graph_def.ParseFromString(serialized_graph)
         tf.import_graph_def(od_graph_def, name='')

-# # Detection
-PATH_TO_TEST_IMAGES_DIR = 'test_images'
-TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 3) ]
-
-# Size, in inches, of the output images.
-IMAGE_SIZE = (12, 8)
-
 with detection_graph.as_default():
     with tf.Session(graph=detection_graph) as sess:
         # Definite input and output Tensors for detection_graph
@@ -97,37 +87,30 @@
         vis = Visualizer(cfg['visualizer_enabled'])

         while(input.isActive()):
+            startTime=datetime.now()

-#            startTime=datetime.now()
-
-            ret, image_np = input.getImage()
-            if not ret:
-                print("No frames grabbed from input (anymore). Exit.")
-                break
+            ret, image_np = input.getImage()
+            if not ret:
+                print("No frames grabbed from input (anymore). Exit.")
+                break

-#            timeElapsed=datetime.now()-startTime
-#            print('1 Time elpased (hh:mm:ss.ms) {}'.format(timeElapsed))
-#            startTime=datetime.now()
+            timeElapsed=datetime.now()-startTime
+#            print('1 Time elpased (hh:mm:ss.ms) {}'.format(timeElapsed))
+            startTime=datetime.now()

-#            for image_path in TEST_IMAGE_PATHS:
-#            image = Image.open(image_path)
-            # the array based representation of the image will be used later in order to prepare the
-            # result image with boxes and labels on it.
-#            image_np = load_image_into_numpy_array(image)
+            # Run the detection (expand dimensions since the model expects images to have shape: [1, None, None, 3])
+            image_np_expanded = np.expand_dims(image_np, axis=0)
+            (boxes, scores, classes, num) = sess.run([detection_boxes, detection_scores, detection_classes, num_detections], feed_dict={image_tensor: image_np_expanded})

-            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
-            image_np_expanded = np.expand_dims(image_np, axis=0)
-            # Actual detection.
-            (boxes, scores, classes, num) = sess.run(
-                [detection_boxes, detection_scores, detection_classes, num_detections],
-                feed_dict={image_tensor: image_np_expanded})
+#            print(boxes, scores, classes, num)

-            ret = vis.show(image_np, boxes, classes, scores)
-            if not ret:
-                print("User asked to quit. Exit")
-                break
+            vis.draw(image_np, boxes, classes, scores)
+            ret = vis.show(image_np)
+            if not ret:
+                print("User asked to quit. Exit")
+                break

-            fps.update()
+            fps.update()

         fps.stop()
         print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))
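Aside, not part of the commit: the loop hunk above enables the per-frame timing that was previously commented out. A timer starts at the top of each iteration, the frame-grab time is measured right after input.getImage(), and the timer is reset before detection runs. A minimal sketch of that pattern, with a hypothetical get_frame() stand-in so it runs without TensorFlow or the repo's stuff helpers:

from datetime import datetime
import time

def get_frame(i, total=3):
    # Hypothetical stand-in for input.getImage(): returns (ret, frame).
    time.sleep(0.05)                              # simulate capture work
    return i < total, 'frame-{}'.format(i)

i = 0
while True:
    start_time = datetime.now()                   # start of the iteration
    ret, frame = get_frame(i)
    if not ret:
        break
    grab_time = datetime.now() - start_time       # time spent grabbing the frame
    print('{} grabbed in {}'.format(frame, grab_time))
    start_time = datetime.now()                   # reset before the detection step would run
    i += 1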