# Final aim bot with multiprocessing
### The original text version of this tutorial is available [here](https://pylessons.com/).

Welcome everyone to part 10 of our TensorFlow object detection API tutorial series. For now this will be the final tutorial of the CSGO aim bot video series, because I have already spent a lot of time on it. I squeezed out the best performance I could for now; further improvements would require investing time in other detection methods, which I plan to do in the future.

Before continuing with this tutorial I should mention that I updated the [9th](https://pylessons.com/Tensorflow-object-detection-grab-screen-multiprocessing/) tutorial code before merging it with the CSGO TensorFlow detection code. In that update I added one more file, in which we grab the screen using multiprocessing pipes. Compared with multiprocessing queues the performance is the same (33 FPS), but I wanted to test a different method of sharing data between processes. Keep in mind that pipes provide one-to-one communication, while queues can be shared many-to-many.
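
To make the pipe/queue distinction concrete, here is a minimal, self-contained sketch; the `pipe_sender` and `queue_sender` helpers are illustrative only, not part of the tutorial code:

```python
import multiprocessing
from multiprocessing import Pipe, Queue

# Hypothetical helpers, not part of the tutorial code
def pipe_sender(conn):
    conn.send("frame")   # a Pipe connects exactly two endpoints (one-to-one)
    conn.close()

def queue_sender(q):
    q.put("frame")       # a Queue can be shared by many producers and consumers

if __name__ == "__main__":
    parent_conn, child_conn = Pipe()
    p = multiprocessing.Process(target=pipe_sender, args=(child_conn,))
    p.start()
    print(parent_conn.recv())   # -> "frame"
    p.join()

    q = Queue()
    p = multiprocessing.Process(target=queue_sender, args=(q,))
    p.start()
    print(q.get())              # -> "frame"
    p.join()
```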

Continuing with this tutorial, I am not going to explain the code in this text part. I simply divided the whole code into 3 parts: grabbing the screen, running the TensorFlow detection, and showing the screen. Each of these 3 parts was moved into its own multiprocessing process, as sketched below.
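
Before the full listing, here is a bare skeleton of that three-process layout (the stage bodies are placeholders, and the bounded loops are only there so the sketch terminates on its own):

```python
import multiprocessing
from multiprocessing import Pipe

# Placeholder stages; the real bodies appear in the full code below
def grab_screen(p_input):
    for _ in range(3):                    # the real code loops forever
        p_input.send("raw frame")         # stage 1: capture -> pipe 1

def tensorflow_detection(p_output, p_input2):
    for _ in range(3):
        frame = p_output.recv()           # stage 2: read from pipe 1
        p_input2.send(frame + " + boxes") # ... detect, then write to pipe 2

def show_image(p_output2):
    for _ in range(3):
        print(p_output2.recv())           # stage 3: display from pipe 2

if __name__ == "__main__":
    p_output, p_input = Pipe()            # pipe 1: grab -> detect
    p_output2, p_input2 = Pipe()          # pipe 2: detect -> show
    for target, args in ((grab_screen, (p_input,)),
                         (tensorflow_detection, (p_output, p_input2)),
                         (show_image, (p_output2,))):
        multiprocessing.Process(target=target, args=args).start()
```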

Here is the final code:
```python
# Imports
import multiprocessing
from multiprocessing import Pipe
import time
import cv2
import mss
import numpy as np
import os
import sys
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import tensorflow as tf
from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
import pyautogui


# title of our window
title = "FPS benchmark"
# set start time to current time
start_time = time.time()
# displays the frame rate every 2 seconds
display_time = 2
# set the primary FPS counter to 0
fps = 0
# create an MSS screen-capture instance
sct = mss.mss()
# set the monitor region to capture with MSS
width = 800
height = 640

monitor = {"top": 80, "left": 0, "width": width, "height": height}

# Env setup
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

# Model preparation
PATH_TO_FROZEN_GRAPH = 'CSGO_frozen_inference_graph.pb'
# List of the strings that are used to add the correct label to each box.
PATH_TO_LABELS = 'CSGO_labelmap.pbtxt'
NUM_CLASSES = 4

# Load a (frozen) Tensorflow model into memory.
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)

detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

def Shoot(mid_x, mid_y):
    # convert normalized detection coordinates to screen pixels
    x = int(mid_x*width)
    #y = int(mid_y*height)
    # aim slightly below the box center; height/9 is an empirical offset
    y = int(mid_y*height+height/9)
    pyautogui.moveTo(x,y)
    pyautogui.click()

def grab_screen(p_input):
    while True:
        # Grab screen image (mss returns a BGRA array)
        img = np.array(sct.grab(monitor))
        # Convert BGRA to RGB, the channel order the detector expects
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)

        # Put the image into the pipe
        p_input.send(img)

def TensorflowDetection(p_output, p_input2):
    # Detection
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            while True:
                # Get image from pipe
                image_np = p_output.recv()
                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(image_np, axis=0)
                # Grab the input and output tensors from the graph
                image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
                boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
                scores = detection_graph.get_tensor_by_name('detection_scores:0')
                classes = detection_graph.get_tensor_by_name('detection_classes:0')
                num_detections = detection_graph.get_tensor_by_name('num_detections:0')
                # Actual detection.
                (boxes, scores, classes, num_detections) = sess.run(
                    [boxes, scores, classes, num_detections],
                    feed_dict={image_tensor: image_np_expanded})
                # Visualization of the results of a detection.
                vis_util.visualize_boxes_and_labels_on_image_array(
                    image_np,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    category_index,
                    use_normalized_coordinates=True,
                    line_thickness=2)

                # Send the detection image to pipe 2
                p_input2.send(image_np)

                array_ch = []
                array_c = []
                array_th = []
                array_t = []
                for i,b in enumerate(boxes[0]):
                    if classes[0][i] == 2: # ch - counter-terrorist head
                        if scores[0][i] >= 0.5:
                            mid_x = (boxes[0][i][1]+boxes[0][i][3])/2
                            mid_y = (boxes[0][i][0]+boxes[0][i][2])/2
                            array_ch.append([mid_x, mid_y])
                            cv2.circle(image_np,(int(mid_x*width),int(mid_y*height)), 3, (0,0,255), -1)
                    if classes[0][i] == 1: # c - counter-terrorist body
                        if scores[0][i] >= 0.5:
                            mid_x = (boxes[0][i][1]+boxes[0][i][3])/2
                            # aim at the upper part of the body box
                            mid_y = boxes[0][i][0] + (boxes[0][i][2]-boxes[0][i][0])/6
                            array_c.append([mid_x, mid_y])
                            cv2.circle(image_np,(int(mid_x*width),int(mid_y*height)), 3, (50,150,255), -1)
                    if classes[0][i] == 4: # th - terrorist head
                        if scores[0][i] >= 0.5:
                            mid_x = (boxes[0][i][1]+boxes[0][i][3])/2
                            mid_y = (boxes[0][i][0]+boxes[0][i][2])/2
                            array_th.append([mid_x, mid_y])
                            cv2.circle(image_np,(int(mid_x*width),int(mid_y*height)), 3, (0,0,255), -1)
                    if classes[0][i] == 3: # t - terrorist body
                        if scores[0][i] >= 0.5:
                            mid_x = (boxes[0][i][1]+boxes[0][i][3])/2
                            # aim at the upper part of the body box
                            mid_y = boxes[0][i][0] + (boxes[0][i][2]-boxes[0][i][0])/6
                            array_t.append([mid_x, mid_y])
                            cv2.circle(image_np,(int(mid_x*width),int(mid_y*height)), 3, (50,150,255), -1)

                team = "c" # which team to shoot at
                if team == "c":
                    if len(array_ch) > 0:
                        Shoot(array_ch[0][0], array_ch[0][1])
                    if len(array_ch) == 0 and len(array_c) > 0:
                        Shoot(array_c[0][0], array_c[0][1])
                if team == "t":
                    if len(array_th) > 0:
                        Shoot(array_th[0][0], array_th[0][1])
                    if len(array_th) == 0 and len(array_t) > 0:
                        Shoot(array_t[0][0], array_t[0][1])


def Show_image(p_output2):
    global start_time, fps
    while True:
        image_np = p_output2.recv()
        # Show the image with detections
        cv2.imshow(title, image_np)
        # Below we calculate our FPS
        fps += 1
        TIME = time.time() - start_time
        if TIME >= display_time:
            print("FPS: ", fps / TIME)
            fps = 0
            start_time = time.time()
        # Press "q" to quit
        if cv2.waitKey(25) & 0xFF == ord("q"):
            cv2.destroyAllWindows()
            break

if __name__ == "__main__":
    # Pipes
    p_output, p_input = Pipe()
    p_output2, p_input2 = Pipe()

    # creating new processes
    p1 = multiprocessing.Process(target=grab_screen, args=(p_input,))
    p2 = multiprocessing.Process(target=TensorflowDetection, args=(p_output,p_input2,))
    p3 = multiprocessing.Process(target=Show_image, args=(p_output2,))

    # starting our processes
    p1.start()
    p2.start()
    p3.start()
```
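
One caveat with this layout: pressing "q" only breaks the `Show_image` loop, while `grab_screen` and `TensorflowDetection` keep running. A minimal cleanup sketch using standard `multiprocessing` calls (not part of the original tutorial) could be appended to the `__main__` block after `p3.start()`:

```python
    # Wait for the display process to exit (user pressed "q"),
    # then stop the two infinite-loop workers.
    p3.join()
    p1.terminate()
    p2.terminate()
    p1.join()
    p2.join()
```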

As a final result I was quite happy that we can achieve more than 20 FPS. But the bottleneck appears when TensorFlow receives images in which it detects enemies: FPS drops to 4-5 frames per second, and at that rate it becomes impossible for our bot to play the game. So I may come back to this project in the future, once I find a way to detect enemies faster. One candidate is the YOLO object detection model, which is quite fast and accurate, but it is harder to implement (for now).
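
To confirm that the `sess.run` call is the bottleneck, one could time it per frame. Below is a hedged sketch with a hypothetical `timed` helper; at roughly 0.2-0.25 s per inference you get exactly the 4-5 FPS observed above:

```python
import time

def timed(fn, *args, **kwargs):
    # Run fn once and report its duration: a rough probe for per-frame cost.
    t0 = time.time()
    result = fn(*args, **kwargs)
    dt = time.time() - t0
    print("call took %.3f s (~%.1f FPS if it runs once per frame)" % (dt, 1.0 / max(dt, 1e-9)))
    return result

# Hypothetical usage inside the detection loop, wrapping the sess.run call:
# (boxes, scores, classes, num_detections) = timed(
#     sess.run,
#     [boxes, scores, classes, num_detections],
#     feed_dict={image_tensor: image_np_expanded})
```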

Anyway, I spent a lot of time working on this project, and now I will move on to other, more beneficial projects. In the near future I am planning to make tutorials on how to crack CAPTCHAs, how to use Selenium to build web-surfing bots, and how to build an AI forex trading bot.