From a6023a772d3b0f3d23e5cd0872614523723c6b1f Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Fri, 10 Jun 2022 13:07:38 +0200 Subject: [PATCH 1/3] Show graphically direction where the robot is heading on displayed image --- behavior_metrics/brains/f1/brain_f1_opencv.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/behavior_metrics/brains/f1/brain_f1_opencv.py b/behavior_metrics/brains/f1/brain_f1_opencv.py index 64b8687f..b07ab589 100644 --- a/behavior_metrics/brains/f1/brain_f1_opencv.py +++ b/behavior_metrics/brains/f1/brain_f1_opencv.py @@ -115,8 +115,8 @@ def execute(self): self.previous_timestamp = timestamp if (timestamp - self.previous_timestamp >= 0.085): self.previous_image = self.camera.getImage().data + image = self.previous_image ''' - #image = self.previous_image image = self.camera.getImage().data if image.shape == (3, 3, 3): @@ -144,9 +144,6 @@ def execute(self): image_mask = cv2.inRange(image_hsv, red_lower, red_upper) # image_eroded = cv2.erode(image_mask, kernel, iterations=3) - # show image in gui -> frame_0 - self.update_frame('frame_0', image) - rows, cols = image_mask.shape rows = rows - 1 # para evitar desbordamiento @@ -202,6 +199,17 @@ def execute(self): self.motors.sendW(w) self.motors.sendV(v) + # show image in gui -> frame_0 + import math + x1, y1 = int(image.shape[:2][1] / 2), image.shape[:2][0] # ancho, alto + length = 200 + angle = (90 + int(math.degrees(-w))) * 3.14 / 180.0 + x2 = int(x1 - length * math.cos(angle)) + y2 = int(y1 - length * math.sin(angle)) + line_thickness = 2 + cv2.line(image, (x1, y1), (x2, y2), (0, 0, 0), thickness=line_thickness) + self.update_frame('frame_0', image) + v = np.interp(np.array([v]), (6.5, 24), (0, 1))[0] w = np.interp(np.array([w]), (-7.1, 7.1), (0, 1))[0] if self.previous_v != None: From aa65c9900dff524b801cb234acceb6d21ef9a9a8 Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Fri, 10 Jun 2022 13:17:29 +0200 Subject: [PATCH 2/3] Show direction for keras
brains --- .../f1/brain_f1_keras_opencv_dataset.py | 17 ++++++++++-- .../f1/brain_f1_keras_seq_3_opencv_dataset.py | 26 ++++++++++++++----- 2 files changed, 34 insertions(+), 9 deletions(-) diff --git a/behavior_metrics/brains/f1/brain_f1_keras_opencv_dataset.py b/behavior_metrics/brains/f1/brain_f1_keras_opencv_dataset.py index 143b5ebb..00795fa0 100644 --- a/behavior_metrics/brains/f1/brain_f1_keras_opencv_dataset.py +++ b/behavior_metrics/brains/f1/brain_f1_keras_opencv_dataset.py @@ -83,12 +83,13 @@ def execute(self): image = self.camera.getImage().data # image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + base_image = image if self.cont == 1: self.first_image = image image = self.handler.transform_image(image, self.config['ImageTranform']) - self.update_frame('frame_0', image) + #self.update_frame('frame_0', image) try: if self.config['ImageCropped']: @@ -129,6 +130,18 @@ def execute(self): self.previous_v = prediction[0][0] self.previous_w = prediction[0][1] + # show image in gui -> frame_0 + import math + x1, y1 = int(base_image.shape[:2][1]/2), base_image.shape[:2][0] # ancho, alto + length = 200 + angle = (90 + int(math.degrees(-prediction_w))) * 3.14 / 180.0 + x2 = int(x1 - length * math.cos(angle)) + y2 = int(y1 - length * math.sin(angle)) + + line_thickness = 2 + cv2.line(base_image, (x1, y1), (x2, y2), (0, 0, 0), thickness=line_thickness) + self.update_frame('frame_0', base_image) + # GradCAM from image i = np.argmax(prediction[0]) cam = GradCAM(self.net, i) @@ -138,4 +151,4 @@ def execute(self): self.update_frame('frame_1', output) except Exception as err: - print(err) \ No newline at end of file + print(err) diff --git a/behavior_metrics/brains/f1/brain_f1_keras_seq_3_opencv_dataset.py b/behavior_metrics/brains/f1/brain_f1_keras_seq_3_opencv_dataset.py index 0ccf1868..4642cd91 100644 --- a/behavior_metrics/brains/f1/brain_f1_keras_seq_3_opencv_dataset.py +++ b/behavior_metrics/brains/f1/brain_f1_keras_seq_3_opencv_dataset.py @@ -57,7 +57,7 @@ def 
__init__(self, sensors, actuators, model=None, handler=None, config=None): self.third_image = [] if self.config['GPU'] is False: - os.environ["CUDA_VISIBLE_DEVICES"]="-1" + os.environ["CUDA_VISIBLE_DEVICES"] = "-1" self.gpu_inference = True if tf.test.gpu_device_name() else False @@ -115,9 +115,10 @@ def execute(self): ''' image = self.camera.getImage().data + base_image = image if self.cont == 1: self.first_image = image - image = self.handler.transform_image(image,self.config['ImageTranform']) + image = self.handler.transform_image(image, self.config['ImageTranform']) try: if self.config['ImageCropped']: image = image[240:480, 0:640] @@ -125,7 +126,7 @@ def execute(self): img = cv2.resize(image, (self.config['ImageSize'][0], self.config['ImageSize'][1])) else: img = image - self.update_frame('frame_0', img) + # sself.update_frame('frame_0', img) if self.config['ImageNormalized']: AUGMENTATIONS_TEST = Compose([ Normalize() @@ -170,10 +171,9 @@ def execute(self): start_time = time.time() prediction = self.net.predict(img) self.inference_times.append(time.time() - start_time) - #prediction = prediction[0] if self.config['PredictionsNormalized']: - prediction_v = prediction[0][0]*(24 - (6.5)) + (6.5) - prediction_w = prediction[0][1]*(7.1 - (-7.1)) + (-7.1) + prediction_v = prediction[0][0] * (24 - (6.5)) + (6.5) + prediction_w = prediction[0][1] * (7.1 - (-7.1)) + (-7.1) else: prediction_v = prediction[0][0] prediction_w = prediction[0][1] @@ -181,10 +181,22 @@ def execute(self): self.motors.sendV(prediction_v) self.motors.sendW(prediction_w) + # show image in gui -> frame_0 + import math + x1, y1 = int(base_image.shape[:2][1] / 2), base_image.shape[:2][0] # ancho, alto + length = 200 + angle = (90 + int(math.degrees(-prediction_w))) * 3.14 / 180.0 + x2 = int(x1 - length * math.cos(angle)) + y2 = int(y1 - length * math.sin(angle)) + + line_thickness = 2 + cv2.line(base_image, (x1, y1), (x2, y2), (0, 0, 0), thickness=line_thickness) + self.update_frame('frame_0', 
base_image) + if self.previous_v != None: a = np.array((prediction[0][0], prediction[0][1])) b = np.array((self.previous_v, self.previous_w)) - distance = np.linalg.norm(a-b) + distance = np.linalg.norm(a - b) self.suddenness_distance.append(distance) self.previous_v = prediction[0][0] self.previous_w = prediction[0][1] From 0b6d0ba74c00683f704c08ac6541bb301f890b9f Mon Sep 17 00:00:00 2001 From: sergiopaniego Date: Fri, 10 Jun 2022 13:44:43 +0200 Subject: [PATCH 3/3] Generalized function --- .../f1/brain_f1_keras_opencv_dataset.py | 24 +++++++------- .../f1/brain_f1_keras_seq_3_opencv_dataset.py | 24 +++++++------- behavior_metrics/brains/f1/brain_f1_opencv.py | 33 +++++++++++-------- 3 files changed, 44 insertions(+), 37 deletions(-) diff --git a/behavior_metrics/brains/f1/brain_f1_keras_opencv_dataset.py b/behavior_metrics/brains/f1/brain_f1_keras_opencv_dataset.py index 00795fa0..c2fdc429 100644 --- a/behavior_metrics/brains/f1/brain_f1_keras_opencv_dataset.py +++ b/behavior_metrics/brains/f1/brain_f1_keras_opencv_dataset.py @@ -67,13 +67,23 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): print("- Models path: " + PRETRAINED_MODELS) print("- Model: " + str(model)) - def update_frame(self, frame_id, data): + def update_frame(self, frame_id, data, angular_speed=None): """Update the information to be shown in one of the GUI's frames. Arguments: frame_id {str} -- Id of the frame that will represent the data data {*} -- Data to be shown in the frame. 
Depending on the type of frame (rgbimage, laser, pose3d, etc) """ + if angular_speed: + import math + x1, y1 = int(data.shape[:2][1] / 2), data.shape[:2][0] # ancho, alto + length = 200 + angle = (90 + int(math.degrees(-angular_speed))) * 3.14 / 180.0 + x2 = int(x1 - length * math.cos(angle)) + y2 = int(y1 - length * math.sin(angle)) + + line_thickness = 2 + cv2.line(data, (x1, y1), (x2, y2), (0, 0, 0), thickness=line_thickness) self.handler.update_frame(frame_id, data) def execute(self): @@ -130,17 +140,7 @@ def execute(self): self.previous_v = prediction[0][0] self.previous_w = prediction[0][1] - # show image in gui -> frame_0 - import math - x1, y1 = int(base_image.shape[:2][1]/2), base_image.shape[:2][0] # ancho, alto - length = 200 - angle = (90 + int(math.degrees(-prediction_w))) * 3.14 / 180.0 - x2 = int(x1 - length * math.cos(angle)) - y2 = int(y1 - length * math.sin(angle)) - - line_thickness = 2 - cv2.line(base_image, (x1, y1), (x2, y2), (0, 0, 0), thickness=line_thickness) - self.update_frame('frame_0', base_image) + self.update_frame('frame_0', base_image, prediction_w) # GradCAM from image i = np.argmax(prediction[0]) diff --git a/behavior_metrics/brains/f1/brain_f1_keras_seq_3_opencv_dataset.py b/behavior_metrics/brains/f1/brain_f1_keras_seq_3_opencv_dataset.py index 4642cd91..f352d78f 100644 --- a/behavior_metrics/brains/f1/brain_f1_keras_seq_3_opencv_dataset.py +++ b/behavior_metrics/brains/f1/brain_f1_keras_seq_3_opencv_dataset.py @@ -72,13 +72,23 @@ def __init__(self, sensors, actuators, model=None, handler=None, config=None): print("- Models path: " + PRETRAINED_MODELS) print("- Model: " + str(model)) - def update_frame(self, frame_id, data): + def update_frame(self, frame_id, data, angular_speed=None): """Update the information to be shown in one of the GUI's frames. Arguments: frame_id {str} -- Id of the frame that will represent the data data {*} -- Data to be shown in the frame. 
Depending on the type of frame (rgbimage, laser, pose3d, etc) """ + if angular_speed: + import math + x1, y1 = int(data.shape[:2][1] / 2), data.shape[:2][0] # ancho, alto + length = 200 + angle = (90 + int(math.degrees(-angular_speed))) * 3.14 / 180.0 + x2 = int(x1 - length * math.cos(angle)) + y2 = int(y1 - length * math.sin(angle)) + + line_thickness = 2 + cv2.line(data, (x1, y1), (x2, y2), (0, 0, 0), thickness=line_thickness) self.handler.update_frame(frame_id, data) def check_center(self, position_x): @@ -181,17 +191,7 @@ def execute(self): self.motors.sendV(prediction_v) self.motors.sendW(prediction_w) - # show image in gui -> frame_0 - import math - x1, y1 = int(base_image.shape[:2][1] / 2), base_image.shape[:2][0] # ancho, alto - length = 200 - angle = (90 + int(math.degrees(-prediction_w))) * 3.14 / 180.0 - x2 = int(x1 - length * math.cos(angle)) - y2 = int(y1 - length * math.sin(angle)) - - line_thickness = 2 - cv2.line(base_image, (x1, y1), (x2, y2), (0, 0, 0), thickness=line_thickness) - self.update_frame('frame_0', base_image) + self.update_frame('frame_0', base_image, prediction_w) if self.previous_v != None: a = np.array((prediction[0][0], prediction[0][1])) diff --git a/behavior_metrics/brains/f1/brain_f1_opencv.py b/behavior_metrics/brains/f1/brain_f1_opencv.py index b07ab589..e6a305a6 100644 --- a/behavior_metrics/brains/f1/brain_f1_opencv.py +++ b/behavior_metrics/brains/f1/brain_f1_opencv.py @@ -50,8 +50,8 @@ def __init__(self, sensors, actuators, handler, config=None): self.cont = 0 self.iteration = 0 - #self.previous_timestamp = 0 - #self.previous_image = 0 + # self.previous_timestamp = 0 + # self.previous_image = 0 self.previous_v = None self.previous_w = None @@ -66,7 +66,23 @@ def __init__(self, sensors, actuators, handler, config=None): ''' time.sleep(2) - def update_frame(self, frame_id, data): + def update_frame(self, frame_id, data, angular_speed=None): + """Update the information to be shown in one of the GUI's frames. 
+ + Arguments: + frame_id {str} -- Id of the frame that will represent the data + data {*} -- Data to be shown in the frame. Depending on the type of frame (rgbimage, laser, pose3d, etc) + """ + if angular_speed: + import math + x1, y1 = int(data.shape[:2][1] / 2), data.shape[:2][0] # ancho, alto + length = 200 + angle = (90 + int(math.degrees(-angular_speed))) * 3.14 / 180.0 + x2 = int(x1 - length * math.cos(angle)) + y2 = int(y1 - length * math.sin(angle)) + + line_thickness = 2 + cv2.line(data, (x1, y1), (x2, y2), (0, 0, 0), thickness=line_thickness) self.handler.update_frame(frame_id, data) def collinear3(self, x1, y1, x2, y2, x3, y3): @@ -199,16 +215,7 @@ def execute(self): self.motors.sendW(w) self.motors.sendV(v) - # show image in gui -> frame_0 - import math - x1, y1 = int(image.shape[:2][1] / 2), image.shape[:2][0] # ancho, alto - length = 200 - angle = (90 + int(math.degrees(-w))) * 3.14 / 180.0 - x2 = int(x1 - length * math.cos(angle)) - y2 = int(y1 - length * math.sin(angle)) - line_thickness = 2 - cv2.line(image, (x1, y1), (x2, y2), (0, 0, 0), thickness=line_thickness) - self.update_frame('frame_0', image) + self.update_frame('frame_0', image, w) v = np.interp(np.array([v]), (6.5, 24), (0, 1))[0] w = np.interp(np.array([w]), (-7.1, 7.1), (0, 1))[0]