Added PPMA/RA demo code #2

Merged
merged 10 commits on Dec 4, 2019
@@ -0,0 +1,145 @@
# Automata Technologies - Andrea Antonello, 2019
import math
import operator
import numpy as np


def _convert_to_float(fl):
    """ This method converts ONLY the numeric values of a string into floats """
    try:
        return float(fl)
    except (ValueError, TypeError):
        return fl


def _wrap_to_pi(angle):
    """ This method wraps the input angle to the interval [-pi, pi]
        angle : [rad] """
    ang = angle - (angle // (2 * np.pi)) * 2 * np.pi
    if ang > np.pi or ang < -np.pi:
        ang = ang - np.sign(ang) * 2 * np.pi
    return ang


def _quaternion_multiply(quat1, quat0):
    """ This method performs a standard quaternion multiplication
        quat0 : [qR0, qV0], with qR0 being the real part of the quaternion
        quat1 : [qR1, qV1], with qR1 being the real part of the quaternion """
    w0, x0, y0, z0 = quat0
    w1, x1, y1, z1 = quat1
    return np.array([-x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0,
                     x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0,
                     -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0,
                     x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0], dtype=np.float64)


def _solve_fk(eva, joints):
    """ This method solves the forward kinematics problem and extracts the results directly as arrays
        joints : joint angles in [rad]
        pos    : cartesian position, with respect to the robot's origin [m]
        orient : orientation quaternion of the end effector """
    fk_results = eva.calc_forward_kinematics(joints)
    pos_json = fk_results['position']
    orient_json = fk_results['orientation']
    pos = [pos_json['x'], pos_json['y'], pos_json['z']]
    orient = [orient_json['w'], orient_json['x'], orient_json['y'], orient_json['z']]
    return pos, orient


def solve_ik_head_down(eva, guess, theta, xyz_absolute):
    """ This method solves the inverse kinematics problem for the special case of the end-effector
        pointing downwards, perpendicular to the ground.
        guess        : IK guess, a 1x6 array of joint angles in [rad]
        theta        : angular rotation of axis 6 [deg]
        xyz_absolute : cartesian position, with respect to the robot's origin [m] """
    pos = [xyz_absolute[0], xyz_absolute[1], xyz_absolute[2]]  # [m]
    pos_json = {'x': (pos[0]), 'y': (pos[1]), 'z': (pos[2])}  # [m]
    orient_rel = [math.cos(np.deg2rad(theta) / 2), 0, 0, math.sin(np.deg2rad(theta) / 2)]
    orient_abs = _quaternion_multiply([0, 0, 1, 0], orient_rel)
    orient_json = {'w': (orient_abs[0]), 'x': (orient_abs[1]), 'y': (orient_abs[2]), 'z': (orient_abs[3])}
    # Compute IK
    result_ik = eva.calc_inverse_kinematics(guess, pos_json, orient_json)
    success_ik = result_ik['ik']['result']
    joints_ik = result_ik['ik']['joints']
    return success_ik, joints_ik


def _read_tcp_ip(sock, objects):
[Review conversation, marked as resolved by frusciante8989]

LouisBrunner (Member), Nov 11, 2019:
    This function could have a more explicit name. Overall, the TCP camera code should be
    abstracted into a class for ease of use.

frusciante8989 (Author), Nov 28, 2019:
    I think it's a bit of a pain to abstract it as it is, since the parsing of the string is
    very specific to this blog use case (type of decoding, length of the string, structure of
    the string, etc.). I'd keep the vision part as a class instead, since that is more
    agnostic to the user setup.

""" This method reads and decodes the string sent from the camera """
result = sock.recv(4000)
string_read = result.decode('utf-8')
string_split = string_read.split(",")
camera_string_raw = list(string_split)
passed = False
camera_string = ['']
if len(camera_string_raw) is not 0:
if camera_string_raw[0] == 'start' and camera_string_raw[19] == 'end' and len(camera_string_raw) == 20:
camera_string_raw = [_convert_to_float(fl) for fl in camera_string_raw]
passes = [camera_string_raw[6], camera_string_raw[12], camera_string_raw[18]]
scores = [camera_string_raw[5], camera_string_raw[11], camera_string_raw[17]]
passed_score = [passes[0] * scores[0], passes[1] * scores[1], passes[2] * scores[2]]
max_index, max_value = max(enumerate(passed_score), key=operator.itemgetter(1))
select_obj = objects[max_index]
if max_value > 0:
passed = True
# Extract the best matching object from the string
camera_string = _extract_camera_serial(objects, select_obj, camera_string_raw)
return passed, camera_string
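
A minimal sketch of the class abstraction suggested in the review conversation above, shown for illustration only (it is not part of the PR). It assumes the same 20-field comma-separated 'start' ... 'end' framing used by this demo's camera; the name CameraReader and its methods are hypothetical.

import socket

class CameraReader:
    """ Thin wrapper around the TCP connection to the vision camera """
    def __init__(self, server, port, buffer_size=4000):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((server, port))
        self.buffer_size = buffer_size

    def read_fields(self):
        """ Read one message; return the decoded fields, or None if the framing is wrong """
        raw = self.sock.recv(self.buffer_size).decode('utf-8').split(',')
        fields = [_convert_to_float(fl) for fl in raw]  # numeric fields become floats
        if len(fields) == 20 and fields[0] == 'start' and fields[19] == 'end':
            return fields
        return None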


def _extract_camera_serial(objects, index, camera_string_raw):
    """ This method extracts only the best matching object data from the entire string """
    camera_string = ['', 0, 0, 0, 0, 0]
    if index not in objects:
        print('Wrong object in the list')
    elif index == 'C':
        camera_string[0] = 'start'
        camera_string[1] = camera_string_raw[1]
        camera_string[2] = camera_string_raw[2]
        camera_string[3] = camera_string_raw[3]
        camera_string[4] = camera_string_raw[4]
    elif index == 'M':
        camera_string[0] = 'start'
        camera_string[1] = camera_string_raw[7]
        camera_string[2] = camera_string_raw[8]
        camera_string[3] = camera_string_raw[9]
        camera_string[4] = camera_string_raw[10]
    elif index == 'R':
        camera_string[0] = 'start'
        camera_string[1] = camera_string_raw[13]
        camera_string[2] = camera_string_raw[14]
        camera_string[3] = camera_string_raw[15]
        camera_string[4] = camera_string_raw[16]
    return camera_string


class EvaVision:
[Review conversation, marked as resolved by frusciante8989]

LouisBrunner (Member), Nov 11, 2019:
    Why is this a class and not just free functions like the others?

""" This class performs the machine vision operations in order to obtain the object position in Eva's frame """
def __init__(self, eva, string, cal_zero, obj_height=0.0, surf_height=0.0, ee_length=0.0):
self.eva = eva
self.string = string
self.cal = cal_zero
self.obj = obj_height
self.surf = surf_height
self.ee = ee_length

def locate_object(self):
print('Pattern identified is: ', self.string[1])
# Relative object position in camera frame:
x_obj_rel_cam = 0.001*self.string[2] # transform X value from [mm] into [m]
y_obj_rel_cam = 0.001*self.string[3] # transform Y value from [mm] into [m]
# Compute relative object position in Eva's frame:
# Need to known Eva's frame rotation wrt to camera frame
# Convention: start from camera frame and rotate of ang [deg] to get to Eva's frame
ang_cam = 180 # [deg]
x_obj_rel = np.cos(np.deg2rad(ang_cam)) * x_obj_rel_cam + np.sin(np.deg2rad(ang_cam)) * y_obj_rel_cam # [m]
y_obj_rel = -np.sin(np.deg2rad(ang_cam)) * x_obj_rel_cam + np.cos(np.deg2rad(ang_cam)) * y_obj_rel_cam # [m]
# Compute absolute object position of calibration board origin in Eva's frame:
pos_cal = self.eva.calc_forward_kinematics(self.cal)['position']
# Compute absolute object position by summing the calibration board origin to the relative object position
x_obj_abs = x_obj_rel + pos_cal['x'] # [m]
y_obj_abs = y_obj_rel + pos_cal['y'] # [m]
# Compute absolute value of Z
z_obj_abs = abs(self.obj) + self.surf + abs(self.ee)
pos_abs = [x_obj_abs, y_obj_abs, z_obj_abs]
return pos_abs
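
For comparison with the reviewer's question above, the same computation could be written as a free function in the style of the other helpers; this is only a sketch mirroring EvaVision.locate_object, and the name locate_object_in_eva_frame is hypothetical. The author's reply in the conversation on _read_tcp_ip above keeps the Vision part as a class since it is more agnostic to the user setup.

def locate_object_in_eva_frame(eva, cam_string, cal_zero, obj_height=0.0, surf_height=0.0, ee_length=0.0):
    """ Free-function sketch of EvaVision.locate_object, kept here only for comparison """
    ang_cam = 180  # [deg] rotation from the camera frame to Eva's frame (same convention as above)
    x_cam = 0.001 * cam_string[2]  # [mm] -> [m]
    y_cam = 0.001 * cam_string[3]  # [mm] -> [m]
    x_rel = np.cos(np.deg2rad(ang_cam)) * x_cam + np.sin(np.deg2rad(ang_cam)) * y_cam
    y_rel = -np.sin(np.deg2rad(ang_cam)) * x_cam + np.cos(np.deg2rad(ang_cam)) * y_cam
    pos_cal = eva.calc_forward_kinematics(cal_zero)['position']  # calibration board origin in Eva's frame
    z_abs = abs(obj_height) + surf_height + abs(ee_length)
    return [x_rel + pos_cal['x'], y_rel + pos_cal['y'], z_abs]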
@@ -0,0 +1,89 @@
# Automata Technologies - Andrea Antonello, 2019
import socket
from automata import Eva
from evaUtilities import solve_ik_head_down, EvaVision, _read_tcp_ip

# Connection to robot
host = <user_input>
[Review conversation, marked as resolved by frusciante8989]

LouisBrunner (Member), Nov 11, 2019:
    Maybe extract all the configuration values that are input from the user into another file
    for ease of use (having waypoint_home in the middle is easy to miss).
    A user should also be able to run the example, even with bad settings.

frusciante8989 (Author), Nov 11, 2019:
    It makes sense to have the user parameters in another function. However, the idea behind
    having the user manually input the values is to avoid people crashing their robot into
    existing hardware when using the "bad settings".

token = <user_input>
eva = Eva(host, token)
eva.lock_status()

# Connection to camera
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server = <user_input>
port = <user_input>
sock.connect((server, port))

# Use-case parameters
objects = ['C', 'M', 'R']
object_heights = {'C': <user_input>, 'M': <user_input>, 'R': <user_input>} # object thicknesses [m]
end_effector_length = <user_input> # length of tool [m]
hover_height = <user_input>  # elevation of the idle z axis wrt the object [m]
surface_height = <user_input>  # elevation of the pickup surface wrt the robot [m, signed]
joint_cal_zero = <user_input>  # joints @ (0, 0) of the calibration board
joint_guess = <user_input>  # joint guess for the pickup/hover position [rad]
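
A sketch of the configuration split suggested in the review conversation above: the user-specific values could live in a separate file (a hypothetical config.py), with the main script starting with "from config import *". The placeholders are kept deliberately, in line with the author's point that users should enter their own values rather than run defaults that could crash the robot.

# config.py (hypothetical) - all user-specific parameters in one place
host = <user_input>                   # robot hostname or IP
token = <user_input>                  # robot API token
server = <user_input>                 # camera IP
port = <user_input>                   # camera TCP port
objects = ['C', 'M', 'R']
object_heights = {'C': <user_input>, 'M': <user_input>, 'R': <user_input>}  # object thicknesses [m]
end_effector_length = <user_input>    # length of tool [m]
hover_height = <user_input>           # elevation of the idle z axis wrt the object [m]
surface_height = <user_input>         # elevation of the pickup surface wrt the robot [m, signed]
joint_cal_zero = <user_input>         # joints @ (0, 0) of the calibration board
joint_guess = <user_input>            # joint guess for the pickup/hover position [rad]
waypoint_home = <user_input>          # [rad] home position
waypoint_drop = <user_input>          # [rad] drop-off position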

with eva.lock():
    while True:
        # Camera string format = ['start', 'pattern', 'x_mm', 'y_mm', 'angle', 'pass']
        passed, cam_string = _read_tcp_ip(sock, objects)
[Review conversation, marked as resolved by frusciante8989]

LouisBrunner (Member), Nov 11, 2019:
    There are some case issues between the two files: readTcpIp doesn't exist, but _read_tcp_ip does.
    Overall, try to stick to snake_case, as this is the standard in Python.

frusciante8989 (Author), Nov 11, 2019:
    Correct, I'll fix the typo.


        if passed and len(cam_string) == 5 and cam_string[0] == 'start':  # Successful string format
            eva_vision = EvaVision(eva, cam_string, joint_cal_zero, object_heights[cam_string[1]],
                                   surface_height, end_effector_length)
            xyz = eva_vision.locate_object()
            xyz_hover = list(xyz)  # copy, so the pickup position keeps the original Z
            xyz_hover[2] = xyz[2] + abs(hover_height)  # add hover height to Z

            # Compute IK for pickup and hover - special case with head-down solution
            success_ik_pickup, joints_ik_pickup = solve_ik_head_down(eva, joint_guess, cam_string[4], xyz)
            success_ik_hover, joints_ik_hover = solve_ik_head_down(eva, joint_guess, cam_string[4], xyz_hover)

            # Verify IK success
            if 'success' in success_ik_hover and 'success' in success_ik_pickup:
                perform_move = True
                message = 'Successful IK'
                print(message)
            else:
                perform_move = False
                message = 'Failed IK. Position not reachable'
                print(message)

            if perform_move:
                # Create the way-points, generate the tool-path, save it and execute it
                waypoint_home = <user_input>  # [rad] home position
                waypoint_drop = <user_input>  # [rad] drop-off position
                waypoint_hover = joints_ik_hover  # hover position right above the object: offset in height (z_pickup)
                waypoint_pickup = joints_ik_pickup  # pickup position on the surface of the object

                toolpath_machine_vision = {
                    "metadata": {
                        "default_velocity": 1,
                        "next_label_id": 5,
                        "analog_modes": {"i0": "voltage", "i1": "voltage", "o0": "voltage", "o1": "voltage"}
                    },
                    "waypoints": [
                        {"label_id": 1, "joints": waypoint_home},    # index 0
                        {"label_id": 2, "joints": waypoint_hover},   # index 1
                        {"label_id": 3, "joints": waypoint_pickup},  # index 2
                        {"label_id": 4, "joints": waypoint_drop},    # index 3
                    ],
                    "timeline": [
                        {"type": "home", "waypoint_id": 0},
                        {"type": "trajectory", "trajectory": "joint_space", "waypoint_id": 1},
                        {"type": "output-set", "io": {"location": "base", "type": "digital", "index": 0},
                         "value": True},
                        {"type": "trajectory", "trajectory": "linear", "waypoint_id": 2},
                        {"type": "wait", "condition": {"type": "time", "duration": 500}},
                        {"type": "trajectory", "trajectory": "joint_space", "waypoint_id": 1},
                        {"type": "trajectory", "trajectory": "joint_space", "waypoint_id": 3},
                        {"type": "output-set", "io": {"location": "base", "type": "digital", "index": 0},
                         "value": False},
                        {"type": "trajectory", "trajectory": "joint_space", "waypoint_id": 0},
                    ]
                }
                eva.control_wait_for_ready()
                eva.toolpaths_use(toolpath_machine_vision)
                eva.control_run()
        else:
            print('No object correctly recognized')