import json
import sys
import copy
import torch
import math
import numpy as np
import cv2
import imageio
import imagesize
from sklearn.model_selection import train_test_split

# List of UnityEyes sample basenames (one per line, without extension).
sample_list = "UnityEyes/List.txt"


def rotate(origin, point, angle):
    """Rotate `point` around `origin`; the angle is negated to match the image's downward y-axis."""
    angle = -angle
    ox, oy = origin
    px, py = point
    qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
    qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
    return qx, qy


def angle(p1, p2):
    """Angle of the vector p1 -> p2 in radians, normalized to [0, 2*pi)."""
    p1 = np.array(p1)
    p2 = np.array(p2)
    a = np.arctan2(*(p2 - p1)[::-1])
    return a % (2 * np.pi)


def compensate(p1, p2):
    """Rotate p2 around p1 so the p1 -> p2 axis becomes horizontal; return (rotated p2, angle)."""
    a = angle(p1, p2)
    return rotate(p1, p2, a), a


def rotate_image(image, angle, center=None, scale=1.0):
    """Rotate an image by `angle` radians around `center` (defaults to the image center)."""
    (h, w) = image.shape[:2]
    angle = np.rad2deg(angle)
    if center is None:
        center = (w / 2, h / 2)
    M = cv2.getRotationMatrix2D((center[0], center[1]), angle, scale)
    rotated = cv2.warpAffine(image, M, (w, h))
    return rotated


def clamp_to_size(pt, w, h):
    """Clamp a point to valid integer pixel coordinates of a w x h image."""
    x = pt[0]
    y = pt[1]
    if x < 0:
        x = 0
    if y < 0:
        y = 0
    if x >= w:
        x = w - 1
    if y >= h:
        y = h - 1
    return (int(x), int(y))


def corner_to_bbox(corners, w, h):
    """Build a square bounding box from the two eye-corner points.

    The outer corner is rotated so the corner-to-corner axis is horizontal; the box is
    centered between the corners, with a 20% margin added to the half-width.
    """
    ((cx1, cy1), (cx2, cy2)) = corners
    c1 = np.array([cx1, cy1])
    c2 = np.array([cx2, cy2])
    c2, angle = compensate(c1, c2)
    center = (c1 + c2) / 2.0
    radius = 1.2 * np.linalg.norm(c1 - c2) / 2.0
    upper_left = clamp_to_size(center - np.array([radius, radius]), w, h)
    lower_right = clamp_to_size(center + np.array([radius, radius]), w, h)
    return upper_left, lower_right, center, np.array([radius, radius]), c1, angle


def json_postproc(arr, h):
    """Parse UnityEyes "(x, y, z)" landmark strings and flip the y-axis to image coordinates."""
    arr = [eval(s) for s in arr]
    return np.array([(x, h - y, z) for (x, y, z) in arr])


def read():
    global sample_list
    dataset = []
    with open(sample_list) as fh:
        lines = fh.readlines()
    n = 0
    for line in lines:
        n += 1
        basename = line.strip()
        json_str = None
        with open(basename + ".json") as fh:
            json_str = fh.read()
        if json_str is None:
            continue
        data = json.loads(json_str)
        w, h = imagesize.get(basename + ".jpg")

        # Convert the UnityEyes landmark strings into arrays in image coordinates.
        data["caruncle_2d"] = json_postproc(data["caruncle_2d"], h)
        data["interior_margin_2d"] = json_postproc(data["interior_margin_2d"], h)
        data["iris_2d"] = json_postproc(data["iris_2d"], h)

        # Inner corner (caruncle), outer corner (interior margin) and pupil center.
        inner_pt = data["caruncle_2d"][-1, 0:2]
        outer_pt = data["interior_margin_2d"][8, 0:2]
        pupil = np.mean(data["iris_2d"], axis=0)[0:2]

        eye = {}
        (eye["UpperLeft"], eye["LowerRight"], eye["Center"], eye["Radius"],
         eye["Reference"], eye["Angle"]) = corner_to_bbox(
            ((inner_pt[0], inner_pt[1]), (outer_pt[0], outer_pt[1])), w, h)
        eye["Pupil"] = rotate(eye["Reference"], tuple(pupil), eye["Angle"])

        # Load as BGR, level the eye by rotating around the inner corner, then mirror horizontally.
        im = imageio.imread(basename + ".jpg", pilmode="RGB")[:, :, ::-1]
        im = rotate_image(im, eye["Angle"], eye["Reference"])
        im = cv2.flip(im, 1)

        # Mirror the bounding box and pupil so they match the flipped image.
        eye["_UpperLeft"] = np.array((w - eye["LowerRight"][0], eye["UpperLeft"][1]))
        eye["_LowerRight"] = np.array((w - eye["UpperLeft"][0], eye["LowerRight"][1]))
        eye["UpperLeft"] = eye["_UpperLeft"]
        eye["LowerRight"] = eye["_LowerRight"]
        eye["Pupil"] = np.array((w - eye["Pupil"][0], eye["Pupil"][1]))
        eye["LowerRight"] = np.array(eye["LowerRight"]) + 1

        # Crop to the bounding box and shift all coordinates to the crop's origin.
        #print(f'im = im[{eye["UpperLeft"][0]}:{eye["LowerRight"][0]}, {eye["UpperLeft"][1]}:{eye["LowerRight"][1]}, :]')
        im = im[eye["UpperLeft"][1]:eye["LowerRight"][1], eye["UpperLeft"][0]:eye["LowerRight"][0], :]
        eye["LowerRight"] = np.array(eye["LowerRight"]) - eye["UpperLeft"]
        eye["Pupil"] = np.array(eye["Pupil"]) - eye["UpperLeft"]
        eye["UpperLeft"] = np.array(eye["UpperLeft"]) - np.array(eye["UpperLeft"])

        eye["Filename"] = f"UnityEyes/img/eye_{n:05d}.png"
        cv2.imwrite(eye["Filename"], im)
        dataset.append(eye)
    return dataset


# 80/20 train/validation split with a fixed seed, saved as a single file.
train, test = train_test_split(read(), test_size=0.2, random_state=42)
print("train: " + str(len(train)))
print("test: " + str(len(test)))
torch.save({"train": train, "validation": test}, "UnityEyes/dataset.pth")
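
# A minimal sketch of how the saved file could be consumed downstream (the keys match
# the torch.save call above; variable names here are illustrative only):
#
#   data = torch.load("UnityEyes/dataset.pth")
#   train_set, val_set = data["train"], data["validation"]
#   sample = train_set[0]                  # dict with "Filename", "Pupil", "LowerRight", ...
#   img = cv2.imread(sample["Filename"])   # cropped eye patch written by read()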