import logging

logging.basicConfig(level=logging.INFO)

import os
import sys

import cv2
import numpy as np
# import pyrealsense2 as rs

# Make the project's config/utils/libs packages importable when this script
# is run directly from its own directory.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)                          # config
sys.path.append(os.path.join(ROOT_DIR, 'utils'))   # utils
sys.path.append(os.path.join(ROOT_DIR, 'libs'))    # libs

from model_pose_ren import ModelPoseREN
import util
from util import get_center_fast as get_center

from pylibfreenect2 import Freenect2, SyncMultiFrameListener
from pylibfreenect2 import FrameType, Registration, Frame
from pylibfreenect2 import createConsoleLogger, setGlobalLogger
from pylibfreenect2 import LoggerLevel


def show_results(img, results, cropped_image, dataset):
    """Render a depth image with the estimated hand pose drawn on top.

    Parameters
    ----------
    img : 2-D depth array; values are clipped at 1500 before being
        normalised to [0, 255] for display.
    results : pose estimate, forwarded unchanged to ``util.draw_pose``.
    cropped_image : the network input crop, assumed to lie in [-1, 1]
        (TODO confirm against the model); it is rescaled and pasted into
        the top-left 96x96 corner of the display image.
    dataset : dataset name string, forwarded to ``util.draw_pose``.

    Returns
    -------
    BGR uint8 image with the cropped-input inset (outlined in blue) and
    the pose overlay drawn by ``util.draw_pose``.
    """
    img = np.minimum(img, 1500)
    # Guard against a constant image, which would otherwise divide by zero.
    span = img.max() - img.min()
    if span == 0:
        span = 1
    img = (img - img.min()) / span
    img = np.uint8(img * 255)
    # Paste the (rescaled) cropped network input into the top-left corner.
    img[:96, :96] = (cropped_image + 1) * 255 / 2
    img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.rectangle(img, (0, 0), (96, 96), (255, 0, 0), thickness=2)
    img_show = util.draw_pose(dataset, img, results)
    return img_show


# Pick the fastest available packet pipeline: OpenGL > OpenCL > CPU.
# Catch Exception (not bare except) so Ctrl-C / SystemExit still propagate;
# pipeline construction itself may fail (e.g. no GL context), so we cannot
# narrow this to ImportError without losing the fallback.
try:
    from pylibfreenect2 import OpenGLPacketPipeline
    pipeline = OpenGLPacketPipeline()
except Exception:
    try:
        from pylibfreenect2 import OpenCLPacketPipeline
        pipeline = OpenCLPacketPipeline()
    except Exception:
        from pylibfreenect2 import CpuPacketPipeline
        pipeline = CpuPacketPipeline()
print("Packet pipeline:", type(pipeline).__name__)

# Create and set logger
logger = createConsoleLogger(LoggerLevel.Debug)
setGlobalLogger(logger)

fn = Freenect2()
num_devices = fn.enumerateDevices()
if num_devices == 0:
    print("No device connected!")
    sys.exit(1)

serial = fn.getDeviceSerialNumber(0)
device = fn.openDevice(serial, pipeline=pipeline)

# One listener receives color, IR and depth frames synchronously.
listener = SyncMultiFrameListener(
    FrameType.Color | FrameType.Ir | FrameType.Depth)

# Register listeners
device.setColorFrameListener(listener)
device.setIrAndDepthFrameListener(listener)

device.start()

# NOTE: must be called after device.start()
# Registration must be created after device.start() so the camera
# parameters returned by the device are valid.
registration = Registration(device.getIrCameraParams(),
                            device.getColorCameraParams())

undistorted = Frame(512, 424, 4)
registered = Frame(512, 424, 4)

# Optional parameters for registration
# set True if you need
need_bigdepth = False
need_color_depth_map = False

bigdepth = Frame(1920, 1082, 4) if need_bigdepth else None
color_depth_map = np.zeros((424, 512), np.int32).ravel() \
    if need_color_depth_map else None

dataset = 'hands17'  # supported values: hands17, nyu
# Kinect v2 IR camera intrinsics: focal lengths and principal point.
fx, fy, ux, uy = 369.713, 369.713, 254.652, 205.019
# Depth range (presumably millimetres — confirm against the model) used by
# the hand-centre finder; readings outside [lower_, upper_] are ignored.
lower_ = 1
upper_ = 4500

hand_model = ModelPoseREN(
    dataset,
    lambda img: get_center(img, lower=lower_, upper=upper_),
    param=(fx, fy, ux, uy), use_gpu=True)

try:
    while True:
        frames = listener.waitForNewFrame()

        color = frames["color"]
        ir = frames["ir"]
        depth = frames["depth"]

        registration.apply(color, depth, undistorted, registered,
                           bigdepth=bigdepth,
                           color_depth_map=color_depth_map)

        # NOTE for visualization:
        # cv2.imshow without OpenGL backend seems to be quite slow to draw all
        # things below. Try commenting out some imshow if you don't have a fast
        # visualization backend.
        try:
            # Embed the 424x512 Kinect depth map into a 480x640 canvas;
            # the pose model is fed this larger resolution.
            new_depth = np.zeros([480, 640], dtype='float32')
            cv2.imshow("ir", ir.asarray() / 65535.)
            cv2.imshow("depth", depth.asarray() / 4500.)
            new_depth[0:424, 0:512] = depth.asarray()
            cv2.imshow('new_depth', new_depth)
            # Zero depth readings are invalid; push them to the far plane so
            # the centre finder does not lock onto them.
            new_depth[new_depth == 0] = new_depth.max()
            results, cropped_image = hand_model.detect_image(new_depth)
            img_show = show_results(new_depth, results, cropped_image, dataset)
            cv2.imshow('result', img_show)
        finally:
            # Always return the frame buffers, even if detection or display
            # raised — otherwise the listener queue stalls.
            listener.release(frames)

        # Mask to the low byte: on some platforms waitKey returns the key
        # code with modifier/flag bits set in the upper bytes.
        key = cv2.waitKey(delay=1)
        if key != -1 and (key & 0xFF) == ord('q'):
            break
finally:
    # Shut the device down cleanly even on Ctrl-C or an error in the loop.
    device.stop()
    device.close()

sys.exit(0)