diff --git a/Readme.txt b/Readme.txt
index b47bb556..0949348d 100644
--- a/Readme.txt
+++ b/Readme.txt
@@ -69,21 +69,24 @@ After landmark detection is done clm_model stores the landmark locations and loc
 Head Pose:
 
-	// Head pose is stored in the following format (X, Y, Z, rot_x, roty_y, rot_z), translation is in millimeters with respect to camera and rotation is in radians around X,Y,Z axes with the convention R = Rx * Ry * Rz, left-handed positive sign, the rotation can be either with respect to camera or the camera plane (for visualisation we want rotation with respect to camera plane)
+	// Head pose is stored in the following format (X, Y, Z, rot_x, rot_y, rot_z)
+	// translation is in millimeters with respect to camera centre
+	// Rotation is in radians around X,Y,Z axes with the convention R = Rx * Ry * Rz, left-handed positive sign
+	// The rotation can be either with respect to camera or world coordinates (for visualisation we want rotation with respect to world coordinates)
 
 There are four methods in total that can return the head pose
 
 	//Getting the head pose w.r.t. camera assuming orthographic projection
 	Vec6d GetPoseCamera(CLM& clm_model, double fx, double fy, double cx, double cy, CLMParameters& params);
 
-	//Getting the head pose w.r.t. camera plane assuming orthographic projection
-	Vec6d GetPoseCameraPlane(CLM& clm_model, double fx, double fy, double cx, double cy, CLMParameters& params);
+	//Getting the head pose w.r.t. world coordinates assuming orthographic projection
+	Vec6d GetPoseWorld(CLM& clm_model, double fx, double fy, double cx, double cy, CLMParameters& params);
 
 	//Getting the head pose w.r.t. camera with a perspective camera correction
 	Vec6d GetCorrectedPoseCamera(CLM& clm_model, double fx, double fy, double cx, double cy, CLMParameters& params);
 
-	//Getting the head pose w.r.t. camera plane with a perspective camera correction
-	Vec6d GetCorrectedPoseCameraPlane(CLM& clm_model, double fx, double fy, double cx, double cy, CLMParameters& params);
+	//Getting the head pose w.r.t. world coordinates with a perspective camera correction
+	Vec6d GetCorrectedPoseWorld(CLM& clm_model, double fx, double fy, double cx, double cy, CLMParameters& params);
 
 	// fx,fy,cx,cy are camera callibration parameters needed to infer the 3D position of the head with respect to camera, a good assumption for webcams providing 640x480 images is 500, 500, img_width/2, img_height/2
 
@@ -114,7 +117,7 @@ Parameters for output
 	-of3D , the file format is as follows: frame_number, timestamp(seconds), confidence, detection_success, X_1, X_2 ... X_n, Y_1, Y_2, ... Y_n, Z_1, Z_2, ... Z_n
 	-ov 
 
-	-cp <1/0, should rotation be measured with respect to the camera plane or camera, see Head pose section for more details>
+	-world_coord <1/0, should rotation be measured with respect to world coordinates or the camera, see Head pose section for more details>
 
 Model parameters (apply to images and videos)
 	-mloc 
 
@@ -169,7 +172,7 @@ Parameters for output
 	-simalignvid , outputs similarity aligned faces to a video (need HFYU video codec to read it)
 	-simaligndir , same as above but instead of video the aligned faces are put in a directory
 
-	-cp <1/0>, should rotation be measured with respect to the camera plane or camera, see Head pose section for more details>
+	-world_coord <1/0>, should rotation be measured with respect to the camera or world coordinates, see Head pose section for more details>
 
 Additional parameters for output
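For illustration only (not part of this patch): a minimal C++ sketch of calling the renamed pose API with the webcam defaults suggested above. The include name, the CLMTracker::CLMParameters namespacing and the DetectLandmarksInVideo setup are assumed from the rest of the CLM codebase rather than shown in this diff.

    // Minimal sketch, assuming the CLM include directory is on the include path
    // and that a grayscale frame plus an initialised model are supplied by the caller.
    #include <CLMTracker.h>
    #include <opencv2/core/core.hpp>

    cv::Vec6d head_pose_for_frame(const cv::Mat_<uchar>& grayscale_image,
                                  CLMTracker::CLM& clm_model,
                                  CLMTracker::CLMParameters& clm_parameters)
    {
        // Webcam defaults suggested in the Readme for 640x480 input
        double fx = 500, fy = 500;
        double cx = grayscale_image.cols / 2.0;
        double cy = grayscale_image.rows / 2.0;

        // Fit the landmarks first, then query the pose of the fitted model
        CLMTracker::DetectLandmarksInVideo(grayscale_image, clm_model, clm_parameters);

        // [Tx, Ty, Tz, Eul_x, Eul_y, Eul_z]: translation in millimetres, rotation in radians,
        // expressed w.r.t. world coordinates (GetCorrectedPoseCamera gives camera coordinates)
        return CLMTracker::GetCorrectedPoseWorld(clm_model, fx, fy, cx, cy);
    }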
diff --git a/exe/FeatureExtraction/FeatureExtraction.cpp b/exe/FeatureExtraction/FeatureExtraction.cpp
index a8bbb01f..fed8920c 100644
--- a/exe/FeatureExtraction/FeatureExtraction.cpp
+++ b/exe/FeatureExtraction/FeatureExtraction.cpp
@@ -414,7 +414,7 @@ void visualise_tracking(Mat& captured_image, const CLMTracker::CLM& clm_model, c
 	// A rough heuristic for box around the face width
 	int thickness = (int)std::ceil(2.0* ((double)captured_image.cols) / 640.0);
 
-	Vec6d pose_estimate_to_draw = CLMTracker::GetCorrectedPoseCameraPlane(clm_model, fx, fy, cx, cy);
+	Vec6d pose_estimate_to_draw = CLMTracker::GetCorrectedPoseWorld(clm_model, fx, fy, cx, cy);
 
 	// Draw it in reddish if uncertain, blueish if certain
 	CLMTracker::DrawBox(captured_image, pose_estimate_to_draw, Scalar((1 - vis_certainty)*255.0, 0, vis_certainty * 255), thickness, fx, fy, cx, cy);
@@ -465,9 +465,9 @@ int main (int argc, char **argv)
 
 	// Get the input output file parameters
 
-	// Indicates that rotation should be with respect to camera plane or with respect to camera
-	bool use_camera_plane_pose;
-	CLMTracker::get_video_input_output_params(files, depth_directories, pose_output_files, tracked_videos_output, landmark_output_files, landmark_3D_output_files, use_camera_plane_pose, arguments);
+	// Indicates that rotation should be with respect to camera or world coordinates
+	bool use_world_coordinates;
+	CLMTracker::get_video_input_output_params(files, depth_directories, pose_output_files, tracked_videos_output, landmark_output_files, landmark_3D_output_files, use_world_coordinates, arguments);
 
 	bool video_input = true;
 	bool verbose = true;
@@ -915,9 +915,9 @@ int main (int argc, char **argv)
 
 			// Work out the pose of the head from the tracked model
 			Vec6d pose_estimate_CLM;
-			if(use_camera_plane_pose)
+			if(use_world_coordinates)
 			{
-				pose_estimate_CLM = CLMTracker::GetCorrectedPoseCameraPlane(clm_model, fx, fy, cx, cy);
+				pose_estimate_CLM = CLMTracker::GetCorrectedPoseWorld(clm_model, fx, fy, cx, cy);
 			}
 			else
 			{
diff --git a/exe/MultiTrackCLM/MultiTrackCLM.cpp b/exe/MultiTrackCLM/MultiTrackCLM.cpp
index bc612431..b3be86de 100644
--- a/exe/MultiTrackCLM/MultiTrackCLM.cpp
+++ b/exe/MultiTrackCLM/MultiTrackCLM.cpp
@@ -142,8 +142,8 @@ int main (int argc, char **argv)
 	clm_parameters.push_back(clm_params);
 
 	// Get the input output file parameters
-	bool use_camera_plane_pose;
-	CLMTracker::get_video_input_output_params(files, depth_directories, pose_output_files, tracked_videos_output, landmark_output_files, landmark_3D_output_files, use_camera_plane_pose, arguments);
+	bool use_world_coords;
+	CLMTracker::get_video_input_output_params(files, depth_directories, pose_output_files, tracked_videos_output, landmark_output_files, landmark_3D_output_files, use_world_coords, arguments);
 
 	// Get camera parameters
 	CLMTracker::get_camera_params(device, fx, fy, cx, cy, arguments);
@@ -397,7 +397,7 @@ int main (int argc, char **argv)
 				int thickness = (int)std::ceil(2.0* ((double)captured_image.cols) / 640.0);
 
 				// Work out the pose of the head from the tracked model
-				Vec6d pose_estimate_CLM = CLMTracker::GetCorrectedPoseCameraPlane(clm_models[model], fx, fy, cx, cy);
+				Vec6d pose_estimate_CLM = CLMTracker::GetCorrectedPoseWorld(clm_models[model], fx, fy, cx, cy);
 
 				// Draw it in reddish if uncertain, blueish if certain
 				CLMTracker::DrawBox(disp_image, pose_estimate_CLM, Scalar((1-detection_certainty)*255.0,0, detection_certainty*255), thickness, fx, fy, cx, cy);
diff --git a/exe/SimpleCLM/SimpleCLM.cpp b/exe/SimpleCLM/SimpleCLM.cpp
index ab84579d..0b572517 100644
--- a/exe/SimpleCLM/SimpleCLM.cpp
+++ b/exe/SimpleCLM/SimpleCLM.cpp
@@ -120,7 +120,7 @@ void visualise_tracking(Mat& captured_image, Mat_& depth_image, const CLM
 	// A rough heuristic for box around the face width
 	int thickness = (int)std::ceil(2.0* ((double)captured_image.cols) / 640.0);
 
-	Vec6d pose_estimate_to_draw = CLMTracker::GetCorrectedPoseCameraPlane(clm_model, fx, fy, cx, cy);
+	Vec6d pose_estimate_to_draw = CLMTracker::GetCorrectedPoseWorld(clm_model, fx, fy, cx, cy);
 
 	// Draw it in reddish if uncertain, blueish if certain
 	CLMTracker::DrawBox(captured_image, pose_estimate_to_draw, Scalar((1 - vis_certainty)*255.0, 0, vis_certainty * 255), thickness, fx, fy, cx, cy);
@@ -173,9 +173,9 @@ int main (int argc, char **argv)
 
 	// Get the input output file parameters
 
-	// Indicates that rotation should be with respect to camera plane or with respect to camera
-	bool use_camera_plane_pose;
-	CLMTracker::get_video_input_output_params(files, depth_directories, pose_output_files, tracked_videos_output, landmark_output_files, landmark_3D_output_files, use_camera_plane_pose, arguments);
+	// Indicates that rotation should be with respect to world or camera coordinates
+	bool use_world_coordinates;
+	CLMTracker::get_video_input_output_params(files, depth_directories, pose_output_files, tracked_videos_output, landmark_output_files, landmark_3D_output_files, use_world_coordinates, arguments);
 
 	// The modules that are being used for tracking
 	CLMTracker::CLM clm_model(clm_parameters.model_location);
@@ -381,9 +381,9 @@ int main (int argc, char **argv)
 
 			// Work out the pose of the head from the tracked model
 			Vec6d pose_estimate_CLM;
-			if(use_camera_plane_pose)
+			if(use_world_coordinates)
 			{
-				pose_estimate_CLM = CLMTracker::GetCorrectedPoseCameraPlane(clm_model, fx, fy, cx, cy);
+				pose_estimate_CLM = CLMTracker::GetCorrectedPoseWorld(clm_model, fx, fy, cx, cy);
 			}
 			else
 			{
diff --git a/exe/SimpleCLMImg/SimpleCLMImg.cpp b/exe/SimpleCLMImg/SimpleCLMImg.cpp
index f9d837bf..9bc0cfd1 100644
--- a/exe/SimpleCLMImg/SimpleCLMImg.cpp
+++ b/exe/SimpleCLMImg/SimpleCLMImg.cpp
@@ -310,7 +310,7 @@ int main (int argc, char **argv)
 		// Loading image
 		Mat read_image = imread(file, -1);
-		
+
 		// Loading depth file if exists (optional)
 		Mat_<float> depth_image;
@@ -342,6 +342,7 @@ int main (int argc, char **argv)
 			fy = fx;
 		}
+
 		// if no pose defined we just use a face detector
 		if(bounding_boxes.empty())
 		{
@@ -368,7 +369,7 @@ int main (int argc, char **argv)
 				bool success = CLMTracker::DetectLandmarksInImage(grayscale_image, depth_image, face_detections[face], clm_model, clm_parameters);
 
 				// Estimate head pose and eye gaze
-				Vec6d headPose = CLMTracker::GetPoseCamera(clm_model, fx, fy, cx, cy);
+				Vec6d headPose = CLMTracker::GetCorrectedPoseWorld(clm_model, fx, fy, cx, cy);
 
 				// Gaze tracking, absolute gaze direction
 				Point3f gazeDirection0(0, 0, -1);
@@ -425,7 +426,7 @@ int main (int argc, char **argv)
 				if (clm_parameters.track_gaze)
 				{
-					Vec6d pose_estimate_to_draw = CLMTracker::GetCorrectedPoseCameraPlane(clm_model, fx, fy, cx, cy);
+					Vec6d pose_estimate_to_draw = CLMTracker::GetCorrectedPoseWorld(clm_model, fx, fy, cx, cy);
 
 					// Draw it in reddish if uncertain, blueish if certain
 					CLMTracker::DrawBox(read_image, pose_estimate_to_draw, Scalar(255.0, 0, 0), 3, fx, fy, cx, cy);
@@ -480,7 +481,7 @@ int main (int argc, char **argv)
 				CLMTracker::DetectLandmarksInImage(grayscale_image, bounding_boxes[i], clm_model, clm_parameters);
 
 				// Estimate head pose and eye gaze
-				Vec6d headPose = CLMTracker::GetPoseCamera(clm_model, fx, fy, cx, cy);
+				Vec6d headPose = CLMTracker::GetCorrectedPoseWorld(clm_model, fx, fy, cx, cy);
 
 				// Gaze tracking, absolute gaze direction
 				Point3f gazeDirection0(0, 0, -1);
@@ -515,7 +516,7 @@ int main (int argc, char **argv)
 				if (clm_parameters.track_gaze)
 				{
-					Vec6d pose_estimate_to_draw = CLMTracker::GetCorrectedPoseCameraPlane(clm_model, fx, fy, cx, cy);
+					Vec6d pose_estimate_to_draw = CLMTracker::GetCorrectedPoseWorld(clm_model, fx, fy, cx, cy);
 
 					// Draw it in reddish if uncertain, blueish if certain
 					CLMTracker::DrawBox(read_image, pose_estimate_to_draw, Scalar(255.0, 0, 0), 3, fx, fy, cx, cy);
diff --git a/exe/SimpleCLMImg/SimpleCLMImg_vs2013.vcxproj b/exe/SimpleCLMImg/SimpleCLMImg_vs2013.vcxproj
index 4b319aa0..bb1cb391 100644
--- a/exe/SimpleCLMImg/SimpleCLMImg_vs2013.vcxproj
+++ b/exe/SimpleCLMImg/SimpleCLMImg_vs2013.vcxproj
@@ -81,7 +81,7 @@
       true
       true
       WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)
-      $(SolutionDir)\lib\local\CLM\include;%(AdditionalIncludeDirectories)
+      $(SolutionDir)\lib\local\CLM\include;$(SolutionDir)\lib\local\FaceAnalyser\include;%(AdditionalIncludeDirectories)
       false
       StreamingSIMDExtensions2
diff --git a/lib/local/CLM/include/CLMTracker.h b/lib/local/CLM/include/CLMTracker.h
index 0bb3ebb6..e4ba7e6f 100644
--- a/lib/local/CLM/include/CLMTracker.h
+++ b/lib/local/CLM/include/CLMTracker.h
@@ -91,17 +91,18 @@ namespace CLMTracker
 
 	//================================================================
 	// Helper function for getting head pose from CLM parameters
-	// The head pose returned is in camera space, however, the orientation can be either with respect to camera itself or the camera plane
+	// Return the current estimate of the head pose, this can be either in camera or world coordinate space
 	// The format returned is [Tx, Ty, Tz, Eul_x, Eul_y, Eul_z]
 	Vec6d GetPoseCamera(const CLM& clm_model, double fx, double fy, double cx, double cy);
-	Vec6d GetPoseCameraPlane(const CLM& clm_model, double fx, double fy, double cx, double cy);
+	Vec6d GetPoseWorld(const CLM& clm_model, double fx, double fy, double cx, double cy);
 
-	// Getting a head pose estimate from the currently detected landmarks, with appropriate correction due to orthographic camera issue
+	// Getting a head pose estimate from the currently detected landmarks, with appropriate correction for perspective
 	// This is because rotation estimate under orthographic assumption is only correct close to the centre of the image
-	// These methods attempt to correct for that (Experimental)
+	// These methods attempt to correct for that
+	// The pose returned can be either in camera or world coordinates
 	// The format returned is [Tx, Ty, Tz, Eul_x, Eul_y, Eul_z]
 	Vec6d GetCorrectedPoseCamera(const CLM& clm_model, double fx, double fy, double cx, double cy);
-	Vec6d GetCorrectedPoseCameraPlane(const CLM& clm_model, double fx, double fy, double cx, double cy);
+	Vec6d GetCorrectedPoseWorld(const CLM& clm_model, double fx, double fy, double cx, double cy);
 
 	//===========================================================================
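To make the [Tx, Ty, Tz, Eul_x, Eul_y, Eul_z] format above concrete, here is a small illustrative helper (not part of this patch) that unpacks the returned Vec6d; the radian-to-degree conversion is purely for display and is not something the library does for you.

    #include <opencv2/core/core.hpp>
    #include <cstdio>

    // pose = [Tx, Ty, Tz, Eul_x, Eul_y, Eul_z]; translation in millimetres,
    // rotation as Euler angles in radians with the R = Rx * Ry * Rz convention.
    void print_pose(const cv::Vec6d& pose)
    {
        const double rad_to_deg = 180.0 / CV_PI;
        std::printf("T = (%.1f, %.1f, %.1f) mm\n", pose[0], pose[1], pose[2]);
        std::printf("R = (%.1f, %.1f, %.1f) deg\n",
                    pose[3] * rad_to_deg, pose[4] * rad_to_deg, pose[5] * rad_to_deg);
    }

Typical use would be print_pose(CLMTracker::GetCorrectedPoseWorld(clm_model, fx, fy, cx, cy)); after a successful detection.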
diff --git a/lib/local/CLM/include/CLM_utils.h b/lib/local/CLM/include/CLM_utils.h
index b7afc0c7..64b78a3e 100644
--- a/lib/local/CLM/include/CLM_utils.h
+++ b/lib/local/CLM/include/CLM_utils.h
@@ -70,7 +70,7 @@ namespace CLMTracker
 	// Helper functions for parsing the inputs
 	//=============================================================================================
 	void get_video_input_output_params(vector<string> &input_video_file, vector<string> &depth_dir,
-		vector<string> &output_pose_file, vector<string> &output_video_file, vector<string> &output_landmark_files, vector<string> &output_3D_landmark_files, bool& camera_plane_pose, vector<string> &arguments);
+		vector<string> &output_pose_file, vector<string> &output_video_file, vector<string> &output_landmark_files, vector<string> &output_3D_landmark_files, bool& world_coordinates_pose, vector<string> &arguments);
 
 	void get_camera_params(int &device, float &fx, float &fy, float &cx, float &cy, vector<string> &arguments);
 
diff --git a/lib/local/CLM/src/CLMTracker.cpp b/lib/local/CLM/src/CLMTracker.cpp
index 6732e6e9..434f98de 100644
--- a/lib/local/CLM/src/CLMTracker.cpp
+++ b/lib/local/CLM/src/CLMTracker.cpp
@@ -53,7 +53,7 @@
 using namespace CLMTracker;
 using namespace cv;
 
-// Getting a head pose estimate from the currently detected landmarks (rotation with respect to camera)
+// Getting a head pose estimate from the currently detected landmarks (rotation with respect to a point camera)
 // The format returned is [Tx, Ty, Tz, Eul_x, Eul_y, Eul_z]
 Vec6d CLMTracker::GetPoseCamera(const CLM& clm_model, double fx, double fy, doub
 	}
 }
 
-// Getting a head pose estimate from the currently detected landmarks (rotation with respect to camera plane)
+// Getting a head pose estimate from the currently detected landmarks (rotation in world coordinates)
 // The format returned is [Tx, Ty, Tz, Eul_x, Eul_y, Eul_z]
-Vec6d CLMTracker::GetPoseCameraPlane(const CLM& clm_model, double fx, double fy, double cx, double cy)
+Vec6d CLMTracker::GetPoseWorld(const CLM& clm_model, double fx, double fy, double cx, double cy)
 {
-	if(!clm_model.detected_landmarks.empty() && clm_model.params_global[0] != 0 && clm_model.tracking_initialised)
+	if(!clm_model.detected_landmarks.empty() && clm_model.params_global[0] != 0)
 	{
 
 		double Z = fx / clm_model.params_global[0];
 
@@ -107,11 +107,11 @@ Vec6d CLMTracker::GetPoseCameraPlane(const CLM& clm_model, double fx, double fy,
 
 // Getting a head pose estimate from the currently detected landmarks, with appropriate correction due to orthographic camera issue
 // This is because rotation estimate under orthographic assumption is only correct close to the centre of the image
-// This method returns a corrected pose estimate with respect to the camera plane (Experimental)
+// This method returns a corrected pose estimate with respect to world coordinates (Experimental)
 // The format returned is [Tx, Ty, Tz, Eul_x, Eul_y, Eul_z]
-Vec6d CLMTracker::GetCorrectedPoseCameraPlane(const CLM& clm_model, double fx, double fy, double cx, double cy)
+Vec6d CLMTracker::GetCorrectedPoseWorld(const CLM& clm_model, double fx, double fy, double cx, double cy)
 {
-	if(!clm_model.detected_landmarks.empty() && clm_model.params_global[0] != 0 && clm_model.tracking_initialised)
+	if(!clm_model.detected_landmarks.empty() && clm_model.params_global[0] != 0)
 	{
 		// This is used as an initial estimate for the iterative PnP algorithm
 		double Z = fx / clm_model.params_global[0];
 
@@ -152,9 +152,8 @@ Vec6d CLMTracker::GetCorrectedPoseCameraPlane(const CLM& clm_model, double fx, d
 	}
 }
 
-// Getting a head pose estimate from the currently detected landmarks, with appropriate correction due to orthographic camera issue
-// This is because rotation estimate under orthographic assumption is only correct close to the centre of the image
-// This method returns a corrected pose estimate with respect to a point camera (NOTE not the camera plane) (Experimental)
+// Getting a head pose estimate from the currently detected landmarks, with appropriate correction due to perspective projection
+// This method returns a corrected pose estimate with respect to a point camera (NOTE not the world coordinates) (Experimental)
 // The format returned is [Tx, Ty, Tz, Eul_x, Eul_y, Eul_z]
 Vec6d CLMTracker::GetCorrectedPoseCamera(const CLM& clm_model, double fx, double fy, double cx, double cy)
 {
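The "Z = fx / clm_model.params_global[0]" lines in the hunks above recover depth from the model's 2D scaling parameter before any PnP refinement. A standalone sketch of that weak-perspective back-projection follows; the function and variable names here are illustrative, not the library's.

    #include <opencv2/core/core.hpp>

    // Under a weak-perspective camera, an object of known metric size appears
    // scaled by roughly s = fx / Z, so depth can be recovered as Z = fx / s.
    // The X and Y offsets then follow from the pinhole model.
    cv::Vec3d backproject_centre(double scale, double u, double v,
                                 double fx, double fy, double cx, double cy)
    {
        double Z = fx / scale;         // depth, in the same units as the 3D model (mm here)
        double X = (u - cx) * Z / fx;  // horizontal offset from the optical axis
        double Y = (v - cy) * Z / fy;  // vertical offset from the optical axis
        return cv::Vec3d(X, Y, Z);
    }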
diff --git a/lib/local/CLM/src/CLM_utils.cpp b/lib/local/CLM/src/CLM_utils.cpp
index 3c7467cf..409f6525 100644
--- a/lib/local/CLM/src/CLM_utils.cpp
+++ b/lib/local/CLM/src/CLM_utils.cpp
@@ -98,7 +98,7 @@ void create_directories(string output_path)
 // Extracting the following command line arguments -f, -fd, -op, -of, -ov (and possible ordered repetitions)
 void get_video_input_output_params(vector<string> &input_video_files, vector<string> &depth_dirs,
-	vector<string> &output_pose_files, vector<string> &output_video_files, vector<string> &output_2d_landmark_files, vector<string> &output_3D_landmark_files, bool& camera_plane_pose, vector<string> &arguments)
+	vector<string> &output_pose_files, vector<string> &output_video_files, vector<string> &output_2d_landmark_files, vector<string> &output_3D_landmark_files, bool& world_coordinates, vector<string> &arguments)
 {
 	bool* valid = new bool[arguments.size()];
@@ -107,8 +107,8 @@ void get_video_input_output_params(vector &input_video_files, vector
+cd('../');
+
+%% Gaze
+cd('Gaze Experiments');
+extract_mpii_gaze_test
+assert(median_error < 9.5)
+cd('../');
+
+%% Demos
+cd('Demos');
+run_demo_images;
+run_demo_videos;
+run_demo_video_multi;
+feature_extraction_demo_vid;
+feature_extraction_demo_img_seq;
+gaze_extraction_demo_vid;
+cd('../');
+toc
\ No newline at end of file
diff --git a/matlab_runners/Gaze Experiments/extract_mpii_gaze_test.m b/matlab_runners/Gaze Experiments/extract_mpii_gaze_test.m
index bc1ad9fe..d6a9f31f 100644
--- a/matlab_runners/Gaze Experiments/extract_mpii_gaze_test.m
+++ b/matlab_runners/Gaze Experiments/extract_mpii_gaze_test.m
@@ -92,4 +92,4 @@
 f = fopen('mpii_1500_errs.txt', 'w');
 fprintf(f, 'Mean error, median error\n');
 fprintf(f, '%.3f, %.3f\n', mean_error, median_error);
-fclose(f);
+fclose(f);
\ No newline at end of file
diff --git a/matlab_runners/Gaze Experiments/mpii_1500_errs.mat b/matlab_runners/Gaze Experiments/mpii_1500_errs.mat
index ae5789b5..3b7eb023 100644
Binary files a/matlab_runners/Gaze Experiments/mpii_1500_errs.mat and b/matlab_runners/Gaze Experiments/mpii_1500_errs.mat differ
diff --git a/matlab_runners/Gaze Experiments/mpii_1500_errs.txt b/matlab_runners/Gaze Experiments/mpii_1500_errs.txt
index b9fa6ae4..f9f6fa73 100644
--- a/matlab_runners/Gaze Experiments/mpii_1500_errs.txt
+++ b/matlab_runners/Gaze Experiments/mpii_1500_errs.txt
@@ -1,2 +1,2 @@
 Mean error, median error
-9.952, 9.325
+9.955, 9.334
diff --git a/matlab_runners/Gaze Experiments/run_mpii_gaze_test.m b/matlab_runners/Gaze Experiments/run_mpii_gaze_test.m
deleted file mode 100644
index ed73a16f..00000000
--- a/matlab_runners/Gaze Experiments/run_mpii_gaze_test.m
+++ /dev/null
@@ -1,110 +0,0 @@
-clear
-
-curr_dir = cd('.');
-
-% Replace this with your downloaded 300-W train data
-if(exist([getenv('USERPROFILE') '/Dropbox/AAM/eye_clm/mpii_data/'], 'file'))
-    database_root = [getenv('USERPROFILE') '/Dropbox/AAM/eye_clm/mpii_data/'];
-else
-    fprintf('MPII gaze dataset not found\n');
-end
-output_loc = './gaze_estimates_MPII/';
-if(~exist(output_loc, 'dir'))
-    mkdir(output_loc);
-end
-
-%% Perform actual gaze predictions
-command = sprintf('"../../Release/FeatureExtraction.exe" -fx 1028 -fy 1028 ');
-p_dirs = dir([database_root, 'p*']);
-
-parfor p=1:numel(p_dirs)
-    tic
-
-    all_files = dir([database_root, p_dirs(p).name, '/*.jpg']);
-
-    batch = 25;
-
-    for i=1:batch:numel(all_files)
-
-        command_c = command;
-        for k=i:i+batch
-            if(k > numel(all_files))
-                break;
-            end
-            file = [database_root, p_dirs(p).name, '/', all_files(k).name];
-
-            input_loc = ['-f "', file, '" '];
-            command_c = cat(2, command_c, input_loc);
-            out_file = [' -ogaze "', output_loc, '/', all_files(k).name '.gaze.txt" '];
-            command_c = cat(2, command_c, out_file);
-
-        end
-
-        command_c = cat(2, command_c, ' -clmwild ');
-        dos(command_c);
-
-    end
-    toc
-
-end
-%%
-
-% Extract the results
-predictions_l = zeros(750, 3);
-predictions_r = zeros(750, 3);
-gt_l = zeros(750, 3);
-gt_r = zeros(750, 3);
-
-angle_err_l = zeros(750,1);
-angle_err_r = zeros(750,1);
-
-p_dirs = dir([database_root, 'p*']);
-curr = 1;
-for p=1:numel(p_dirs)
-    load([database_root, p_dirs(p).name, '/Data.mat']);
-
-    for i=1:size(filenames, 1)
-
-        fname = sprintf('%s/%d_%d_%d_%d_%d_%d_%d.jpg.gaze.txt', output_loc,...
-            filenames(i,1), filenames(i,2), filenames(i,3), filenames(i,4),...
-            filenames(i,5), filenames(i,6), filenames(i,7));
-        try
-            A = dlmread(fname, ',', 'E2..J2');
-            valid = dlmread(fname, ',', 'D2..D2');
-        catch
-            A = zeros(1,6);
-            A(1,3) = -1;
-            A(1,6) = -1;
-        end
-
-        head_rot = headpose(i,1:3);
-
-        predictions_r(curr,:) = A(1:3);
-        predictions_l(curr,:) = A(4:6);
-
-        if(~valid)
-            predictions_r(curr,:) = [0,0,-1];
-            predictions_l(curr,:) = [0,0,-1];
-        end
-
-        gt_r(curr,:) = data.right.gaze(i,:)';
-        gt_r(curr,:) = gt_r(curr,:) / norm(gt_r(curr,:));
-        gt_l(curr,:) = data.left.gaze(i,:)';
-        gt_l(curr,:) = gt_l(curr,:) / norm(gt_l(curr,:));
-
-        angle_err_l(curr) = acos(predictions_l(curr,:) * gt_l(curr,:)') * 180/pi;
-        angle_err_r(curr) = acos(predictions_r(curr,:) * gt_r(curr,:)') * 180/pi;
-
-        curr = curr + 1;
-    end
-
-end
-all_errors = cat(1, angle_err_l, angle_err_r);
-mean_error = mean(all_errors);
-median_error = median(all_errors);
-save('mpii_1500_errs.mat', 'all_errors', 'mean_error', 'median_error');
-
-f = fopen('mpii_1500_errs.txt', 'w');
-fprintf(f, 'Mean error, median error\n');
-fprintf(f, '%.3f, %.3f\n', mean_error, median_error);
-fclose(f);
diff --git a/matlab_runners/Head Pose Experiments/results/Pose_clm_ccnf_v1.mat b/matlab_runners/Head Pose Experiments/results/Pose_clm_ccnf_v1.mat
index b5a585f5..3516c53b 100644
Binary files a/matlab_runners/Head Pose Experiments/results/Pose_clm_ccnf_v1.mat and b/matlab_runners/Head Pose Experiments/results/Pose_clm_ccnf_v1.mat differ
diff --git a/matlab_runners/Head Pose Experiments/results/Pose_clm_ccnf_v1.txt b/matlab_runners/Head Pose Experiments/results/Pose_clm_ccnf_v1.txt
index 6392813d..46d21827 100644
--- a/matlab_runners/Head Pose Experiments/results/Pose_clm_ccnf_v1.txt
+++ b/matlab_runners/Head Pose Experiments/results/Pose_clm_ccnf_v1.txt
@@ -1,4 +1,4 @@
 Dataset and model, pitch, yaw, roll, mean, median
 biwi error ccnf general: 7.976, 5.625, 4.452, 6.018, 2.624
-bu error ccnf general: 2.758, 3.329, 2.250, 2.779, 1.963
+bu error ccnf general: 2.762, 4.103, 2.568, 3.145, 2.117
 ict error ccnf general: 3.620, 3.609, 3.625, 3.618, 2.028
diff --git a/matlab_runners/Head Pose Experiments/results/Pose_clm_svr_v1.mat b/matlab_runners/Head Pose Experiments/results/Pose_clm_svr_v1.mat
index 44be4db4..f72446dd 100644
Binary files a/matlab_runners/Head Pose Experiments/results/Pose_clm_svr_v1.mat and b/matlab_runners/Head Pose Experiments/results/Pose_clm_svr_v1.mat differ