From 97c1afe143bdfd21e6e165b82295f91ee90f1434 Mon Sep 17 00:00:00 2001 From: Mario Kleiner Date: Mon, 7 Sep 2015 07:26:09 +0200 Subject: [PATCH] VR support: Add OculusVRToolbox and associated basic enablement. Psychtoolbox/PsychHardware/OculusVRToolbox/ contains M-Files with functions for use of Oculus VR hardware. Currently it contains the basic high-level support driver PsychOculusVR(), which itself uses the PsychOculusVRCore mex file. A new pair of vertex- and fragment shaders OculusRiftCorrectionShader.frag.txt and OculusRiftCorrectionShader.vert.txt provides plugins for Screen's imaging pipeline to do the necessary client distortion rendering to correct for the Rift's lens distortion, chromatic aberration and vignetting. PsychImaging supports a new task 'UseOculusVRHMD' to setup an onscreen window for display on the Oculus Rift. It does necessary stereo setup, panel fitter setup, imaging pipeline setup, and then calls into helper functions in PsychOculusVR.m to do the work of setting up shaders, display lists etc. for configuring the post-processing in PTB's imaging pipeline. OculusVRDemo.m is the start of a very hacky demo, used for testing and debugging during development. -> All of these will change substantially. This is just a backup commit to prevent loss of the current working state in case of mistakes or disasters. -> Currently we can display a basic properly undistorted 2D stereo view on the Rift DK2 under Linux. However, proper scaling is not yet worked out and is highly hacky. This needs work. Actual 3D rendering is missing. 
--- Psychtoolbox/PsychDemos/OculusVRDemo.m | 53 +++ .../PsychGLImageProcessing/PsychImaging.m | 105 +++++- .../OculusVRToolbox/PsychOculusVR.m | 334 ++++++++++++++++++ .../OculusRiftCorrectionShader.frag.txt | 16 + .../OculusRiftCorrectionShader.vert.txt | 50 +++ 5 files changed, 557 insertions(+), 1 deletion(-) create mode 100644 Psychtoolbox/PsychDemos/OculusVRDemo.m create mode 100644 Psychtoolbox/PsychHardware/OculusVRToolbox/PsychOculusVR.m create mode 100644 Psychtoolbox/PsychOpenGL/PsychGLSLShaders/OculusRiftCorrectionShader.frag.txt create mode 100644 Psychtoolbox/PsychOpenGL/PsychGLSLShaders/OculusRiftCorrectionShader.vert.txt diff --git a/Psychtoolbox/PsychDemos/OculusVRDemo.m b/Psychtoolbox/PsychDemos/OculusVRDemo.m new file mode 100644 index 0000000000..358ae638b4 --- /dev/null +++ b/Psychtoolbox/PsychDemos/OculusVRDemo.m @@ -0,0 +1,53 @@ +function OculusVRDemo +% OculusVRDemo - A very basic demo for the Oculus VR Rift DK2 + +% History: +% 05-Sep-2015 mk Written. + +% For debugging only: +%PsychDebugWindowConfiguration; + +% Setup unified keymapping and unit color range: +PsychDefaultSetup(2); + +% Select screen with highest id as Oculus output display: +screenid = max(Screen('Screens')); + +% Check if at least one Oculus HMD is connected and available: +if PsychOculusVR('GetCount') > 0 + % Yes. Open and initialize connection to first detected HMD: + fprintf('Opening the first connected Oculus VR headset.\n'); + oculus = PsychOculusVR('Open', 0); +else + % No. Open an emulated/simulated HMD for basic testing and debugging: + fprintf('No HMD detected. 
Opening a simulated HMD.\n'); + oculus = PsychOculusVR('Open', -1); +end + +% Setup default rendering parameters: +PsychOculusVR('SetupRenderingParameters', oculus); + +% Open our fullscreen onscreen window with black background clear color: +PsychImaging('PrepareConfiguration'); +PsychImaging('AddTask', 'General', 'UseOculusVRHMD', oculus); +[win, rect] = PsychImaging('OpenWindow', screenid, 0, [1680, 0, 1680+1080, 1920]) + +for eye = 0:1 + Screen('SelectStereoDrawBuffer', win, eye); + Screen('FillRect', win, [0 0 1]); + Screen('FrameRect', win, [1 1 0], [], 20); + Screen('FillOval', win, [1 1 1]); + Screen('TextSize', win, 200); + DrawFormattedText(win, sprintf('HELLO\nWORLD!\n%i', eye), 'center', 'center', [0 1 0]); + %Screen('FillOval', win, [0 1 0 1]); +end + +Screen('Flip', win); + +KbStrokeWait; +sca; + +% Close HMD, shutdown OculusVR driver: +PsychOculusVR('Close'); + +end diff --git a/Psychtoolbox/PsychGLImageProcessing/PsychImaging.m b/Psychtoolbox/PsychGLImageProcessing/PsychImaging.m index 058ad37cf2..c092fb466b 100644 --- a/Psychtoolbox/PsychGLImageProcessing/PsychImaging.m +++ b/Psychtoolbox/PsychGLImageProcessing/PsychImaging.m @@ -1191,6 +1191,27 @@ % RemapMouse() to perform the neccessary coordinate transformation. % % +% * 'UseOculusVRHMD' Display this onscreen window on a Oculus VR Head mounted +% display (HMD), e.g., the Rift DK1 or Rift DK2. This enables display of +% stereoscopic visual stimuli on Virtual reality headsets from Oculus VR. +% You need to have the Oculus VR runtime installed on your machine for this +% to work. +% +% Usage: +% +% 1. Open a connection to a Oculus HMD and get a handle for the device: +% hmd = PsychOculusVR('Open' ...); +% +% 2. Perform basic configuration of the HMD via PsychOculusVR. +% +% 3. 
Add a PsychImaging task for the HMD and pass in its device handle 'hmd': +% PsychImaging('AddTask', 'General', 'UseOculusVRHMD', hmd); +% +% This sequence will perform the necessary setup of panel fitter, stereo display +% mode and image post-processing for geometry correction, color aberration +% correction and vignette correction for a fullscreen window on the HMD. +% +% % * More actions will be supported in the future. If you can think of an % action of common interest not yet supported by this framework, please % file a feature request on our Wiki (Mainpage -> Feature Requests). @@ -1326,6 +1347,8 @@ % 17.09.2014 Add 'Native16BitFramebuffer' support for Linux + FOSS + AMD. (MK) % 03.11.2014 Make panelfitter compatible with Retina displays. (MK) % 04.11.2014 Add new task 'UseRetinaResolution' for Retina displays. (MK) +% 06.09.2015 Add basic support for "Client distortion rendering" on the Oculus VR +% Rift DK1/DK2 virtual reality headsets. (MK) persistent configphase_active; persistent reqs; @@ -1337,10 +1360,12 @@ % These flags are global - needed in subfunctions as well (ugly ugly coding): global ptb_outputformatter_icmAware; global isASideBySideConfig; +global maxreqarg; if isempty(configphase_active) configphase_active = 0; ptb_outputformatter_icmAware = 0; + maxreqarg = 10; end if nargin < 1 || isempty(cmd) @@ -1399,7 +1424,6 @@ % extend each requirement vector to some number of max elements, so all % rows in the cell array have the same length: x = varargin; - maxreqarg = 10; if length(x) < maxreqarg for i=length(x)+1:maxreqarg x{i}=''; @@ -2275,6 +2299,7 @@ global ptb_outputformatter_icmAware; global psych_gpgpuapi; global isASideBySideConfig; +global maxreqarg; % Reset flag to "no": isASideBySideConfig = 0; @@ -2618,6 +2643,56 @@ imagingMode = mor(imagingMode, kPsychNeedOtherStreamInput); end +% Want to use a Oculus VR Head mounted display (HMD), e.g., Rift DK1/DK2? 
+floc = find(mystrcmp(reqs, 'UseOculusVRHMD')); +if ~isempty(floc) + % Yes: We need a peculiar configuration, which involves the panelfitter + % to allow for a custom resolution of the virtual framebuffers for left + % eye and right eye - much higher than output resolution, so we have enough + % excess information to deal with geometric undistortion warps, color aberration, + % and dynamic display warping for head motion correction. We also need a + % special stereo processing shader that does geometric distortion correction, + % color aberration correction, vignetting correction, and dynamic display warping + % in one go, as processing speed is crucial for VR experience. + [rows cols] = ind2sub(size(reqs), floc(1)); + row = rows(1); + + % Extract first parameter - This should be the handle of the Oculus VR device: + oculusHandle = reqs{row, 3}; + + % Verify it is already open: + if ~PsychOculusVR('IsOpen', oculusHandle) + error('UseOculusVRHMD: Invalid Oculus HMD handle specified. No such device opened.'); + end + + % We must use stereomode 6, so we get separate draw buffers for left and + % right eye, and the stereo compositor (merger) to fuse both eyes into a + % single output framebuffer, but with all internal buffers at at least + % full output framebuffer resolution. 
This will generate anaglyph shaders + % which we will need to replace with a very special shader for the Oculus HMD: + stereoMode = 6; + + % We need fast backing store support for virtual framebuffers: + imagingMode = mor(imagingMode, kPsychNeedFastBackingStore); + + % Append our generated 'UsePanelFitter' task to setup the panelfitter for + % our needs at 'OpenWindow' time: + clientRes = PsychOculusVR('GetClientRenderbufferSize', oculusHandle); + x{1} = 'General'; + x{2} = 'UsePanelFitter'; + x{3} = clientRes; + x{4} = 'Full'; + + % Pad to maxreqarg arguments: + if length(x) < maxreqarg + for i=length(x)+1:maxreqarg + x{i}=''; + end + end + + reqs = [reqs ; x]; +end + % Display replication needed? if ~isempty(find(mystrcmp(reqs, 'MirrorDisplayTo2ndOutputHead'))) % Yes: Must use dual window output mode. This implies @@ -4107,6 +4182,34 @@ end % --- End of setup for stereo crosstalk reduction --- +% --- Custom processing setup for the stereo compositor --- + +% --- Oculus VR Headset support (e.g., Rift DK1/DK2 etc.) +floc = find(mystrcmp(reqs, 'UseOculusVRHMD')); +if ~isempty(floc) + [row col] = ind2sub(size(reqs), floc); + + % Extract first parameter - This should be the handle of the Oculus VR device: + oculusHandle = reqs{row, 3}; + + % Verify it is already open: + if ~PsychOculusVR('IsOpen', oculusHandle) + sca; + error('In UseOculusVRHMD: Invalid Oculus HMD handle specified. No such device opened.'); + end + + % Ok, perform setup after onscreen window is open, e.g., setting up the special + % shaders for the stereo compositor: + if ~PsychOculusVR('PerformPostWindowOpenSetup', oculusHandle, win) + sca; + error('In UseOculusVRHMD: Failed to setup image post-processing for the Oculus VR HMD.'); + end + + % Ready to rock the HMD! +end +% --- End of Oculus VR Headset support code. + + % --- FROM HERE ON ONLY OUTPUT FORMATTERS, NOTHING ELSE!!! 
--- % diff --git a/Psychtoolbox/PsychHardware/OculusVRToolbox/PsychOculusVR.m b/Psychtoolbox/PsychHardware/OculusVRToolbox/PsychOculusVR.m new file mode 100644 index 0000000000..85404ea71c --- /dev/null +++ b/Psychtoolbox/PsychHardware/OculusVRToolbox/PsychOculusVR.m @@ -0,0 +1,334 @@ +function varargout = PsychOculusVR(cmd, varargin) + +% Global GL handle for access to OpenGL constants needed in setup: +global GL; + +persistent hmd; + +if nargin < 1 || isempty(cmd) + help PsychOculusVR; + fprintf('\n\nAlso available are functions from PsychOculusVRCore:\n'); + PsychOculusVRCore; + return; +end + +% Open a HMD: +if strcmpi(cmd, 'Open') + handle = PsychOculusVRCore('Open', varargin{:}); + hmd{handle}.open = 1; + varargout{1} = handle; + return; +end + +if strcmpi(cmd, 'IsOpen') + handle = varargin{1}; + if (length(hmd) >= handle) && (handle > 0) && hmd{handle}.open + varargout{1} = 1; + else + varargout{1} = 0; + end + + return; +end + +if strcmpi(cmd, 'SetupRenderingParameters') + handle = varargin{1}; + + % Query parameters for left eye view: + [hmd{handle}.rbwidth, hmd{handle}.rbheight, vx, vy, vw, vh, ptx, pty, hsx, hsy, hsz, meshVL, meshIL, uvScale(1), uvScale(2), uvOffset(1), uvOffset(2), eyeRotStartMatrix, eyeRotEndMatrix] = PsychOculusVR ('GetFovTextureSize', handle, 0, varargin{2:end}); + %scatter(meshVL(1,:), meshVL(2,:)); + hmd{handle}.viewportLeft = [vx, vy, vw, vh]; + hmd{handle}.PixelsPerTanAngleAtCenterLeft = [ptx, pty]; + hmd{handle}.HmdToEyeViewOffsetLeft = [hsx, hsy, hsz]; + hmd{handle}.meshVerticesLeft = meshVL; + hmd{handle}.meshIndicesLeft = meshIL; + hmd{handle}.uvScaleLeft = uvScale; + hmd{handle}.uvOffsetLeft = uvOffset; + hmd{handle}.eyeRotStartMatrixLeft = eyeRotStartMatrix; + hmd{handle}.eyeRotEndMatrixLeft = eyeRotEndMatrix; + +scaleL=uvScale +offsetL=uvOffset + +%rotStartL = eyeRotStartMatrix +%rotEndL = eyeRotEndMatrix + + + % Query parameters for right eye view: + [hmd{handle}.rbwidth, hmd{handle}.rbheight, vx, vy, vw, vh, ptx, pty, 
hsx, hsy, hsz, meshVR, meshIR, uvScale(1), uvScale(2), uvOffset(1), uvOffset(2), eyeRotStartMatrix, eyeRotEndMatrix] = PsychOculusVR ('GetFovTextureSize', handle, 1, varargin{2:end}); + %scatter(meshVR(1,:), meshVR(2,:)); + hmd{handle}.viewportRight = [vx, vy, vw, vh]; + hmd{handle}.PixelsPerTanAngleAtCenterRight = [ptx, pty]; + hmd{handle}.HmdToEyeViewOffsetRight = [hsx, hsy, hsz]; + hmd{handle}.meshVerticesRight = meshVR; + hmd{handle}.meshIndicesRight = meshIR; + hmd{handle}.uvScaleRight = uvScale; + hmd{handle}.uvOffsetRight = uvOffset; + hmd{handle}.eyeRotStartMatrixRight = eyeRotStartMatrix; + hmd{handle}.eyeRotEndMatrixRight = eyeRotEndMatrix; + +scaleR=uvScale +offsetR=uvOffset + +%rotStartR = eyeRotStartMatrix +%rotEndR = eyeRotEndMatrix + + return; +end + +if strcmpi(cmd, 'GetClientRenderbufferSize') + handle = varargin{1}; + varargout{1} = [hmd{handle}.rbwidth, hmd{handle}.rbheight]; + return; +end + +if strcmpi(cmd, 'PerformPostWindowOpenSetup') + + % Must have global GL constants: + if isempty(GL) + varargout{1} = 0; + warning('PTB internal error in PsychOculusVR: GL struct not initialized?!?'); + return; + end + + % Oculus device handle: + handle = varargin{1}; + + % Onscreen window handle: + win = varargin{2}; + + [slot shaderid blittercfg voidptr glsl] = Screen('HookFunction', win, 'Query', 'StereoCompositingBlit', 'StereoCompositingShaderAnaglyph'); + if slot == -1 + varargout{1} = 0; + warning('Either the imaging pipeline is not enabled for given onscreen window, or it is not switched to Anaglyph stereo mode.'); + return; + end + + if glsl == 0 + varargout{1} = 0; + warning('Anaglyph shader is not operational for unknown reason. 
Sorry...'); + return; + end + + % Remove old standard anaglyph shader: + Screen('HookFunction', win, 'Remove', 'StereoCompositingBlit', slot); + + % Build the unwarp mesh display list within the OpenGL context of Screen(): + Screen('BeginOpenGL', win, 1); + + % Left eye setup: + % --------------- + + % Build a display list that corresponds to the current calibration, + % drawing the warp-mesh once, so it gets recorded in the display list: + gldLeft = glGenLists(1); + glNewList(gldLeft, GL.COMPILE); + + % Caution: Must *copy* the different rows with data into *separate* variables, so + % the vertex array pointers to the different variables actually point to something + % persistent! If we'd pass the meshVerticesLeft() subarrays directly to glTexCoordPointer + % and friends then Octave/Matlab would just create a temporary copy of the extracted + % rows, OpenGL would retrieve/assign pointers to those temporary copies, but then + % at the end of a glVertexPointer/glTexCoordPointer call, those temporary copies would + % go out of scope and Octave/Matlab would potentially garbage collect the variables again + % *before* the call to glDrawElements permanently records the content of the variables. + % The net results would be stale/dangling pointers, random data trash getting read from + % memory and recorded in the display list - and thereby corrupted rendering! This hazard + % doesn't exist within regular Octave/Matlab scripts, because the interpreter doesn't + % deal with memory pointers. It is a unique hazard from the combination of C memory + % pointers for OpenGL and Octave/Matlabs copy-on-write/data-sharing/garbage collection + % behaviour. 
When we are at it, lets also cast the data to single() precision floating + % point, to save some memory: + vertexpos = single(hmd{handle}.meshVerticesLeft(1:4, :)); + texR = single(hmd{handle}.meshVerticesLeft(5:6, :)); + texG = single(hmd{handle}.meshVerticesLeft(7:8, :)); + texB = single(hmd{handle}.meshVerticesLeft(9:10, :)); + + mintexxL = min(texR(1,:)) + maxtexxL = max(texR(1,:)) + mintexyL = min(texR(2,:)) + maxtexyL = max(texR(2,:)) + + % vertex xy encodes 2D position from rows 1 and 2, z encodes timeWarp interpolation factors + % from row 3 and w encodes vignette correction factors from row 4: + glEnableClientState(GL.VERTEX_ARRAY); + glVertexPointer(4, GL.FLOAT, 0, vertexpos); + + % Need separate texture coordinate sets for the three color channel to encode + % channel specific color aberration correction sampling: + + % TexCoord set 0 encodes coordinates for the Red color channel: + glClientActiveTexture(GL.TEXTURE0); + glEnableClientState(GL.TEXTURE_COORD_ARRAY); + glTexCoordPointer(2, GL.FLOAT, 0, texR); + + % TexCoord set 1 encodes coordinates for the Green color channel: + glClientActiveTexture(GL.TEXTURE1); + glEnableClientState(GL.TEXTURE_COORD_ARRAY); + glTexCoordPointer(2, GL.FLOAT, 0, texG); + + % TexCoord set 2 encodes coordinates for the Blue color channel: + glClientActiveTexture(GL.TEXTURE2); + glEnableClientState(GL.TEXTURE_COORD_ARRAY); + glTexCoordPointer(2, GL.FLOAT, 0, texB); + + % Draw the mesh. 
This records the content from all the variables persistently into + % the display list storage, so they can be freed afterwards: + glDrawElements(GL.TRIANGLES, length(hmd{handle}.meshIndicesLeft), GL.UNSIGNED_SHORT, uint16(hmd{handle}.meshIndicesLeft)); + + % Disable stuff, so we can release or recycle the variables: + glClientActiveTexture(GL.TEXTURE3); + glDisableClientState(GL.TEXTURE_COORD_ARRAY); + + glClientActiveTexture(GL.TEXTURE2); + glDisableClientState(GL.TEXTURE_COORD_ARRAY); + + glClientActiveTexture(GL.TEXTURE1); + glDisableClientState(GL.TEXTURE_COORD_ARRAY); + + glClientActiveTexture(GL.TEXTURE0); + glDisableClientState(GL.TEXTURE_COORD_ARRAY); + + glDisableClientState(GL.VERTEX_ARRAY); + + % Left eye display list done. + glEndList; + + % Right eye setup: + % --------------- + + % Build a display list that corresponds to the current calibration, + % drawing the warp-mesh once, so it gets recorded in the display list: + gldRight = glGenLists(1); + glNewList(gldRight, GL.COMPILE); +global texR; +global texG; +global texB; + vertexpos = single(hmd{handle}.meshVerticesRight(1:4, :)); + texR = single(hmd{handle}.meshVerticesRight(5:6, :)); + texG = single(hmd{handle}.meshVerticesRight(7:8, :)); + texB = single(hmd{handle}.meshVerticesRight(9:10, :)); + + mintexxR = min(texR(1,:)) + maxtexxR = max(texR(1,:)) + mintexyR = min(texR(2,:)) + maxtexyR = max(texR(2,:)) + + % vertex xy encodes 2D position from rows 1 and 2, z encodes timeWarp interpolation factors + % from row 3 and w encodes vignette correction factors from row 4: + glEnableClientState(GL.VERTEX_ARRAY); + glVertexPointer(4, GL.FLOAT, 0, vertexpos); + + % Need separate texture coordinate sets for the three color channel to encode + % channel specific color aberration correction sampling: + + % TexCoord set 0 encodes coordinates for the Red color channel: + glClientActiveTexture(GL.TEXTURE0); + glEnableClientState(GL.TEXTURE_COORD_ARRAY); + glTexCoordPointer(2, GL.FLOAT, 0, texR); + + % TexCoord 
set 1 encodes coordinates for the Green color channel: + glClientActiveTexture(GL.TEXTURE1); + glEnableClientState(GL.TEXTURE_COORD_ARRAY); + glTexCoordPointer(2, GL.FLOAT, 0, texG); + + % TexCoord set 2 encodes coordinates for the Blue color channel: + glClientActiveTexture(GL.TEXTURE2); + glEnableClientState(GL.TEXTURE_COORD_ARRAY); + glTexCoordPointer(2, GL.FLOAT, 0, texB); + + % Draw the mesh. This records the content from all the variables persistently into + % the display list storage, so they can be freed afterwards: + glDrawElements(GL.TRIANGLES, length(hmd{handle}.meshIndicesRight), GL.UNSIGNED_SHORT, uint16(hmd{handle}.meshIndicesRight)); + + % Disable stuff, so we can release or recycle the variables: + glClientActiveTexture(GL.TEXTURE3); + glDisableClientState(GL.TEXTURE_COORD_ARRAY); + + glClientActiveTexture(GL.TEXTURE2); + glDisableClientState(GL.TEXTURE_COORD_ARRAY); + + glClientActiveTexture(GL.TEXTURE1); + glDisableClientState(GL.TEXTURE_COORD_ARRAY); + + glClientActiveTexture(GL.TEXTURE0); + glDisableClientState(GL.TEXTURE_COORD_ARRAY); + + glDisableClientState(GL.VERTEX_ARRAY); + + % Right eye display list done. 
+ glEndList; + + Screen('EndOpenGL', win); + + texwidth = RectWidth(Screen('Rect', win, 1)); + texheight = RectHeight(Screen('Rect', win, 1)); + + % Setup left eye shader: + glsl = LoadGLSLProgramFromFiles('OculusRiftCorrectionShader'); + glUseProgram(glsl); + glUniform1i(glGetUniformLocation(glsl, 'Image'), 0); + glUniform2f(glGetUniformLocation(glsl, 'EyeToSourceUVOffset'), hmd{handle}.uvOffsetLeft(1) * texwidth, hmd{handle}.uvOffsetLeft(2) * texheight); + %glUniform2f(glGetUniformLocation(glsl, 'EyeToSourceUVOffset'), texwidth/2, texheight/2); + glUniform2f(glGetUniformLocation(glsl, 'EyeToSourceUVScale'), hmd{handle}.uvScaleLeft(1) * texwidth, hmd{handle}.uvScaleLeft(2) * texheight); + %glUniform2f(glGetUniformLocation(glsl, 'EyeToSourceUVScale'), texwidth/2, texheight/2); + glUniformMatrix4fv(glGetUniformLocation(glsl, 'EyeRotationStart'), 1, 1, diag([1 1 1 1])); + glUniformMatrix4fv(glGetUniformLocation(glsl, 'EyeRotationEnd'), 1, 1, diag([1 1 1 1])); + %glUniformMatrix4fv(glGetUniformLocation(glsl, 'EyeRotationStart'), 1, 1, hmd{handle}.eyeRotStartMatrixLeft); + %glUniformMatrix4fv(glGetUniformLocation(glsl, 'EyeRotationEnd'), 1, 1, hmd{handle}.eyeRotEndMatrixLeft); + glUseProgram(0); + + % Insert it at former position of the old shader: + posstring = sprintf('InsertAt%iShader', slot); + + % xOffset and yOffset encode the viewport location and size for the left-eye vs. + % right eye view in the shared output window - or the source renderbuffer if both eyes + % would be rendered into a shared texture. However, the meshes provided by the SDK + % already encode proper left and right offsets for output, and the inputs are separate + % textures for left and right eye, so using the offset is not needed. Also our correction + % shader ignores the modelview matrix which would get updated with the "Offset:%i%i" blittercfg, + % instead is takes normalized device coordinates NDC directly from the distortion mesh. 
Iow, not + % only is xOffset/yOffset not needed, it would also be a no operation due to our specific shader. + % We leave this here for documentation for now, in case we need to change our ways of doing this. + leftViewPort = hmd{handle}.viewportLeft + % xOffset = hmd{handle}.viewportLeft(1) % Viewport x start. + % yOffset = hmd{handle}.viewportLeft(2) % Viewport y start. + % blittercfg = sprintf('Blitter:DisplayListBlit:Handle:%i:Bilinear:Offset:%i:%i', gldLeft, xOffset, yOffset); + blittercfg = sprintf('Blitter:DisplayListBlit:Handle:%i:Bilinear', gldLeft); + Screen('Hookfunction', win, posstring, 'StereoCompositingBlit', 'OculusVRClientCompositingShaderLeftEye', glsl, blittercfg); + + % Setup right eye shader: + glsl = LoadGLSLProgramFromFiles('OculusRiftCorrectionShader'); + glUseProgram(glsl); + glUniform1i(glGetUniformLocation(glsl, 'Image'), 1); + glUniform2f(glGetUniformLocation(glsl, 'EyeToSourceUVOffset'), hmd{handle}.uvOffsetRight(1) * texwidth, hmd{handle}.uvOffsetRight(2) * texheight); + %glUniform2f(glGetUniformLocation(glsl, 'EyeToSourceUVOffset'), texwidth/2, texheight/2); + glUniform2f(glGetUniformLocation(glsl, 'EyeToSourceUVScale'), hmd{handle}.uvScaleRight(1) * texwidth, hmd{handle}.uvScaleRight(2) * texheight); + %glUniform2f(glGetUniformLocation(glsl, 'EyeToSourceUVScale'), texwidth/2, texheight/2); + + glUniformMatrix4fv(glGetUniformLocation(glsl, 'EyeRotationStart'), 1, 1, diag([1 1 1 1])); + glUniformMatrix4fv(glGetUniformLocation(glsl, 'EyeRotationEnd'), 1, 1, diag([1 1 1 1])); + %glUniformMatrix4fv(glGetUniformLocation(glsl, 'EyeRotationStart'), 1, 1, hmd{handle}.eyeRotStartMatrixRight); + %glUniformMatrix4fv(glGetUniformLocation(glsl, 'EyeRotationEnd'), 1, 1, hmd{handle}.eyeRotEndMatrixRight); + glUseProgram(0); + + % Insert it at former position of the old shader: + posstring = sprintf('InsertAt%iShader', slot); + % See above for why xOffset/yOffset is not used here. + % xOffset = hmd{handle}.viewportRight(1) % Viewport x start. 
+ % yOffset = hmd{handle}.viewportRight(2) % Viewport y start. + % blittercfg = sprintf('Blitter:DisplayListBlit:Handle:%i:Bilinear:Offset:%i:%i', gldRight, xOffset, yOffset); + blittercfg = sprintf('Blitter:DisplayListBlit:Handle:%i:Bilinear', gldRight); + Screen('Hookfunction', win, posstring, 'StereoCompositingBlit', 'OculusVRClientCompositingShaderRightEye', glsl, blittercfg); + + % Return success result code 1: + varargout{1} = 1; + return; +end + +% 'cmd' so far not dispatched? Let's assume it is a command +% meant for PsychOculusVRCore: +[ varargout{1:nargout} ] = PsychOculusVRCore(cmd, varargin{:}); +return; + +end diff --git a/Psychtoolbox/PsychOpenGL/PsychGLSLShaders/OculusRiftCorrectionShader.frag.txt b/Psychtoolbox/PsychOpenGL/PsychGLSLShaders/OculusRiftCorrectionShader.frag.txt new file mode 100644 index 0000000000..42fac90779 --- /dev/null +++ b/Psychtoolbox/PsychOpenGL/PsychGLSLShaders/OculusRiftCorrectionShader.frag.txt @@ -0,0 +1,16 @@ +/* Fragment shader for performing corrections for the Oculus VR Rift + * (w) 2015 by Mario Kleiner. Licensed under MIT license. + */ + +#extension GL_ARB_texture_rectangle : enable + +uniform sampler2DRect Image; +varying float vignette; + +void main() +{ + float r = texture2DRect(Image, gl_TexCoord[0].xy).r; + float g = texture2DRect(Image, gl_TexCoord[1].xy).g; + float b = texture2DRect(Image, gl_TexCoord[2].xy).b; + gl_FragColor = vignette * vec4(r, g, b, 1.0); +} diff --git a/Psychtoolbox/PsychOpenGL/PsychGLSLShaders/OculusRiftCorrectionShader.vert.txt b/Psychtoolbox/PsychOpenGL/PsychGLSLShaders/OculusRiftCorrectionShader.vert.txt new file mode 100644 index 0000000000..faeac01fed --- /dev/null +++ b/Psychtoolbox/PsychOpenGL/PsychGLSLShaders/OculusRiftCorrectionShader.vert.txt @@ -0,0 +1,50 @@ +/* Vertex shader for performing corrections for the Oculus VR Rift + * (w) 2015 by Mario Kleiner. Licensed under MIT license. 
+ */ + +uniform vec2 EyeToSourceUVScale; +uniform vec2 EyeToSourceUVOffset; +uniform mat4 EyeRotationStart; +uniform mat4 EyeRotationEnd; + +varying float vignette; + +vec2 TimewarpTexCoord(vec2 TexCoord, mat4 rotMat) +{ + /* Vertex inputs are in TanEyeAngle space for the R,G,B channels (i.e. after chromatic + * aberration and distortion). These are now "real world" vectors in direction (x,y,1) + * relative to the eye of the HMD. Apply the 3x3 timewarp rotation to these vectors. + */ + vec3 transformed = vec3( (rotMat * vec4(TexCoord.xy, 1.0, 1.0)).xyz ); + + /* Project them back onto the Z=1 plane of the rendered images. */ + vec2 flattened = (transformed.xy / transformed.z); + + /* Scale them into ([0,0.5],[0,1]) or ([0.5,0],[0,1]) UV lookup space (depending on eye) */ + return(EyeToSourceUVScale * flattened + EyeToSourceUVOffset); +} + +void main(void) +{ + /* Linearly interpolate between the two eye rotation matrices, using the timewarpLerpFactor + * stored in the vertex z component: + */ + mat4 lerpedEyeRot; + float timewarpLerpFactor = gl_Vertex.z; + lerpedEyeRot[0] = mix(EyeRotationStart[0], EyeRotationEnd[0], timewarpLerpFactor); + lerpedEyeRot[1] = mix(EyeRotationStart[1], EyeRotationEnd[1], timewarpLerpFactor); + lerpedEyeRot[2] = mix(EyeRotationStart[2], EyeRotationEnd[2], timewarpLerpFactor); + lerpedEyeRot[3] = mix(EyeRotationStart[3], EyeRotationEnd[3], timewarpLerpFactor); + + /* Apply timewarp rotation to the texture coordinates of all three color channels: */ + /* These are individual texcoord sets for color aberration correction per channel. 
*/ + gl_TexCoord[0].xy = TimewarpTexCoord(gl_MultiTexCoord0.xy, lerpedEyeRot); + gl_TexCoord[1].xy = TimewarpTexCoord(gl_MultiTexCoord1.xy, lerpedEyeRot); + gl_TexCoord[2].xy = TimewarpTexCoord(gl_MultiTexCoord2.xy, lerpedEyeRot); + + /* Position is vertex xy position: */ + gl_Position = vec4(gl_Vertex.xy, 0.5, 1.0); + + /* Vignette correction fade out factor is stored in vertex w component: */ + vignette = gl_Vertex.w; +}