diff --git a/Python/Product/PyKinect/.htpasswd b/Python/Product/PyKinect/.htpasswd deleted file mode 100644 index 5bfdec5279..0000000000 --- a/Python/Product/PyKinect/.htpasswd +++ /dev/null @@ -1 +0,0 @@ -test:$apr1$NHmlzuXN$m.BU/ZitJrrbIL5d.yZ/2/ \ No newline at end of file diff --git a/Python/Product/PyKinect/.pypirc b/Python/Product/PyKinect/.pypirc deleted file mode 100644 index 196c0ee65d..0000000000 --- a/Python/Product/PyKinect/.pypirc +++ /dev/null @@ -1,7 +0,0 @@ -[distutils] -index-servers = mock - -[mock] -repository: http://localhost:8080/ -username: test -password: test diff --git a/Python/Product/PyKinect/BuildWheel.ps1 b/Python/Product/PyKinect/BuildWheel.ps1 deleted file mode 100644 index 36b5712c78..0000000000 --- a/Python/Product/PyKinect/BuildWheel.ps1 +++ /dev/null @@ -1,112 +0,0 @@ -# Python Tools for Visual Studio -# Copyright(c) Microsoft Corporation -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the License); you may not use -# this file except in compliance with the License. You may obtain a copy of the -# License at http://www.apache.org/licenses/LICENSE-2.0 -# -# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -# MERCHANTABLITY OR NON-INFRINGEMENT. -# -# See the Apache Version 2.0 License for specific language governing -# permissions and limitations under the License. - -[CmdletBinding()] -param([switch] $release, [switch] $mockrelease, [switch] $upload) - -$versions = @( ` - @{cmd="-2.7"; suffix="win-amd64-2.7" }, ` - @{cmd="-2.7-32"; suffix="win32-2.7" } ` -) - -foreach ($ver in $versions) { - py $($ver.cmd) -c "import setuptools, wheel" - if (-not $?) { - Write-Error -EA:Stop "Python interpreter is not configured." 
- } -} - -$buildroot = (Split-Path -Parent $MyInvocation.MyCommand.Definition) -while ((Test-Path $buildroot) -and -not (Test-Path "$buildroot\build.root")) { - $buildroot = (Split-Path -Parent $buildroot) -} -Write-Output "Build Root: $buildroot" - -pushd $buildroot\Python\Product\PyKinect\PyKinect - -if (Get-Command tf -EA 0) { - tf edit * /r -} - -try { - $signedbuild = $release -or $mockrelease - if ($signedbuild) { - $approvers = "smortaz", "dinov", "stevdo", "pminaev", "gilbertw", "huvalo", "sitani", "crwilcox" - $approvers = @($approvers | Where-Object {$_ -ne $env:USERNAME}) - - $projectName = "PyKinect" - $projectUrl = "http://pytools.codeplex.com" - $projectKeywords = "PyKinect; Visual Studio; Python; Kinect" - - Push-Location (Split-Path -Parent $MyInvocation.MyCommand.Definition) - if ($mockrelease) { - Set-Variable -Name DebugPreference -Value "Continue" -Scope "global" - Import-Module -force $buildroot\Common\Setup\ReleaseMockHelpers.psm1 - } else { - Import-Module -force $buildroot\Common\Setup\ReleaseHelpers.psm1 - } - Pop-Location - } - - if ($release) { - $repo = "pypi" - } else { - $repo = "mock" - } - - $default = $($versions[0].cmd) - - "build", "dist", "pykinect.egg-info" | ?{ Test-Path $_ } | %{ rmdir -r -fo $_ } - - $jobs = @() - - if ($upload) { - py $default setup.py sdist register -r $repo upload -r $repo - } else { - py $default setup.py sdist - } - - if ($signedbuild) { - foreach ($ver in $versions) { - py $($ver.cmd) setup.py bdist_wheel - - $item = Get-Item ".\build\lib.$($ver.suffix)\pykinect\audio\PyKinectAudio.dll" - "Signing $item" - $jobs += begin_sign_files @(@{path=$item.FullName; name=$item.Name}) ` - $item.Directory ` - $approvers ` - $projectName $projectUrl ` - "lib.$($ver.suffix)\pykinect\audio\PyKinectAudio.dll" $projectKeywords ` - "authenticode" - } - - end_sign_files $jobs - } - - foreach ($ver in $versions) { - if ($upload) { - py $($ver.cmd) setup.py bdist_wheel upload -r $repo - } else { - py $($ver.cmd) setup.py bdist_wheel - } - } -} finally { - if (Get-Command tfpt -EA 0) { - tfpt uu /noget /noprompt . /r - } - - popd -} diff --git a/Python/Product/PyKinect/PyGameTemplate/KinectGame.pyproj b/Python/Product/PyKinect/PyGameTemplate/KinectGame.pyproj deleted file mode 100644 index 69091e7431..0000000000 --- a/Python/Product/PyKinect/PyGameTemplate/KinectGame.pyproj +++ /dev/null @@ -1,26 +0,0 @@ - - - Debug - 2.0 - $guid1$ - . - $safeprojectname$.py - - . - . 
- {2AF0F10D-7135-4994-9156-5D01C9C11B7E} - 2.7 - - - true - false - - - true - false - - - - - - \ No newline at end of file diff --git a/Python/Product/PyKinect/PyGameTemplate/KinectGame.vstemplate b/Python/Product/PyKinect/PyGameTemplate/KinectGame.vstemplate deleted file mode 100644 index 082086767c..0000000000 --- a/Python/Product/PyKinect/PyGameTemplate/KinectGame.vstemplate +++ /dev/null @@ -1,18 +0,0 @@ - - - PyGame using PyKinect - A project for a Kinect game using PyGame - - Python - 150 - 2 - false - KinectGame - true - - - - Program.py - - - diff --git a/Python/Product/PyKinect/PyGameTemplate/KinectGame.zip b/Python/Product/PyKinect/PyGameTemplate/KinectGame.zip deleted file mode 100644 index 30e0ba29cc..0000000000 Binary files a/Python/Product/PyKinect/PyGameTemplate/KinectGame.zip and /dev/null differ diff --git a/Python/Product/PyKinect/PyGameTemplate/Program.py b/Python/Product/PyKinect/PyGameTemplate/Program.py deleted file mode 100644 index a20b31659f..0000000000 --- a/Python/Product/PyKinect/PyGameTemplate/Program.py +++ /dev/null @@ -1,41 +0,0 @@ -# If this is your first PyKinect project, you may need to install PyKinect into -# your Python 2.7 library through the Tools->Python Tools->Samples->PyKinect -# menu. -from pykinect import nui - -import pygame -from pygame.color import THECOLORS -from pygame.locals import * - -KINECTEVENT = pygame.USEREVENT - -def post_frame(frame): - """Get skeleton events from the Kinect device and post them into the PyGame event queue""" - try: - pygame.event.post(pygame.event.Event(KINECTEVENT, skeletons = frame.SkeletonData)) - except: - # event queue full - pass - -if __name__ == '__main__': - WINSIZE = 640, 480 - pygame.init() - - # Initialize PyGame - screen = pygame.display.set_mode(WINSIZE,0,16) - pygame.display.set_caption('Python Kinect Game') - screen.fill(THECOLORS["black"]) - - with nui.Runtime() as kinect: - kinect.skeleton_engine.enabled = True - kinect.skeleton_frame_ready += post_frame - - # Main game loop - while True: - e = pygame.event.wait() - - if e.type == pygame.QUIT: - break - elif e.type == KINECTEVENT: - # process e.skeletons here - pass diff --git a/Python/Product/PyKinect/PyKinect/Grammar.xml b/Python/Product/PyKinect/PyKinect/Grammar.xml deleted file mode 100644 index a7d07ae2a9..0000000000 --- a/Python/Product/PyKinect/PyKinect/Grammar.xml +++ /dev/null @@ -1,19 +0,0 @@ - - - - - up - down - left - right - - - diff --git a/Python/Product/PyKinect/PyKinect/PyGameDemo.py b/Python/Product/PyKinect/PyKinect/PyGameDemo.py deleted file mode 100644 index e774d9905f..0000000000 --- a/Python/Product/PyKinect/PyKinect/PyGameDemo.py +++ /dev/null @@ -1,215 +0,0 @@ -# PyKinect -# Copyright(c) Microsoft Corporation -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the License); you may not use -# this file except in compliance with the License. You may obtain a copy of the -# License at http://www.apache.org/licenses/LICENSE-2.0 -# -# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -# MERCHANTABLITY OR NON-INFRINGEMENT. -# -# See the Apache Version 2.0 License for specific language governing -# permissions and limitations under the License. 
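For reference, the KinectGame template above leaves "# process e.skeletons here" as a stub. A minimal sketch of that processing, using names from the pykinect.nui module removed later in this diff (Python 2 era; the helper function below is hypothetical, not part of the deleted sources):

    from pykinect.nui import JointId, SkeletonTrackingState

    def process_skeletons(skeletons):
        """Print the head position (meters, skeleton space) of each tracked skeleton."""
        for skeleton in skeletons:
            # eTrackingState / SkeletonPositions / JointId come from the deleted structs module.
            if skeleton.eTrackingState != SkeletonTrackingState.NOT_TRACKED:
                head = skeleton.SkeletonPositions[JointId.Head]
                print('head at (%.2f, %.2f, %.2f)' % (head.x, head.y, head.z))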
- -import thread -import itertools -import ctypes - -import pykinect -from pykinect import nui -from pykinect.nui import JointId - -import pygame -from pygame.color import THECOLORS -from pygame.locals import * - -KINECTEVENT = pygame.USEREVENT -DEPTH_WINSIZE = 320,240 -VIDEO_WINSIZE = 640,480 -pygame.init() - -SKELETON_COLORS = [THECOLORS["red"], - THECOLORS["blue"], - THECOLORS["green"], - THECOLORS["orange"], - THECOLORS["purple"], - THECOLORS["yellow"], - THECOLORS["violet"]] - -LEFT_ARM = (JointId.ShoulderCenter, - JointId.ShoulderLeft, - JointId.ElbowLeft, - JointId.WristLeft, - JointId.HandLeft) -RIGHT_ARM = (JointId.ShoulderCenter, - JointId.ShoulderRight, - JointId.ElbowRight, - JointId.WristRight, - JointId.HandRight) -LEFT_LEG = (JointId.HipCenter, - JointId.HipLeft, - JointId.KneeLeft, - JointId.AnkleLeft, - JointId.FootLeft) -RIGHT_LEG = (JointId.HipCenter, - JointId.HipRight, - JointId.KneeRight, - JointId.AnkleRight, - JointId.FootRight) -SPINE = (JointId.HipCenter, - JointId.Spine, - JointId.ShoulderCenter, - JointId.Head) - -skeleton_to_depth_image = nui.SkeletonEngine.skeleton_to_depth_image - -def draw_skeleton_data(pSkelton, index, positions, width = 4): - start = pSkelton.SkeletonPositions[positions[0]] - - for position in itertools.islice(positions, 1, None): - next = pSkelton.SkeletonPositions[position.value] - - curstart = skeleton_to_depth_image(start, dispInfo.current_w, dispInfo.current_h) - curend = skeleton_to_depth_image(next, dispInfo.current_w, dispInfo.current_h) - - pygame.draw.line(screen, SKELETON_COLORS[index], curstart, curend, width) - - start = next - -# recipe to get address of surface: http://archives.seul.org/pygame/users/Apr-2008/msg00218.html -if hasattr(ctypes.pythonapi, 'Py_InitModule4'): - Py_ssize_t = ctypes.c_int -elif hasattr(ctypes.pythonapi, 'Py_InitModule4_64'): - Py_ssize_t = ctypes.c_int64 -else: - raise TypeError("Cannot determine type of Py_ssize_t") - -_PyObject_AsWriteBuffer = ctypes.pythonapi.PyObject_AsWriteBuffer -_PyObject_AsWriteBuffer.restype = ctypes.c_int -_PyObject_AsWriteBuffer.argtypes = [ctypes.py_object, - ctypes.POINTER(ctypes.c_void_p), - ctypes.POINTER(Py_ssize_t)] - -def surface_to_array(surface): - buffer_interface = surface.get_buffer() - address = ctypes.c_void_p() - size = Py_ssize_t() - _PyObject_AsWriteBuffer(buffer_interface, - ctypes.byref(address), ctypes.byref(size)) - bytes = (ctypes.c_byte * size.value).from_address(address.value) - bytes.object = buffer_interface - return bytes - -def draw_skeletons(skeletons): - for index, data in enumerate(skeletons): - # draw the Head - HeadPos = skeleton_to_depth_image(data.SkeletonPositions[JointId.Head], dispInfo.current_w, dispInfo.current_h) - draw_skeleton_data(data, index, SPINE, 10) - pygame.draw.circle(screen, SKELETON_COLORS[index], (int(HeadPos[0]), int(HeadPos[1])), 20, 0) - - # drawing the limbs - draw_skeleton_data(data, index, LEFT_ARM) - draw_skeleton_data(data, index, RIGHT_ARM) - draw_skeleton_data(data, index, LEFT_LEG) - draw_skeleton_data(data, index, RIGHT_LEG) - - -def depth_frame_ready(frame): - if video_display: - return - - with screen_lock: - address = surface_to_array(screen) - frame.image.copy_bits(address) - del address - if skeletons is not None and draw_skeleton: - draw_skeletons(skeletons) - pygame.display.update() - - -def video_frame_ready(frame): - if not video_display: - return - - with screen_lock: - address = surface_to_array(screen) - frame.image.copy_bits(address) - del address - if skeletons is not None and draw_skeleton: - 
draw_skeletons(skeletons) - pygame.display.update() - -if __name__ == '__main__': - full_screen = False - draw_skeleton = True - video_display = False - - screen_lock = thread.allocate() - - screen = pygame.display.set_mode(DEPTH_WINSIZE,0,16) - pygame.display.set_caption('Python Kinect Demo') - skeletons = None - screen.fill(THECOLORS["black"]) - - kinect = nui.Runtime() - kinect.skeleton_engine.enabled = True - def post_frame(frame): - try: - pygame.event.post(pygame.event.Event(KINECTEVENT, skeletons = frame.SkeletonData)) - except: - # event queue full - pass - - kinect.skeleton_frame_ready += post_frame - - kinect.depth_frame_ready += depth_frame_ready - kinect.video_frame_ready += video_frame_ready - - kinect.video_stream.open(nui.ImageStreamType.Video, 2, nui.ImageResolution.Resolution640x480, nui.ImageType.Color) - kinect.depth_stream.open(nui.ImageStreamType.Depth, 2, nui.ImageResolution.Resolution320x240, nui.ImageType.Depth) - - print('Controls: ') - print(' d - Switch to depth view') - print(' v - Switch to video view') - print(' s - Toggle displaing of the skeleton') - print(' u - Increase elevation angle') - print(' j - Decrease elevation angle') - - # main game loop - done = False - - while not done: - e = pygame.event.wait() - dispInfo = pygame.display.Info() - if e.type == pygame.QUIT: - done = True - break - elif e.type == KINECTEVENT: - skeletons = e.skeletons - if draw_skeleton: - draw_skeletons(skeletons) - pygame.display.update() - elif e.type == KEYDOWN: - if e.key == K_ESCAPE: - done = True - break - elif e.key == K_d: - with screen_lock: - screen = pygame.display.set_mode(DEPTH_WINSIZE,0,16) - video_display = False - elif e.key == K_v: - with screen_lock: - screen = pygame.display.set_mode(VIDEO_WINSIZE,0,32) - video_display = True - elif e.key == K_s: - draw_skeleton = not draw_skeleton - elif e.key == K_u: - kinect.camera.elevation_angle = kinect.camera.elevation_angle + 2 - elif e.key == K_j: - kinect.camera.elevation_angle = kinect.camera.elevation_angle - 2 - elif e.key == K_x: - kinect.camera.elevation_angle = 2 - diff --git a/Python/Product/PyKinect/PyKinect/PyKinectPackage.pyproj b/Python/Product/PyKinect/PyKinect/PyKinectPackage.pyproj deleted file mode 100644 index 8ca5603065..0000000000 --- a/Python/Product/PyKinect/PyKinect/PyKinectPackage.pyproj +++ /dev/null @@ -1,60 +0,0 @@ - - - - Debug - 2.0 - {2ad00d4e-c501-40f4-9b5f-2531affff8a3} - . - tests.py - - - . - PyKinect - PyKinectPackage - PyKinect - False - 2af0f10d-7135-4994-9156-5d01c9c11b7e - 2.7 - Standard Python launcher - -v KinectTestCases.test_skeleton_engine - - - - - true - false - - - true - false - - - - - - - - - - - - - - - - - - - - - - PyKinectAudio - {8b213e91-39f8-4461-bc8e-fe0401ecbea5} - True - - - - - - - \ No newline at end of file diff --git a/Python/Product/PyKinect/PyKinect/pykinect/LICENSE.txt b/Python/Product/PyKinect/PyKinect/pykinect/LICENSE.txt deleted file mode 100644 index d9a10c0d8e..0000000000 --- a/Python/Product/PyKinect/PyKinect/pykinect/LICENSE.txt +++ /dev/null @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS diff --git a/Python/Product/PyKinect/PyKinect/pykinect/__init__.py b/Python/Product/PyKinect/PyKinect/pykinect/__init__.py deleted file mode 100644 index 88f65f6d84..0000000000 --- a/Python/Product/PyKinect/PyKinect/pykinect/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# PyKinect -# Copyright(c) Microsoft Corporation -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the License); you may not use -# this file except in compliance with the License. You may obtain a copy of the -# License at http://www.apache.org/licenses/LICENSE-2.0 -# -# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -# MERCHANTABLITY OR NON-INFRINGEMENT. -# -# See the Apache Version 2.0 License for specific language governing -# permissions and limitations under the License. 
- diff --git a/Python/Product/PyKinect/PyKinect/pykinect/audio/__init__.py b/Python/Product/PyKinect/PyKinect/pykinect/audio/__init__.py deleted file mode 100644 index c4312ba65c..0000000000 --- a/Python/Product/PyKinect/PyKinect/pykinect/audio/__init__.py +++ /dev/null @@ -1,347 +0,0 @@ -# PyKinect -# Copyright(c) Microsoft Corporation -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the License); you may not use -# this file except in compliance with the License. You may obtain a copy of the -# License at http://www.apache.org/licenses/LICENSE-2.0 -# -# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -# MERCHANTABLITY OR NON-INFRINGEMENT. -# -# See the Apache Version 2.0 License for specific language governing -# permissions and limitations under the License. - -import os -import ctypes -from os import path - -_audiodll_path = os.path.join(os.environ['WINDIR'], 'System32', 'KinectAudio10.dll') - -_MAX_STR_LEN = 512 -_AUDIODLL = ctypes.WinDLL(_audiodll_path) - -_audio_path = path.join(path.dirname(__file__), 'PyKinectAudio.dll') -if not os.path.exists(_audio_path): - _audio_path = path.join(path.dirname(__file__), '..', '..', '..', '..', '..', '..', '..', 'Binaries', 'Debug11.0', 'PyKinectAudio.dll') - if not path.exists(_audio_path): - _audio_path = path.join(path.dirname(__file__), '..', '..', '..', '..', '..', '..', '..', 'Binaries', 'Debug10.0', 'PyKinectAudio.dll') - if not path.exists(_audio_path): - raise Exception('Cannot find PyKinectAudio.dll') - -_PYAUDIODLL = ctypes.CDLL(_audio_path) -_OpenKinectAudio = _PYAUDIODLL.OpenKinectAudio -_OpenKinectAudio.argtypes = [ctypes.c_voidp, ctypes.POINTER(ctypes.c_voidp)] -_OpenKinectAudio.restype = ctypes.HRESULT - -_OpenAudioStream = _PYAUDIODLL.OpenAudioStream -_OpenAudioStream.argtypes = [ctypes.c_voidp, ctypes.POINTER(ctypes.c_voidp), ctypes.c_uint32] -_OpenAudioStream.restype = ctypes.HRESULT - -_SetDeviceProperty_Bool = _PYAUDIODLL.SetDeviceProperty_Bool -_SetDeviceProperty_Bool.argtypes = [ctypes.c_voidp, ctypes.c_uint32, ctypes.c_uint] -_SetDeviceProperty_Bool.restype = ctypes.HRESULT - -_SetDeviceProperty_Int = _PYAUDIODLL.SetDeviceProperty_Int -_SetDeviceProperty_Int.argtypes = [ctypes.c_voidp, ctypes.c_uint32, ctypes.c_int] -_SetDeviceProperty_Int.restype = ctypes.HRESULT - -_GetDeviceProperty_Bool = _PYAUDIODLL.GetDeviceProperty_Bool -_GetDeviceProperty_Bool.argtypes = [ctypes.c_voidp, ctypes.c_uint32, ctypes.POINTER(ctypes.c_uint)] -_GetDeviceProperty_Bool.restype = ctypes.HRESULT - -_GetDeviceProperty_Int = _PYAUDIODLL.GetDeviceProperty_Int -_GetDeviceProperty_Int.argtypes = [ctypes.c_voidp, ctypes.c_uint32, ctypes.POINTER(ctypes.c_int)] -_GetDeviceProperty_Int.restype = ctypes.HRESULT - - -#class AecQualityMetrics(ctypes.Structure): -# _fields_ = [ -# ('timestamp', ctypes.c_longlong), -# ('convergence_flag', ctypes.c_byte), -# ('mic_clipped_flag', ctypes.c_byte), -# ('mic_silence_flag', ctypes.c_byte), -# ('pstv_feadback_flag', ctypes.c_byte), -# ('spk_clipped_flag', ctypes.c_byte), -# ('spk_mute_flag', ctypes.c_byte), -# ('glitch_flag', ctypes.c_byte), -# ('double_talk_flag', ctypes.c_byte), -# ('glitch_count', ctypes.c_ulong), -# ('mic_clip_count', ctypes.c_ulong), -# ('duration', ctypes.c_float), -# ('ts_variance', ctypes.c_float), -# ('ts_drift_rate', ctypes.c_float), -# ('voice_level', ctypes.c_float), -# 
('noise_Level', ctypes.c_float), -# ('echo_return_loss_enhancement', ctypes.c_float), -# ('avg_echo_return_loss_enhancement', ctypes.c_float), -# ('reserved', ctypes.c_uint32), -# ] - -#_GetDeviceProperty_QualityMetrics = _PYAUDIODLL.GetDeviceProperty_QualityMetrics -#_GetDeviceProperty_QualityMetrics.argtypes = [ctypes.c_voidp, ctypes.c_uint32, ctypes.POINTER(AecQualityMetrics)] -#_GetDeviceProperty_QualityMetrics.restype = ctypes.HRESULT - -_ReadAudioStream = _PYAUDIODLL.ReadAudioStream -_ReadAudioStream.argtypes = [ctypes.c_voidp, ctypes.c_voidp, ctypes.c_uint32, ctypes.POINTER(ctypes.c_uint32)] -_ReadAudioStream.restype = ctypes.HRESULT - -_IUnknownRelease = _PYAUDIODLL.IUnknownRelease -_IUnknownRelease.argtypes = [ctypes.c_voidp] -_IUnknownRelease.restype = None - -_Recognize_Callback = ctypes.WINFUNCTYPE(None, ctypes.c_wchar_p) - -_ReadCallback = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_uint32, ctypes.c_voidp, ctypes.POINTER(ctypes.c_uint32)) - -_OLE32 = ctypes.WinDLL('ole32.dll') -_CoInitialize = _OLE32.CoInitialize -_CoInitialize.argtypes = [ctypes.c_voidp] -_CoInitialize.restype = ctypes.HRESULT - -_CoInitialize(None) - -class AudioDeviceInfo(ctypes.Structure): - """Describes a systems Kinect sensors. """ - _fields_ = [('device_name', ctypes.c_wchar * _MAX_STR_LEN), - ('device_id', ctypes.c_wchar * _MAX_STR_LEN), - ('device_index', ctypes.c_int), - ] - -_NuiGetMicrophoneArrayDevices = _AUDIODLL.NuiGetMicrophoneArrayDevices -_NuiGetMicrophoneArrayDevices.argtypes = [ctypes.POINTER(AudioDeviceInfo), ctypes.POINTER(ctypes.c_int)] -_NuiGetMicrophoneArrayDevices.restype = ctypes.HRESULT - -def GetKinectDeviceInfo(): - '''returns a sequence of AudioDeviceInfo objects describing the available Kinect audio devices''' - count = ctypes.c_int() - _NuiGetMicrophoneArrayDevices(None, 0, ctypes.byref(count)) - deviceArray = (AudioDeviceInfo * count.value)() - - _NuiGetMicrophoneArrayDevices(deviceArray, count.value, ctypes.byref(count)) - - return [arrayObj for arrayObj in deviceArray] - - -_MFPKEY_WMAAECMA_SYSTEM_MODE = 2 -_MFPKEY_WMAAECMA_DMO_SOURCE_MODE = 3 -_MFPKEY_WMAAECMA_DEVICE_INDEXES = 4 -_MFPKEY_WMAAECMA_FEATURE_MODE = 5 -_MFPKEY_WMAAECMA_FEATR_FRAME_SIZE = 6 -_MFPKEY_WMAAECMA_FEATR_ECHO_LENGTH = 7 -_MFPKEY_WMAAECMA_FEATR_NS = 8 -_MFPKEY_WMAAECMA_FEATR_AGC = 9 -_MFPKEY_WMAAECMA_FEATR_AES = 10 -_MFPKEY_WMAAECMA_FEATR_VAD = 11 -_MFPKEY_WMAAECMA_FEATR_CENTER_CLIP = 12 -_MFPKEY_WMAAECMA_FEATR_NOISE_FILL = 13 -_MFPKEY_WMAAECMA_RETRIEVE_TS_STATS = 14 -_MFPKEY_WMAAECMA_QUALITY_METRICS = 15 -_MFPKEY_WMAAECMA_DEVICEPAIR_GUID = 0x11 -_MFPKEY_WMAAECMA_FEATR_MICARR_MODE = 0x12 -_MFPKEY_WMAAECMA_FEATR_MICARR_BEAM = 0x13 -_MFPKEY_WMAAECMA_MIC_GAIN_BOUNDER = 0x15 -_MFPKEY_WMAAECMA_FEATR_MICARR_PREPROC = 20 - - -_AudioPropertySetters = { - bool : _SetDeviceProperty_Bool, - int : _SetDeviceProperty_Int, -} - -_AudioPropertyGetters = { - bool : (_GetDeviceProperty_Bool, ctypes.c_uint), - int : (_GetDeviceProperty_Int, ctypes.c_int), - #AecQualityMetrics: (_GetDeviceProperty_QualityMetrics, AecQualityMetrics), - } - -class _AudioSourceProperty(object): - """internal descriptor used for all of our properties""" - def __init__(self, index, prop_type, doc = None): - self.index = index - self.prop_type = prop_type - self.__doc__ = doc - - def __get__(self, inst, context): - getter_func, getter_type = _AudioPropertyGetters[self.prop_type] - value = getter_type() - getter_func(inst._dmo, self.index, ctypes.byref(value)) - return value.value - - -class _SettableAudioSourceProperty(_AudioSourceProperty): - 
def __set__(self, inst, value): - _AudioPropertySetters[self.prop_type](inst._dmo, self.index, value) - - -class MicArrayMode(object): - MicArrayAdaptiveBeam = 0x1100, - MicArrayExternBeam = 0x800, - MicArrayFixedBeam = 0x400, - MicArraySimpleSum = 0x100, - MicArraySingleBeam = 0x200, - MicArraySingleChan = 0 - - -class _AudioFile(object): - """provides a file-like object for reading from the kinect audio stream""" - - def __init__(self, stream): - self.closed = False - self.__ISpStreamFormat__ = stream - self._buffer = (ctypes.c_byte * 4096)() - self._buffered_bytes = 0 - self._buffer_start = 0 - - def __del__(self): - if self.__ISpStreamFormat__ is not None: - _IUnknownRelease(self.__ISpStreamFormat__) - - def close(self): - self.closed = True - _IUnknownRelease(self.__ISpStreamFormat__) - self.__ISpStreamFormat__ = None - - def flush(self): - pass - - def next(self): - raise NotImplementedError() - - def __iter__(self): - raise NotImplementedError() - - def read(self, size = 4096): - if self.closed: - raise IOError('Kinect audio source has been closed') - - to_read = size - res = [] - bytes_read = ctypes.c_uint32() - while to_read != 0: - if self._buffer_start != self._buffered_bytes: - to_append = min(to_read, self._buffered_bytes - self._buffer_start) - - res.append(ctypes.string_at(ctypes.addressof(self._buffer) + self._buffer_start, to_append)) - - self._buffer_start += to_append - to_read -= to_append - - if not to_read: - break - - # read more data... - _ReadAudioStream(self.__ISpStreamFormat__, self._buffer, len(self._buffer), ctypes.byref(bytes_read)) - self._buffer_start = 0 - self._buffered_bytes = bytes_read.value - - return ''.join(res) - - def readline(self, size = None): - raise NotImplementedError() - - def readlines(self, sizehint = None): - raise NotImplementedError() - - def xreadlines(self): - raise NotImplementedError() - - def seek(self, offset, whence): - raise NotImplementedError() - - def tell(self): - raise NotImplementedError() - - def truncate(self, size = None): - raise IOError('KinectAudio file not open for writing') - - def write(self, str): - raise IOError('KinectAudio file not open for writing') - - def writelines(self, sequence): - raise IOError('KinectAudio file not open for writing') - - @property - def name(self): - return 'KinectAudio' - - @property - def mode(self): - return 'rb' - -class KinectAudioSource(object): - def __init__(self, device = None): - self._dmo = None - dmo = ctypes.c_voidp() - if device is None: - from pykinect import nui - - device = nui.Runtime(nui.RuntimeOptions.uses_audio) - - _OpenKinectAudio(device._nui, ctypes.byref(dmo)) - self._dmo = dmo - self._file = None - self._device = device - - def __del__(self): - if self._dmo is not None: - _IUnknownRelease(self._dmo) - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - _IUnknownRelease(self._dmo) - self._dmo = None - self._device = None - - def start(self, readStaleThreshold = 500): - """Starts capturing audio from the Kinect sensor's microphone array into a buffer. Returns a file-like object that represents the audio stream, which is in 16khz, 16 bit PCM format. - -readStaleThreshold: Specifies how long to retain data in the buffer (in milliseconds). If you do not read from the stream for longer than this "stale data" threshold value the DMO discards any buffered audio. 
- - """ - if self._file is not None: - raise Exception('Capture already started') - - audio_stream = ctypes.c_voidp() - _OpenAudioStream(self._dmo, ctypes.byref(audio_stream), readStaleThreshold) - - self._file = _AudioFile(audio_stream) - return self._file - - def stop(self): - if self._file is not None: - self._file.close() - - acoustic_echo_suppression = _SettableAudioSourceProperty(_MFPKEY_WMAAECMA_FEATR_AES, int) - automatic_gain_control = _SettableAudioSourceProperty(_MFPKEY_WMAAECMA_FEATR_AGC, bool) - center_clip = _SettableAudioSourceProperty(_MFPKEY_WMAAECMA_FEATR_CENTER_CLIP, bool) - echo_length = _SettableAudioSourceProperty(_MFPKEY_WMAAECMA_FEATR_ECHO_LENGTH, int) - feature_mode = _SettableAudioSourceProperty(_MFPKEY_WMAAECMA_FEATURE_MODE, bool) - frame_size = _SettableAudioSourceProperty(_MFPKEY_WMAAECMA_FEATR_FRAME_SIZE, int) - gain_bounder = _SettableAudioSourceProperty(_MFPKEY_WMAAECMA_MIC_GAIN_BOUNDER, bool) - mic_array_mode = _SettableAudioSourceProperty(_MFPKEY_WMAAECMA_FEATR_MICARR_MODE, int) - mic_array_preprocess = _SettableAudioSourceProperty(_MFPKEY_WMAAECMA_FEATR_MICARR_PREPROC, bool) - noise_fill = _SettableAudioSourceProperty(_MFPKEY_WMAAECMA_FEATR_NOISE_FILL, bool) - noise_suppression = _SettableAudioSourceProperty(_MFPKEY_WMAAECMA_FEATR_NS, int) - #quality_metrics = _AudioSourceProperty(_MFPKEY_WMAAECMA_QUALITY_METRICS, AecQualityMetrics) - retrieve_ts_stats = _SettableAudioSourceProperty(_MFPKEY_WMAAECMA_RETRIEVE_TS_STATS, bool) - source_mode = _SettableAudioSourceProperty(_MFPKEY_WMAAECMA_DMO_SOURCE_MODE, bool) - system_mode = _SettableAudioSourceProperty(_MFPKEY_WMAAECMA_SYSTEM_MODE, int) - voice_activity_detector = _SettableAudioSourceProperty(_MFPKEY_WMAAECMA_FEATR_VAD, int) - device_pair_guid = _AudioSourceProperty(_MFPKEY_WMAAECMA_DEVICEPAIR_GUID, object) # TODO: Read only, guid type - - # TODO: Properties - # speaker_index - # sound_source_position_confidence - # sound_source_position - # mic_array_beam_angle - - # TODO: Events - # beam_changed event - - pass - diff --git a/Python/Product/PyKinect/PyKinect/pykinect/nui/__init__.py b/Python/Product/PyKinect/PyKinect/pykinect/nui/__init__.py deleted file mode 100644 index 10b265701b..0000000000 --- a/Python/Product/PyKinect/PyKinect/pykinect/nui/__init__.py +++ /dev/null @@ -1,442 +0,0 @@ -# PyKinect -# Copyright(c) Microsoft Corporation -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the License); you may not use -# this file except in compliance with the License. You may obtain a copy of the -# License at http://www.apache.org/licenses/LICENSE-2.0 -# -# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -# MERCHANTABLITY OR NON-INFRINGEMENT. -# -# See the Apache Version 2.0 License for specific language governing -# permissions and limitations under the License. 
- -import ctypes -import os -import thread - -# basic initialization, Python specific infrastructure -_nuidll_path = os.path.join(os.environ['WINDIR'], 'System32', 'Kinect10.dll') -_NUIDLL = ctypes.WinDLL(_nuidll_path) - -class KinectError(WindowsError): - """Represents an error from a Kinect sensor""" - pass - -from pykinect.nui.structs import (ImageDigitalZoom, ImageFrame, ImageResolution, - ImageType, ImageViewArea, JointId, - JointTrackingState, PlanarImage, SkeletonData, - SkeletonFrame, SkeletonFrameQuality, - SkeletonQuality, SkeletonTrackingState, - TransformSmoothParameters, Vector, _Enumeration) - -from _interop import (_CreateEvent, _CloseHandle, _WaitForSingleObject, - _WaitForMultipleObjects, _WAIT_OBJECT_0, _INFINITE, - _SysFreeString, _NuiInstance, _NuiCreateSensorByIndex, - _NuiGetSensorCount) - - -_NUI_IMAGE_PLAYER_INDEX_SHIFT = 3 -_NUI_IMAGE_PLAYER_INDEX_MASK = ((1 << _NUI_IMAGE_PLAYER_INDEX_SHIFT)-1) -_NUI_IMAGE_DEPTH_MAXIMUM = ((4000 << _NUI_IMAGE_PLAYER_INDEX_SHIFT) | _NUI_IMAGE_PLAYER_INDEX_MASK) -_NUI_IMAGE_DEPTH_MINIMUM = (800 << _NUI_IMAGE_PLAYER_INDEX_SHIFT) -_NUI_IMAGE_DEPTH_NO_VALUE = 0 - -_NUI_CAMERA_DEPTH_NOMINAL_FOCAL_LENGTH_IN_PIXELS = 285.63 # Based on 320x240 pixel size. -_NUI_CAMERA_DEPTH_NOMINAL_INVERSE_FOCAL_LENGTH_IN_PIXELS = 3.501e-3 # (1/NUI_CAMERA_DEPTH_NOMINAL_FOCAL_LENGTH_IN_PIXELS) -_NUI_CAMERA_DEPTH_NOMINAL_DIAGONAL_FOV = 70.0 -_NUI_CAMERA_DEPTH_NOMINAL_HORIZONTAL_FOV = 58.5 -_NUI_CAMERA_DEPTH_NOMINAL_VERTICAL_FOV = 45.6 - -_NUI_CAMERA_COLOR_NOMINAL_FOCAL_LENGTH_IN_PIXELS = 531.15 # Based on 640x480 pixel size. -_NUI_CAMERA_COLOR_NOMINAL_INVERSE_FOCAL_LENGTH_IN_PIXELS = 1.83e-3 # (1/NUI_CAMERA_COLOR_NOMINAL_FOCAL_LENGTH_IN_PIXELS) -_NUI_CAMERA_COLOR_NOMINAL_DIAGONAL_FOV = 73.9 -_NUI_CAMERA_COLOR_NOMINAL_HORIZONTAL_FOV = 62.0 -_NUI_CAMERA_COLOR_NOMINAL_VERTICAL_FOV = 48.6 - -# the max # of NUI output frames you can hold w/o releasing -_NUI_IMAGE_STREAM_FRAME_LIMIT_MAXIMUM = 4 - -# return S_FALSE instead of E_NUI_FRAME_NO_DATA if NuiImageStreamGetNextFrame( ) doesn't have a frame ready and a timeout != INFINITE is used -_NUI_IMAGE_STREAM_FLAG_SUPPRESS_NO_FRAME_DATA = 0x00010000 - -####################################################### - -_NUI_SKELETON_MAX_TRACKED_COUNT = 2 -_NUI_SKELETON_INVALID_TRACKING_ID = 0 - -# Assuming a pixel resolution of 320x240 -# x_meters = (x_pixelcoord - 160) * NUI_CAMERA_DEPTH_IMAGE_TO_SKELETON_MULTIPLIER_320x240 * z_meters; -# y_meters = (y_pixelcoord - 120) * NUI_CAMERA_DEPTH_IMAGE_TO_SKELETON_MULTIPLIER_320x240 * z_meters; -_NUI_CAMERA_DEPTH_IMAGE_TO_SKELETON_MULTIPLIER_320x240 = _NUI_CAMERA_DEPTH_NOMINAL_INVERSE_FOCAL_LENGTH_IN_PIXELS - -# Assuming a pixel resolution of 320x240 -# x_pixelcoord = (x_meters) * NUI_CAMERA_SKELETON_TO_DEPTH_IMAGE_MULTIPLIER_320x240 / z_meters + 160; -# y_pixelcoord = (y_meters) * NUI_CAMERA_SKELETON_TO_DEPTH_IMAGE_MULTIPLIER_320x240 / z_meters + 120; -_NUI_CAMERA_SKELETON_TO_DEPTH_IMAGE_MULTIPLIER_320x240 = _NUI_CAMERA_DEPTH_NOMINAL_FOCAL_LENGTH_IN_PIXELS - -_FLT_EPSILON = 1.192092896e-07 - -class RuntimeOptions(object): - """Specifies the runtime options for a Kinect sensor. 
""" - uses_depth_and_player_index = UseDepthAndPlayerIndex = 0x01 - uses_color = UseColor = 0x02 - uses_skeletal_tracking = UseSkeletalTracking = 0x08 - uses_depth = UseDepth = 0x20 - uses_high_quality_color = 0x40 # implies COLOR stream will be from uncompressed YUY2 @ 15fps - uses_audio = UsesAudio = 0x10000000 - - -class Device(object): - """Represents a system's Kinect sensors.""" - _device_inst = None - - def __new__(cls): - if Device._device_inst is None: - Device._device_inst = object.__new__(Device) - return Device._device_inst - - @property - def count(self): - """The number of active Kinect sensors that are attached to the system.""" - return _NuiGetSensorCount() - - -class Runtime(object): - """Represents a Kinect sensor.""" - - def __init__(self, - nui_init_flags = RuntimeOptions.uses_color| RuntimeOptions.uses_depth | RuntimeOptions.uses_depth_and_player_index | RuntimeOptions.uses_skeletal_tracking, - index = 0): - """Creates a new runtime. By default initialized to the 1st installed kinect device and tracking all events""" - self._nui = self._skeleton_event = self._image_event = self._depth_event = None - self._nui = _NuiCreateSensorByIndex(index) - try: - self._nui.NuiInitialize(nui_init_flags) - except: - self._nui.NuiShutdown() - import traceback - - raise KinectError('Unable to create Kinect runtime %s' % (traceback.format_exc())) - - self.depth_frame_ready = _event() - self.skeleton_frame_ready = _event() - self.video_frame_ready = _event() - - self._skeleton_event = _CreateEvent(None, True, False, None) - self._image_event = _CreateEvent(None, True, False, None) - self._depth_event = _CreateEvent(None, True, False, None) - - self.camera = Camera(self) - self.skeleton_engine = SkeletonEngine(self) - self.depth_stream = ImageStream(self) - self.video_stream = ImageStream(self) - - thread.start_new_thread(self._event_thread, ()) - - def close(self): - """closes the current runtime""" - if self._nui is not None: - self._nui.NuiShutdown() - self._nui = None - - if self._skeleton_event is not None: - _CloseHandle(self._skeleton_event) - self._skeleton_event = None - - if self._image_event is not None: - _CloseHandle(self._image_event) - self._image_event = None - - if self._depth_event is not None: - _CloseHandle(self._depth_event) - self._depth_event = None - - def _check_closed(self): - if self._nui is None: - raise KinectError('Device closed') - - def __del__(self): - self.close() - - def __enter__(self): - return self - - def __exit__(self, *args): - self.close() - - @property - def instance_index(self): - self._check_closed() - - """Gets the index for this instance of Runtime.""" - return self._nui.InstanceIndex() - - - def _event_thread(self): - handles = (ctypes.c_voidp * 3)() - handles[0] = self._skeleton_event - handles[1] = self._depth_event - handles[2] = self._image_event - while 1: - wait = _WaitForMultipleObjects(3, handles, False, _INFINITE) - if wait == 0: - # skeleton data - try: - frame = self._nui.NuiSkeletonGetNextFrame(0) - except KinectError: - continue - - for curSkeleton in frame.SkeletonData: - if curSkeleton.eTrackingState != SkeletonTrackingState.NOT_TRACKED: - self.skeleton_frame_ready.fire(frame) - break - elif wait == 1: - # depth event - depth_frame = self._nui.NuiImageStreamGetNextFrame(self.depth_stream._stream, 0) - self.depth_frame_ready.fire(depth_frame) - self._nui.NuiImageStreamReleaseFrame(self.depth_stream._stream, depth_frame) - elif wait == 2: - # image event - depth_frame = 
self._nui.NuiImageStreamGetNextFrame(self.video_stream._stream, 0) - self.video_frame_ready.fire(depth_frame) - self._nui.NuiImageStreamReleaseFrame(self.video_stream._stream, depth_frame) - pass - else: - # wait failed in some form (abandoned, timeout, or failed), this ends our loop - # when we close our events. - break - - -class ImageStreamType(object): - """Specifies an image stream type. """ - depth = Depth = 0 - video = Video = 1 - invalid = Invalid = -1 - - -class ImageStream(object): - """Represents an image stream.""" - - def __init__(self, runtime): - self.runtime = runtime - self.resolution = ImageResolution.Invalid - self.height = self.width = 0 - self.stream_type = ImageStreamType.Invalid - self._stream = None - - def open(self, image_stream_type = 0, frame_limit = 2, resolution = ImageResolution.Resolution320x240, image_type = ImageType.Color): - if image_stream_type == ImageStreamType.Depth: - event_handle = self.runtime._depth_event - elif image_stream_type == ImageStreamType.Video: - event_handle = self.runtime._image_event - else: - raise ValueError("Unexpected image stream type: %r" % (image_stream_type, )) - - if resolution == ImageResolution.Resolution1280x1024: - self.width = 1280 - self.height = 1024 - elif resolution == ImageResolution.Resolution640x480: - self.width = 640 - self.height = 480 - elif resolution == ImageResolution.Resolution320x240: - self.width = 320 - self.height = 240 - elif resolution == ImageResolution.Resolution80x60: - self.width = 80 - self.height = 60 - else: - raise ValueError("Unexpected resolution: %r" % (resolution, )) - - self._stream = self.runtime._nui.NuiImageStreamOpen(image_type, resolution, 0, frame_limit, event_handle) - self.stream_type = image_stream_type - self.resolution = resolution - self.type = image_type - - def get_next_frame(self, milliseconds_to_wait = 0): - # TODO: Allow user to provide a NUI_IMAGE_FRAME ? - return self.runtime._nui.NuiImageStreamGetNextFrame(self._stream, milliseconds_to_wait) - - @staticmethod - def get_valid_resolutions(image_type): - if image_type == ImageType.Color: - return (ImageResolution.Resolution1280x1024, ImageResolution.Resolution640x480) - elif image_type == ImageType.Depth: - return (ImageResolution.Resolution640x480, ) - elif image_type == ImageType.DepthAndPlayerIndex: - return (ImageResolution.Resolution320x240, ) - elif image_type == ImageType.ColorYuv: - return (ImageResolution.Resolution640x480, ) - elif image_type == ImageType.ColorYuvRaw: - return (ImageResolution.Resolution640x480, ) - else: - raise KinectError("Unknown image_type: %r" % (image_type, )) - - -class SkeletonEngine(object): - """Represents the skeleton tracking engine. """ - - def __init__(self, runtime): - self.runtime = runtime - self._enabled = False - - def get_enabled(self): - return self._enabled - - def set_enabled(self, value): - if value: - self.runtime._nui.NuiSkeletonTrackingEnable(self.runtime._skeleton_event) - self._enabled = True - else: - self.runtime._nui.NuiSkeletonTrackingDisable(self.runtime._skeleton_event) - self._enabled = False - - enabled = property(get_enabled, set_enabled) - - def get_next_frame(self, timeout = -1): - res = self.runtime._nui.NuiSkeletonGetNextFrame(timeout) - assert isinstance(res, SkeletonFrame) - return res - - @staticmethod - def depth_image_to_skeleton(fDepthX, fDepthY, usDepthValue): - """returns Vector4""" - - ## - ## Depth is in meters in skeleton space. - ## The depth image pixel format has depth in millimeters shifted left by 3. 
- ## - - fSkeletonZ = (usDepthValue >> 3) / 1000.0 - - ## - ## Center of depth sensor is at (0,0,0) in skeleton space, and - ## and (160,120) in depth image coordinates. Note that positive Y - ## is up in skeleton space and down in image coordinates. - ## - - fSkeletonX = (fDepthX - 0.5) * (_NUI_CAMERA_DEPTH_IMAGE_TO_SKELETON_MULTIPLIER_320x240 * fSkeletonZ) * 320.0 - fSkeletonY = (0.5 - fDepthY) * (_NUI_CAMERA_DEPTH_IMAGE_TO_SKELETON_MULTIPLIER_320x240 * fSkeletonZ) * 240.0 - - ## - ## Return the result as a vector. - ## - - v4 = Vector() - v4.x = fSkeletonX - v4.y = fSkeletonY - v4.z = fSkeletonZ - v4.w = 1.0 - return v4 - - @staticmethod - def skeleton_to_depth_image(vPoint, scaleX = 1, scaleY = 1): - """Given a Vector4 returns X and Y coordinates fo display on the screen. Returns a tuple depthX, depthY""" - - if vPoint.z > _FLT_EPSILON: - ## - ## Center of depth sensor is at (0,0,0) in skeleton space, and - ## and (160,120) in depth image coordinates. Note that positive Y - ## is up in skeleton space and down in image coordinates. - ## - - pfDepthX = 0.5 + vPoint.x * ( _NUI_CAMERA_SKELETON_TO_DEPTH_IMAGE_MULTIPLIER_320x240 / vPoint.z ) / 320.0 - pfDepthY = 0.5 - vPoint.y * ( _NUI_CAMERA_SKELETON_TO_DEPTH_IMAGE_MULTIPLIER_320x240 / vPoint.z ) / 240.0 - - return pfDepthX * scaleX, pfDepthY * scaleY - - return 0.0, 0.0 - - -class Camera(object): - """Represents a Kinect sensor's camera.""" - - def __init__(self, runtime): - self.runtime = runtime - self.elevation_angle - - ElevationMaximum = 27 - ElevationMinimum = -27 - - def get_elevation_angle(self): - """Gets or sets the camera elevation angle. """ - return self.runtime._nui.NuiCameraElevationGetAngle() - - def set_elevation_angle(self, degrees): - """Gets or sets the camera elevation angle. """ - - self.runtime._nui.NuiCameraElevationSetAngle(degrees) - - elevation_angle = property(get_elevation_angle, set_elevation_angle) - - @property - def unique_device_name(self): - """Gets the camera's unique device name. """ - return self.runtime._nui.GetUniqueDeviceName() - - def get_color_pixel_coordinates_from_depth_pixel(self, color_resolution, view_area, depth_x, depth_y, depth_value): - """Returns the pixel coordinates in color image space that correspond to the specified pixel coordinates in depth image space. - -color_resolution: An ImageResolution value specifying the color image resolution. -view_area: An ImageViewArea structure containing the pan and zoom settings. If you provide this argument, you should pass in the view area from the image frame against which you are registering pixels, rather than manually instantiating and populating the structure. This helps ensure that your settings are valid. -depth_x: The x coordinate in depth image space. -depth_y: The y coordinate in depth image space. -depth_value The depth value in depth image space. - -Returns: color_x, color_y - the x and y coordinate in the color image space -""" - return self.runtime._nui.NuiImageGetColorPixelCoordinatesFromDepthPixel( - color_resolution, - view_area, - depth_x, - depth_y, - depth_value) - - -def TransformSmoothParameters(vPoint): - """returns depthX (float), depthY (float), depthValue (int)""" - - if vPoint.vector.z > _FLT_EPSILON: - - # Center of depth sensor is at (0,0,0) in skeleton space, and - # and (160,120) in depth image coordinates. Note that positive Y - # is up in skeleton space and down in image coordinates. 
- # - - pfDepthX = 0.5 + vPoint.vector.x * _NUI_CAMERA_SKELETON_TO_DEPTH_IMAGE_MULTIPLIER_320x240 / ( vPoint.vector.z * 320.0 ) - pfDepthY = 0.5 - vPoint.vector.y * _NUI_CAMERA_SKELETON_TO_DEPTH_IMAGE_MULTIPLIER_320x240 / ( vPoint.vector.z * 240.0 ) - - # - # Depth is in meters in skeleton space. - # The depth image pixel format has depth in millimeters shifted left by 3. - # - - pusDepthValue = int(vPoint.vector.z * 1000) << 3 - return pfDepthX, pfDepthY, pusDepthValue - - return 0.0, 0.0, 0 - - -class _event(object): - """class used for adding/removing/invoking a set of listener functions""" - __slots__ = ['handlers'] - - def __init__(self): - self.handlers = [] - - def __iadd__(self, other): - self.handlers.append(other) - return self - - def __isub__(self, other): - self.handlers.remove(other) - return self - - def fire(self, *args): - for handler in self.handlers: - handler(*args) - - diff --git a/Python/Product/PyKinect/PyKinect/pykinect/nui/_interop.py b/Python/Product/PyKinect/PyKinect/pykinect/nui/_interop.py deleted file mode 100644 index 48aa90c5ce..0000000000 --- a/Python/Product/PyKinect/PyKinect/pykinect/nui/_interop.py +++ /dev/null @@ -1,265 +0,0 @@ -# PyKinect -# Copyright(c) Microsoft Corporation -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the License); you may not use -# this file except in compliance with the License. You may obtain a copy of the -# License at http://www.apache.org/licenses/LICENSE-2.0 -# -# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -# MERCHANTABLITY OR NON-INFRINGEMENT. -# -# See the Apache Version 2.0 License for specific language governing -# permissions and limitations under the License. - -"""Contains low-level implementation details which aren't consumed directly by users of the Kinect API""" - -import ctypes -from pykinect.nui import KinectError, _NUIDLL -from pykinect.nui.structs import (ImageFrame, ImageResolution, ImageType, - ImageViewArea, SkeletonFrame, - TransformSmoothParameters, _Enumeration) - -_SEVERITY_ERROR = 1 - -class _KinectHRESULT(ctypes._SimpleCData): - """performs error checking and returns a custom error message for kinect HRESULTs""" - _type_ = "l" - @staticmethod - def _check_retval_(error): - if error < 0: - err = KinectError(error, 22) - error = error + 0xffffffff - error_msg = _KINECT_ERRORS.get(error, '') - if error_msg is not None: - err.strerror = err.message = error_msg - - raise err - -def _HRESULT_FROM_WIN32(error): - return 0x80070000 | error - -def _MAKE_HRESULT(sev, fac, code): - return (sev << 31) | (fac << 16) | code - - -class _PropsIndex(_Enumeration): - INDEX_UNIQUE_DEVICE_NAME = 0 - INDEX_LAST = 1 # don't use! - - -class _PropType(_Enumeration): - UNKNOWN = 0 # don't use - UINT = 1 # no need to return anything smaller than an int - FLOAT = 2 - BSTR = 3 # returns new BSTR. 
Use SysFreeString( BSTR ) when you're done - BLOB = 4 - - -############################################################################## -## -## Define NUI error codes derived from win32 errors -## - -_E_NUI_DEVICE_NOT_CONNECTED = _HRESULT_FROM_WIN32(1167) -_E_NUI_DEVICE_NOT_READY = _HRESULT_FROM_WIN32(21) -_E_NUI_ALREADY_INITIALIZED = _HRESULT_FROM_WIN32(1247) -_E_NUI_NO_MORE_ITEMS = _HRESULT_FROM_WIN32(259) - -## -## Define NUI specific error codes -## - -_FACILITY_NUI = 0x301 -_E_NUI_FRAME_NO_DATA = _MAKE_HRESULT(_SEVERITY_ERROR, _FACILITY_NUI, 1) -_E_NUI_STREAM_NOT_ENABLED = _MAKE_HRESULT(_SEVERITY_ERROR, _FACILITY_NUI, 2) -_E_NUI_IMAGE_STREAM_IN_USE = _MAKE_HRESULT(_SEVERITY_ERROR, _FACILITY_NUI, 3) -_E_NUI_FRAME_LIMIT_EXCEEDED = _MAKE_HRESULT(_SEVERITY_ERROR, _FACILITY_NUI, 4) -_E_NUI_FEATURE_NOT_INITIALIZED = _MAKE_HRESULT(_SEVERITY_ERROR, _FACILITY_NUI, 5) -_E_NUI_DATABASE_NOT_FOUND = _MAKE_HRESULT(_SEVERITY_ERROR, _FACILITY_NUI, 13) -_E_NUI_DATABASE_VERSION_MISMATCH = _MAKE_HRESULT(_SEVERITY_ERROR, _FACILITY_NUI, 14) - -_KINECT_ERRORS = { - _E_NUI_DEVICE_NOT_CONNECTED : 'Device not connected', - _E_NUI_DEVICE_NOT_READY : 'Device not ready', - _E_NUI_ALREADY_INITIALIZED : 'Device already initialized', - _E_NUI_NO_MORE_ITEMS : 'No more items', - _E_NUI_FRAME_NO_DATA : 'Frame has no data', - _E_NUI_STREAM_NOT_ENABLED : 'Stream is not enabled', - _E_NUI_IMAGE_STREAM_IN_USE : 'Image stream is already in use', - _E_NUI_FRAME_LIMIT_EXCEEDED : 'Frame limit exceeded', - _E_NUI_FEATURE_NOT_INITIALIZED : 'Feature not initialized', - _E_NUI_DATABASE_NOT_FOUND : 'Database not found', - _E_NUI_DATABASE_VERSION_MISMATCH : 'Database version mismatch', -} - -try: - c_bool = ctypes.c_bool -except: - c_bool = ctypes.c_uint - -_kernel32 = ctypes.WinDLL('kernel32') -_CreateEvent = _kernel32.CreateEventW -_CreateEvent.argtypes = [ctypes.c_voidp, ctypes.c_uint, ctypes.c_bool, ctypes.c_wchar_p] -_CreateEvent.restype = ctypes.c_voidp - -_CloseHandle = _kernel32.CloseHandle -_CloseHandle.argtypes = [ctypes.c_voidp] -_CloseHandle.restype = c_bool - -_WaitForSingleObject = _kernel32.WaitForSingleObject -_WaitForSingleObject.argtypes = [ctypes.c_voidp, ctypes.c_uint32] -_WaitForSingleObject.restype = ctypes.c_uint32 - -_WaitForMultipleObjects = _kernel32.WaitForMultipleObjects -_WaitForMultipleObjects.argtypes = [ctypes.c_uint32, ctypes.POINTER(ctypes.c_voidp), ctypes.c_uint, ctypes.c_uint32] -_WaitForMultipleObjects.restype = ctypes.c_uint32 - -_WAIT_OBJECT_0 = 0 -_INFINITE = 0xffffffff - -_oleaut32 = ctypes.WinDLL('oleaut32') -_SysFreeString = _oleaut32.SysFreeString -_SysFreeString.argtypes = [ctypes.c_voidp] -_SysFreeString.restype = ctypes.HRESULT - -class _NuiInstance(ctypes.c_voidp): - """this interface duplicates exactly the public DLL NUI**** methods that - work on just device #0. 
If you want to work with multiple devices, - use these methods off the INuiInstance, after getting a INuiInstance * from - the multiple-device methods below""" - - # vtable - _NuiInitialize = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_uint32)(3, 'NuiInitialize') - _NuiShutdown = ctypes.WINFUNCTYPE(None)(4, 'NuiShutdown') - _NuiSetFrameAndEvent = ctypes.WINFUNCTYPE(_KinectHRESULT, ctypes.c_voidp, ctypes.c_uint32)(5, 'NuiSetFrameEndEvent') - _NuiImageStreamOpen = ctypes.WINFUNCTYPE(_KinectHRESULT, ImageType, ImageResolution, ctypes.c_uint32, ctypes.c_uint32, ctypes.c_voidp, ctypes.POINTER(ctypes.c_voidp))(6, 'NuiImageStreamOpen') - _NuiImageStreamSetImageFrameFlags = 7 - _NuiImageStreamGetImageFrameFlags = 8 - _NuiImageStreamGetNextFrame = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_voidp, ctypes.c_uint32, ctypes.POINTER(ImageFrame))(9, 'NuiImageStreamGetNextFrame') - _NuiImageStreamReleaseFrame = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_voidp, ctypes.POINTER(ImageFrame))(10, 'NuiImageStreamReleaseFrame') - _NuiImageGetColorPixelCoordinatesFromDepthPixel = ctypes.WINFUNCTYPE(ctypes.HRESULT, - ImageResolution, - ctypes.POINTER(ImageViewArea), - ctypes.c_long, - ctypes.c_long, - ctypes.c_uint16, - ctypes.POINTER(ctypes.c_long), - ctypes.POINTER(ctypes.c_long))(11, 'NuiImageGetColorPixelCoordinatesFromDepthPixel') - _NuiImageGetColorPixelCoordinatesFromDepthPixelAtResolution = 12 - _NuiImageGetColorPixelCoordinateFrameFromDepthPixelFrameAtResolution = 13 - _NuiCameraElevationSetAngle = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_long)(14, 'NuiCameraElevationSetAngle') - _NuiCameraElevationGetAngle = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.POINTER(ctypes.c_long))(15, 'NuiCameraElevationGetAngle') - _NuiSkeletonTrackingEnable = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_voidp, ctypes.c_uint32)(16, 'NuiSkeletonTrackingEnable') - _NuiSkeletonTrackingDisable = ctypes.WINFUNCTYPE(ctypes.HRESULT)(17, 'NuiSkeletonTrackingDisable') - _NuiSkeletonSetTrackedSkeletons = 18 - _NuiSkeletonGetNextFrame = ctypes.WINFUNCTYPE(_KinectHRESULT, ctypes.c_uint32, ctypes.POINTER(SkeletonFrame))(19, 'NuiSkeletonGetNextFrame') - _NuiTransformSmooth = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.POINTER(SkeletonFrame), ctypes.POINTER(TransformSmoothParameters))(20, 'NuiTransformSmooth') - _NuiGetAudioSource = 21 - _InstanceIndex = ctypes.WINFUNCTYPE(ctypes.c_int)(22, 'InstanceIndex') - _NuiDeviceConnectionId = 23 - _NuiUniqueId = 24 - _NuiAudioArrayId = 25 - _NuiStatus = 26 - _NuiInitializationFlags = 27 - - def InstanceIndex(self): - """which instance # was it created with, in MSR_NuiCreateInstanceByIndex( )/etc?""" - print self.value - return _NuiInstance._InstanceIndex(self) - - def NuiInitialize(self, dwFlags = 0): - _NuiInstance._NuiInitialize(self, dwFlags) - - def NuiShutdown(self): - return _NuiInstance._NuiShutdown(self) - - def NuiImageStreamOpen(self, eImageType, eResolution, dwImageFrameFlags_NotUsed, dwFrameLimit, hNextFrameEvent = 0): - res = ctypes.c_voidp() - _NuiInstance._NuiImageStreamOpen(self, eImageType, eResolution, dwImageFrameFlags_NotUsed, dwFrameLimit, hNextFrameEvent, ctypes.byref(res)) - return res - - def NuiImageStreamGetNextFrame(self, hStream, dwMillisecondsToWait): - res = ImageFrame() - _NuiInstance._NuiImageStreamGetNextFrame(self, hStream, dwMillisecondsToWait, ctypes.byref(res)) - return res - - def NuiImageStreamReleaseFrame(self, hStream, pImageFrame): - _NuiInstance._NuiImageStreamReleaseFrame(self, hStream, pImageFrame) - - def 
NuiImageGetColorPixelCoordinatesFromDepthPixel(self, eColorResolution, pcViewArea, lDepthX, lDepthY, usDepthValue): - x, y = ctypes.c_long(), ctypes.c_long() - _NuiInstance._NuiImageGetColorPixelCoordinatesFromDepthPixel(self, eColorResolution, pcViewArea, lDepthX, lDepthY, usDepthValue, ctypes.byref(x), ctypes.byref(y)) - return x.value, y.value - - def NuiCameraElevationSetAngle(self, lAngleDegrees): - while 1: - try: - _NuiInstance._NuiCameraElevationSetAngle(self, lAngleDegrees) - return - except: - pass - - def NuiCameraElevationGetAngle(self): - res = ctypes.c_long() - _NuiInstance._NuiCameraElevationGetAngle(self, ctypes.byref(res)) - return res.value - - def NuiSkeletonTrackingEnable(self, hNextFrameEvent = 0, dwFlags = 0): - """Enables skeleton tracking. Succeeds or raises an exception. - - hNextFrameEvent: A handle to an application-allocated, manual reset event that will be set whenever a new frame - of skeleton data is available, and will be reset whenever the latest frame data is returned. - This can be None. - - dwFlags: Flags that control skeleton tracking, as a bitwise-OR combination SkeletonTracking values. - """ - _NuiInstance._NuiSkeletonTrackingEnable(self, hNextFrameEvent, dwFlags) - - def NuiSkeletonTrackingDisable(self): - _NuiInstance._NuiSkeletonTrackingDisable() - - def NuiSkeletonGetNextFrame(self, dwMillisecondsToWait): - frame = SkeletonFrame() - _NuiInstance._NuiSkeletonGetNextFrame(self, dwMillisecondsToWait, ctypes.byref(frame)) - return frame - - def NuiTransformSmooth(self, pSkeletonFrame, pSmoothingParams): - _NuiInstance._NuiTransformSmooth(self, pSkeletonFrame, pSmoothingParams) - - def GetUniqueDeviceName(self): - mem = ctypes.c_voidp() - # Size is currently not used, and when we get the unique device name we need to free the memory. - - _NuiInstance._MSR_NuiGetPropsBlob(self, _PropsIndex.INDEX_UNIQUE_DEVICE_NAME, ctypes.byref(mem), None) - res = ctypes.cast(mem, ctypes.c_wchar_p).value - _SysFreeString(mem) - return res - - def MSR_NuiGetPropsType(index): - return _NuiInstance._MSR_NuiGetPropsType(index) - - -##*********************** -## NUI enumeration function -##*********************** - -__NuiGetSensorCount = _NUIDLL.NuiGetSensorCount -__NuiGetSensorCount.argtypes = [ctypes.POINTER(ctypes.c_int)] -__NuiGetSensorCount.restype = ctypes.HRESULT - -def _NuiGetSensorCount(): - count = ctypes.c_int() - __NuiGetSensorCount(ctypes.byref(count)) - return count.value - -__NuiCreateSensorByIndex = _NUIDLL.NuiCreateSensorByIndex -__NuiCreateSensorByIndex.argtypes = [ctypes.c_int, ctypes.POINTER(_NuiInstance)] -__NuiCreateSensorByIndex.restype = ctypes.HRESULT - -def _NuiCreateSensorByIndex(index): - inst = _NuiInstance() - __NuiCreateSensorByIndex(index, ctypes.byref(inst)) - return inst diff --git a/Python/Product/PyKinect/PyKinect/pykinect/nui/structs.py b/Python/Product/PyKinect/PyKinect/pykinect/nui/structs.py deleted file mode 100644 index 55d4bf8a08..0000000000 --- a/Python/Product/PyKinect/PyKinect/pykinect/nui/structs.py +++ /dev/null @@ -1,631 +0,0 @@ -# PyKinect -# Copyright(c) Microsoft Corporation -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the License); you may not use -# this file except in compliance with the License. 
You may obtain a copy of the -# License at http://www.apache.org/licenses/LICENSE-2.0 -# -# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -# MERCHANTABLITY OR NON-INFRINGEMENT. -# -# See the Apache Version 2.0 License for specific language governing -# permissions and limitations under the License. - -"""defines the core data structures used for communicating w/ the Kinect APIs""" - -import ctypes -from ctypes import Array -from pykinect.nui import _NUIDLL - -NUI_SKELETON_COUNT = 6 - -class _EnumerationType(type(ctypes.c_int)): - """metaclass for an enumeration like type for ctypes""" - - def __new__(metacls, name, bases, dict): - cls = type(ctypes.c_int).__new__(metacls, name, bases, dict) - for key, value in cls.__dict__.items(): - if key.startswith('_') and key.endswith('_'): continue - - setattr(cls, key, cls(key, value)) - - return cls - - -class _Enumeration(ctypes.c_int): - """base class for enumerations""" - - __metaclass__ = _EnumerationType - def __init__(self, name, value): - self.name = name - ctypes.c_int.__init__(self, value) - - def __hash__(self): - return self.value - - def __int__(self): - return self.value - - def __index__(self): - return self.value - - def __repr__(self): - if hasattr(self, 'name'): - return "<%s.%s (%r)>" % (self.__class__.__name__, self.name, self.value) - - name = '??' - for x in type(self).__dict__: - if x.startswith('_') and x.endswith('_'): continue - - if getattr(self, x, None).value == self.value: - name = x - break - - return "<%s.%s (%r)>" % (self.__class__.__name__, name, self.value) - - def __eq__(self, other): - if type(self) is not type(other): - return self.value == other - - return self.value == other.value - - def __ne__(self, other): - if type(self) is not type(other): - return self.value != other - - return self.value != other.value - - -class Vector(ctypes.Structure): - """Represents vector data.""" - _fields_ = [('x', ctypes.c_float), - ('y', ctypes.c_float), - ('z', ctypes.c_float), - ('w', ctypes.c_float) - ] - - def __init__(self, x = 0.0, y = 0.0, z = 0.0, w = 0.0): - self.x = x - self.y = y - self.z = z - self.w = w - - def __eq__(self, other): - return (self.x == other.x and - self.y == other.y and - self.z == other.z and - self.w == other.w) - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return '' % (self.x, self.y, self.z, self.w) - - -class Matrix4(Array): - """4x4 matrix. Can be accessed using matrix[0,0] ... matrix[3,3] or can be accessed using - matrix.M11 ... matrix.M44 for similarity to .NET and the C data structures. matrix[0,1] is - the same as matrix.M12. - - Used to provide bone rotation information. 
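
The two access styles in the docstring above address the same flat, row-major storage: matrix[row, col] reads element row * 4 + col, while M<r><c> is the 1-based spelling of the same element. Note that the get_M11..get_M44 helpers below call Array.__getitem__ without passing self, so only the matrix[row, col] form works as written; a minimal sketch of that form:

    from pykinect.nui.structs import Matrix4

    m = Matrix4()
    m[1, 2] = 0.5            # row 1, column 2 (0-based) -> flat index 1 * 4 + 2 == 6
    assert m[1, 2] == 0.5    # the element the docstring calls M23
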
- """ - - _length_ = 16 - _type_ = ctypes.c_float - - def __getitem__(self, index): - return Array.__getitem__(self, index[1] + index[0] * 4) - - def __setitem__(self, index, value): - return Array.__setitem__(self, index[1] + index[0] * 4, value) - - def get_M11(self): return Array.__getitem__(0) - def set_M11(self, value): Array.__setitem__(0, value) - M11 = property(get_M11, set_M11) - - def get_M12(self): return Array.__getitem__(1) - def set_M12(self, value): Array.__setitem__(1, value) - M12 = property(get_M12, set_M12) - - def get_M13(self): return Array.__getitem__(2) - def set_M13(self, value): Array.__setitem__(2, value) - M13 = property(get_M13, set_M13) - - def get_M14(self): return Array.__getitem__(3) - def set_M14(self, value): Array.__setitem__(3, value) - M14 = property(get_M14, set_M14) - - def get_M21(self): return Array.__getitem__(4) - def set_M21(self, value): Array.__setitem__(4, value) - M21 = property(get_M21, set_M21) - - def get_M22(self): return Array.__getitem__(5) - def set_M22(self, value): Array.__setitem__(5, value) - M22 = property(get_M22, set_M22) - - def get_M23(self): return Array.__getitem__(6) - def set_M23(self, value): Array.__setitem__(6, value) - M23 = property(get_M23, set_M23) - - def get_M24(self): return Array.__getitem__(7) - def set_M24(self, value): Array.__setitem__(7, value) - M24 = property(get_M24, set_M24) - - def get_M31(self): return Array.__getitem__(8) - def set_M31(self, value): Array.__setitem__(8, value) - M31 = property(get_M31, set_M31) - - def get_M32(self): return Array.__getitem__(9) - def set_M32(self, value): Array.__setitem__(9, value) - M32 = property(get_M32, set_M32) - - def get_M33(self): return Array.__getitem__(10) - def set_M33(self, value): Array.__setitem__(10, value) - M33 = property(get_M33, set_M33) - - def get_M34(self): return Array.__getitem__(11) - def set_M34(self, value): Array.__setitem__(11, value) - M34 = property(get_M34, set_M34) - - def get_M41(self): return Array.__getitem__(12) - def set_M41(self, value): Array.__setitem__(12, value) - M41 = property(get_M41, set_M41) - - def get_M42(self): return Array.__getitem__(13) - def set_M42(self, value): Array.__setitem__(13, value) - M42 = property(get_M42, set_M42) - - def get_M43(self): return Array.__getitem__(14) - def set_M43(self, value): Array.__setitem__(14, value) - M43 = property(get_M43, set_M43) - - def get_M44(self): return Array.__getitem__(15) - def set_M44(self, value): Array.__setitem__(15, value) - M44 = property(get_M44, set_M44) - - -class _NuiLockedRect(ctypes.Structure): - _fields_ = [('pitch', ctypes.c_int32), - ('size', ctypes.c_int32), - ('bits', ctypes.c_voidp)] - - -class _NuiSurfaceDesc(ctypes.Structure): - _fields_ = [('width', ctypes.c_uint32), - ('height', ctypes.c_uint32) - ] - -class PlanarImage(ctypes.c_voidp): - """Represents a video image.""" - _BufferLen = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_int32)(3, 'BufferLen') - _Pitch = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_int32)(4, 'Pitch') - _LockRect = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_uint, ctypes.POINTER(_NuiLockedRect), ctypes.c_voidp, ctypes.c_uint32)(5, '_LockRect') - _GetLevelDesc = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_uint32, ctypes.POINTER(_NuiSurfaceDesc))(6, '_GetLevelDesc') - _UnlockRect = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_uint32)(7, '_UnlockRect') - - @property - def width(self): - desc = _NuiSurfaceDesc() - PlanarImage._GetLevelDesc(self, 0, ctypes.byref(desc)) - return desc.width - - @property - def height(self): - 
desc = _NuiSurfaceDesc() - PlanarImage._GetLevelDesc(self, 0, ctypes.byref(desc)) - return desc.height - - @property - def bytes_per_pixel(self): - return self.pitch / self.width - - @property - def bits(self): - buffer = (ctypes.c_byte * self.buffer_length)() - self.copy_bits(buffer) - return buffer - - def copy_bits(self, dest): - """copies the bits of the image to the provided destination address""" - desc = _NuiSurfaceDesc() - - PlanarImage._GetLevelDesc(self, 0, ctypes.byref(desc)) - - rect = _NuiLockedRect() - PlanarImage._LockRect(self, 0, ctypes.byref(rect), None, 0) - ctypes.memmove(dest, rect.bits, desc.height * rect.pitch) - PlanarImage._UnlockRect(self, 0) - - @property - def buffer_length(self): - return self.width * self.height * self.bytes_per_pixel - - @property - def pitch(self): - rect = _NuiLockedRect() - PlanarImage._LockRect(self, 0, ctypes.byref(rect), None, 0) - res = rect.pitch - PlanarImage._UnlockRect(self, 0) - return res - - -class ImageType(_Enumeration): - """Specifies an image type. """ - depth_and_player_index = DepthAndPlayerIndex = 0 # USHORT - color = Color = 1 # RGB32 data - color_yuv = ColorYuv = 2 # YUY2 stream from camera h/w, but converted to RGB32 before user getting it. - color_yuv_raw = ColorYuvRaw = 3 # YUY2 stream from camera h/w. - depth = Depth = 4 # USHORT - - -class ImageResolution(_Enumeration): - """Specifies image resolution.""" - invalid = Invalid = -1 - resolution_80x60 = Resolution80x60 = 0 - resolution_320x240 = Resolution320x240 = 1 - resolution_640x480 = Resolution640x480 = 2 - resolution_1280x1024 = Resolution1280x1024 = 3 # for hires color only - - -class SkeletonTracking(_Enumeration): - suppress_no_frame_data = 0x00000001 # Prevents NuiSkeletonGetNextFrame from returning E_NUI_FRAME_NO_DATA errors. Instead, calls to NuiSkeletonGetNextFrame block until data is available or the timeout period passes. - title_sets_tracked_skeletons = 0x00000002 # Disables the default player selection mode and enables the title to manage which players have tracked skeletons. - enable_seated_support = 0x00000004 # Uses seated skeleton tracking mode. The 10 lower-body joints of each skeleton will not be tracked. - enable_in_near_range = 0x00000008 - -class ImageDigitalZoom(_Enumeration): - """Specifies the zoom factor.""" - - zoom_1x = Zoom1x = 0 # A zoom factor of 1.0. - zoom_2x = Zoom2x = 1 # A zoom factor of 2.0. - - -class ImageViewArea(ctypes.Structure): - """Specifies the image view area. """ - _fields_ = [('Zoom', ctypes.c_int), # An ImageDigitalZoom value that specifies the zoom factor. - ('CenterX', ctypes.c_long), # The horizontal offset from center, for panning. - ('CenterY', ctypes.c_long) # The vertical offset from center, for panning. - ] - - def get_zoom(self): - return self.Zoom - - def set_zoom(self, value): - self.Zoom = value - - zoom = property(get_zoom, set_zoom) - - def get_center_x(self): - return self.CenterX - - def set_center_x(self, value): - self.CenterX = value - - def get_center_y(self): - return self.CenterY - - center_x = property(get_center_x, set_center_x) - - def set_center_y(self, value): - self.CenterY = value - - center_y = property(get_center_y, set_center_y) - - -class ImageFrame(ctypes.Structure): - _fields_ = [('timestamp', ctypes.c_longlong), # The timestamp (in milliseconds) of the most recent frame. The clock starts when you call Initialize. - ('frame_number', ctypes.c_uint32), # Returns the frame number - ('type', ImageType), # An ImageType value that specifies the image type. 
- ('resolution', ImageResolution), # An ImageResolution value that specifies the image resolution. - ('image', PlanarImage), # A PlanarImage object that represents the image. - ('flags', ctypes.c_uint32), # flags, not used - ('view_area', ImageViewArea), # An ImageViewArea value that specifies the view area. - ] - - -class JointId(_Enumeration): - """Specifies the various skeleton joints. """ - hip_center = HipCenter = 0 - spine = Spine = 1 - shoulder_center = ShoulderCenter = 2 - head = Head = 3 - shoulder_left = ShoulderLeft = 4 - elbow_left = ElbowLeft = 5 - wrist_left = WristLeft = 6 - hand_left = HandLeft = 7 - shoulder_right = ShoulderRight = 8 - elbow_right = ElbowRight = 9 - wrist_right = WristRight = 10 - hand_right = HandRight = 11 - hip_left = HipLeft = 12 - knee_left = KneeLeft = 13 - ankle_left = AnkleLeft = 14 - foot_left = FootLeft = 15 - hip_right = HipRight = 16 - knee_right = KneeRight = 17 - ankle_right = AnkleRight = 18 - foot_right = FootRight = 19 - count = Count = 20 - -class SkeletonBoneRotation(ctypes.Structure): - _fields_ = [('rotation_matrix', Matrix4), - ('rotation_quaternion', Vector)] - - def __repr__(self): - return '' % (self.rotation_matrix, self.rotation_quaternion) - -class SkeletonBoneOrientation(ctypes.Structure): - _fields_ = [('end_joint', JointId), - ('start_joint', JointId), - ('hierarchical_rotation', SkeletonBoneRotation), - ('absolute_rotation', SkeletonBoneRotation), - ] - - def __repr__(self): - return '' % (self.end_joint, self.start_joint, self.hierarchical_rotation, self.absolute_rotation) - - -class JointTrackingState(_Enumeration): - """Specifies the joint tracking state. """ - not_tracked = NOT_TRACKED = 0 - inferred = INFERRED = 1 - tracked = TRACKED = 2 - - -class SkeletonTrackingState(_Enumeration): - """Specifies a skeleton's tracking state.""" - not_tracked = NOT_TRACKED = 0 - position_only = POSITION_ONLY = 1 - tracked = TRACKED = 2 - -class SkeletonFrameQuality(_Enumeration): - """Specifies skeleton frame quality. """ - camera_motion = CameraMotion = 0x01 - extrapolated_floor = ExtrapolatedFloor = 0x02 - upper_body_skeleton = UpperBodySkeleton = 0x04 - seated_support_enabled = 0x08 - - -class SkeletonQuality(_Enumeration): - """Specifies how much of the skeleton is visible. 
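
The clipped_* values defined just below are bit flags, so a skeleton's quality field is tested with a bitwise AND on the underlying integer value; a small sketch (data is an assumed SkeletonData instance):

    from pykinect.nui.structs import SkeletonQuality

    # data: a SkeletonData taken from SkeletonFrame.SkeletonData (assumed)
    feet_clipped = bool(data.skeleton_quality.value & SkeletonQuality.clipped_bottom.value)
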
""" - clipped_right = ClippedRight = 0x00000001 - clipped_left = ClippedLeft = 0x00000002 - clipped_top = ClippedTop = 0x00000004 - clipped_bottom = ClippedBottom = 0x00000008 - -NUI_SKELETON_POSITION_COUNT = JointId.Count.value - -class SkeletonData(ctypes.Structure): - """Contains data that characterizes a skeleton.""" - _fields_ = [('eTrackingState', SkeletonTrackingState), - ('dwTrackingID', ctypes.c_uint32), - ('dwEnrollmentIndex', ctypes.c_uint32), - ('dwUserIndex', ctypes.c_uint32), - ('Position', Vector), - ('SkeletonPositions', ctypes.ARRAY(Vector, NUI_SKELETON_POSITION_COUNT)), - ('eSkeletonPositionTrackingState', ctypes.ARRAY(JointTrackingState, NUI_SKELETON_POSITION_COUNT)), - ('Quality', SkeletonQuality), - ] - - def get_tracking_state(self): - return self.eTrackingState - - def set_tracking_state(self, value): - self.eTrackingState = value - - tracking_state = property(get_tracking_state, set_tracking_state) - - def get_tracking_id(self): - return self.dwTrackingID - - def set_tracking_id(self, value): - self.dwTrackingID = value - - tracking_id = property(get_tracking_id, set_tracking_id) - - def get_enrollment_index(self): - return self.dwEnrollmentIndex - - def set_enrollment_index(self, value): - self.dwEnrollmentIndex = value - - enrollment_index = property(get_enrollment_index, set_enrollment_index) - - def get_user_index(self): - return self.dwUserIndex - - def set_user_index(self, value): - self.dwUserIndex = value - - user_index = property(get_user_index, set_user_index) - - def get_position(self): - return self.Position - - def set_position(self, value): - self.Position = value - - position = property(get_position, set_position) - - def get_skeleton_positions(self): - return self.SkeletonPositions - - def set_skeleton_positions(self, value): - self.SkeletonPositions = value - - skeleton_positions = property(get_skeleton_positions, set_skeleton_positions) - - def get_skeleton_position_tracking_states(self): - return self.eSkeletonPositionTrackingState - - def set_skeleton_position_tracking_states(self, value): - self.eSkeletonPositionTrackingState = value - - skeleton_position_tracking_states = property(get_skeleton_position_tracking_states, - set_skeleton_position_tracking_states) - - def get_skeleton_quality(self): - return self.Quality - - def set_skeleton_quality(self, value): - self.Quality = value - - skeleton_quality = property(get_skeleton_quality, set_skeleton_quality) - - def calculate_bone_orientations(self): - """Calculate bone orientations for a skeleton. - - The function calculates hierarchical and absolute joint angles for the skeleton, which can - be used in animating an avatar (Avateering). The HipCenter joint is the root of the hierarchy, - and describes an absolute rotation in the right-hand camera coordinate system. All other - joints describe rotations relative to their parent joint orientation. The angles are returned - in the same order as the joints are defined. 
- - Returns a sequence of SkeletonBoneOrientation objects.""" - arr = (SkeletonBoneOrientation*JointId.Count)() - _NuiSkeletonCalculateBoneOrientations(self, arr) - return tuple(arr) - - def __repr__(self): - return '' % (self.eTrackingState, - self.dwTrackingID, - self.Position) - def __eq__(self, other): - if (self.tracking_state == other.tracking_state and - self.tracking_id == other.tracking_id and - self.enrollment_index == other.enrollment_index and - self.user_index == other.user_index and - self.position == other.position and - self.skeleton_quality == other.skeleton_quality): - - for i in range(len(self.skeleton_positions)): - if (self.skeleton_positions[i] != other.skeleton_positions[i] or - self.skeleton_position_tracking_states[i] != other.skeleton_position_tracking_states[i]): - return False - - return True - - return False - - def __ne__(self, other): - return not self.__eq__(other) - - def __nonzero__(self): - return self.tracking_state != SkeletonTrackingState.not_tracked - -_NuiSkeletonCalculateBoneOrientations = _NUIDLL.NuiSkeletonCalculateBoneOrientations -_NuiSkeletonCalculateBoneOrientations.argtypes = [ctypes.POINTER(SkeletonData), ctypes.POINTER(SkeletonBoneOrientation)] -_NuiSkeletonCalculateBoneOrientations.restype = ctypes.HRESULT - - -class SkeletonFrame(ctypes.Structure): - _pack_ = 16 - _fields_ = [('liTimeStamp', ctypes.c_longlong), - ('dwFrameNumber', ctypes.c_uint32), - ('Quality', SkeletonFrameQuality), - ('vFloorClipPlane', Vector), - ('vNormalToGravity', Vector), - ('SkeletonData', ctypes.ARRAY(SkeletonData, NUI_SKELETON_COUNT)), - ] - - def get_timestamp(self): - return self.liTimeStamp - - def set_timestamp(self, value): - self.liTimeStamp = value - - timestamp = property(get_timestamp, set_timestamp) - - def get_frame_number(self): - return self.dwFrameNumber - - def set_frame_number(self, value): - self.dwFrameNumber = value - - frame_number = property(get_frame_number, set_frame_number) - - def get_quality(self): - return self.Quality - - def set_quality(self, value): - self.Quality = value - - quality = property(get_quality, set_quality) - - def get_floor_clip_plane(self): - return self.vFloorClipPlane - - def set_floor_clip_plane(self, value): - self.vFloorClipPlane = value - - floor_clip_plane = property(get_floor_clip_plane, set_floor_clip_plane) - - def get_normal_to_gravity(self): - return self.vNormalToGravity - - def set_normal_to_gravity(self, value): - self.vNormalToGravity = value - - normal_to_gravity = property(get_normal_to_gravity, set_normal_to_gravity) - - def get_skeleton_data(self): - return self.SkeletonData - - def set_skeleton_data(self, value): - self.SkeletonData = value - - skeleton_data = property(get_skeleton_data, set_skeleton_data) - - -class TransformSmoothParameters(ctypes.Structure): - """Contains transform smoothing parameters. 
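
These fields mirror the native smoothing parameters handed to the skeleton smoothing call. For illustration, a sketch using the properties defined below together with _NuiInstance.NuiTransformSmooth from _interop.py; the numeric values are plausible defaults, not taken from this diff:

    from pykinect.nui.structs import TransformSmoothParameters

    params = TransformSmoothParameters()
    params.smoothing = 0.5
    params.correction = 0.5
    params.prediction = 0.5
    params.jitter_radius = 0.05
    params.max_deviation_radius = 0.04
    # then: device.NuiTransformSmooth(frame, params), where device is a _NuiInstance
    # and frame came from device.NuiSkeletonGetNextFrame(...)
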
""" - _fields_ = [('fSmoothing', ctypes.c_float), - ('fCorrection', ctypes.c_float), - ('fPrediction', ctypes.c_float), - ('fJitterRadius', ctypes.c_float), - ('fMaxDeviationRadius', ctypes.c_float) - ] - - def get_smoothing(self): - return self.fSmoothing - - def set_smoothing(self, value): - self.fSmoothing = value - - smoothing = property(get_smoothing, set_smoothing) - - def get_correction(self): - return self.fCorrection - - def set_correction(self, value): - self.fCorrection = value - - correction = property(get_correction, set_correction) - - def get_prediction(self): - return self.fPrediction - - def set_prediction(self, value): - self.fPrediction = value - - prediction = property(get_prediction, set_prediction) - - def get_jitter_radius(self): - return self.fJitterRadius - - def set_jitter_radius(self, value): - self.fJitterRadius = value - - jitter_radius = property(get_jitter_radius, set_jitter_radius) - - def get_max_deviation_radius(self): - return self.fMaxDeviationRadius - - def set_max_deviation_radius(self, value): - self.fMaxDeviationRadius = value - - max_deviation_radius = property(get_max_deviation_radius, set_max_deviation_radius) diff --git a/Python/Product/PyKinect/PyKinect/setup.py b/Python/Product/PyKinect/PyKinect/setup.py deleted file mode 100644 index aff1896b2c..0000000000 --- a/Python/Product/PyKinect/PyKinect/setup.py +++ /dev/null @@ -1,136 +0,0 @@ -# PyKinect -# Copyright(c) Microsoft Corporation -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the License); you may not use -# this file except in compliance with the License. You may obtain a copy of the -# License at http://www.apache.org/licenses/LICENSE-2.0 -# -# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -# MERCHANTABLITY OR NON-INFRINGEMENT. -# -# See the Apache Version 2.0 License for specific language governing -# permissions and limitations under the License. - -import os -from warnings import warn -try: - from setuptools import setup, Extension - from setuptools.command.build_ext import build_ext -except ImportError: - from distutils.core import setup, Extension - from distutils.command.build_ext import build_ext - -import distutils.msvc9compiler -from distutils.sysconfig import get_config_var -from distutils.util import get_platform - -long_description = 'The pykinect package provides access to the Kinect device. The pykinect package includes both the "nui" and "audio" subpackages. The nui package provides interactions with the Kinect cameras including skeleton tracking, video camera, as well as the depth camera. The audio subpackage provides access to the Kinect devices microphones.' 
- -classifiers = [ - 'Development Status :: 5 - Production/Stable', - 'Environment :: Win32 (MS Windows)', - 'License :: OSI Approved :: Apache Software License', - 'Natural Language :: English', - 'Operating System :: Microsoft :: Windows', - 'Operating System :: Microsoft :: Windows :: Windows 7', - 'Programming Language :: C++', - 'Programming Language :: Python', - 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 2 :: Only', - 'Topic :: Games/Entertainment', - 'Topic :: Multimedia :: Graphics', - 'Topic :: Multimedia :: Graphics :: Capture', - 'Topic :: Multimedia :: Sound/Audio', -] - - -def _find_vcvarsall(version): - Reg = distutils.msvc9compiler.Reg - try: - from winreg import HKEY_LOCAL_MACHINE - except ImportError: - from _winreg import HKEY_LOCAL_MACHINE - for sxs_key in [ - Reg.read_values(HKEY_LOCAL_MACHINE, r'SOFTWARE\Wow6432Node\Microsoft\VisualStudio\SxS\VC7'), - Reg.read_values(HKEY_LOCAL_MACHINE, r'SOFTWARE\Microsoft\VisualStudio\SxS\VC7') - ]: - for vcpath in (sxs_key[v] for v in ['12.0', '11.0', '10.0'] if v in sxs_key): - if os.path.exists(os.path.join(vcpath, 'vcvarsall.bat')): - return os.path.join(vcpath, 'vcvarsall.bat') - return None - -# Replace the original find_vcvarsall with our version, which will find newer -# versions of MSVC. -distutils.msvc9compiler.find_vcvarsall = _find_vcvarsall - -class pykinect_build_ext(build_ext): - def initialize_options(self): - build_ext.initialize_options(self) - self.inplace = 1 - - def get_ext_filename(self, ext_name): - return build_ext.get_ext_filename(self, ext_name).replace(get_config_var('SO'), '.dll') - - def get_export_symbols(self, ext): - return ext.export_symbols - - def get_source_files(self): - filenames = build_ext.get_source_files(self) - - for ext in self.extensions: - filenames.extend(getattr(ext, 'headers', [])) - - return filenames - -kinectsdk_dir = os.environ.get('KINECTSDK10_DIR', '') -if kinectsdk_dir: - kinectsdk_inc = os.path.join(kinectsdk_dir, 'inc') - kinectsdk_lib = os.path.join(kinectsdk_dir, 'lib', distutils.msvc9compiler.PLAT_TO_VCVARS[get_platform()]) -else: - warn("Cannot find KINECTSDK10_DIR environment variable. 
You will need to install the Kinect for Windows SDK if building.") - -pykinectaudio_ext = Extension( - 'pykinect.audio.PyKinectAudio', - include_dirs=filter(None, ['src', kinectsdk_inc]), - libraries=['Msdmo', 'dmoguids', 'mf', 'mfuuid', 'mfplat', 'avrt', 'Kinect10'], - library_dirs=filter(None, [kinectsdk_lib]), - sources=[ - 'src\\stdafx.cpp', - 'src\\PyKinectAudio.cpp', - 'src\\AudioStream.cpp', - 'src\\MediaBuffer.cpp', - ], -) - -pykinectaudio_ext.headers=[ - 'src\\AudioStream.h', - 'src\\MediaBuffer.h', - 'src\\PyKinectAudio.h', - 'src\\stdafx.h', - 'src\\targetver.h', -] - - -setup_cfg = dict( - name='pykinect', - version='2.1', - description='PyKinect Module for interacting with the Kinect SDK', - long_description=long_description, - author='Microsoft Corporation', - author_email='ptvshelp@microsoft.com', - url='http://pytools.codeplex.com/', - zip_safe=False, - packages=['pykinect', 'winspeech', 'pykinect.audio', 'pykinect.nui'], - platforms=["win32"], - classifiers=classifiers, - package_data={ - '': ['*.txt'], - }, - cmdclass={'build_ext': pykinect_build_ext}, - ext_modules=[pykinectaudio_ext] -) - -setup(**setup_cfg) diff --git a/Python/Product/PyKinect/PyKinect/src/AudioStream.cpp b/Python/Product/PyKinect/PyKinect/src/AudioStream.cpp deleted file mode 100644 index d2f2caf207..0000000000 --- a/Python/Product/PyKinect/PyKinect/src/AudioStream.cpp +++ /dev/null @@ -1,394 +0,0 @@ -/* PyKinect -// Copyright(c) Microsoft Corporation -// All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the License); you may not use -// this file except in compliance with the License. You may obtain a copy of the -// License at http://www.apache.org/licenses/LICENSE-2.0 -// -// THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -// OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -// IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -// MERCHANTABLITY OR NON-INFRINGEMENT. -// -// See the Apache Version 2.0 License for specific language governing -// permissions and limitations under the License. -*/ - -#include "stdafx.h" -#include "PyKinectAudio.h" - -/*************************************************************************************** -* ISpStreamFormat implementation -* -*/ - -int TargetDurationInSec = 10; - -AudioStream::AudioStream(IMediaObject* mediaObject, DWORD readStaleThreshold) { - _refCount = 1; - _mediaObject = mediaObject; - InitializeCriticalSection(&_lock); - - // our wave format is fixed, so these all end up being constants... - _CurrentCaptureLength = 0; - _curBuffer = nullptr; - _CurrentReadIndex = 0; - _count = 0; - _shouldExit = false; - _readStaleThreshold = readStaleThreshold; - _readCallback = nullptr; - - DWORD threadId; - _captureThread = CreateThread(NULL, 0, CaptureThread, this, 0, &threadId); - if(_captureThread == nullptr) { - throw std::exception("Failed to create capture thread"); - } - - // our thread needs to keep us alive until we request that we exit... 
- AddRef(); - - mediaObject->AddRef(); -} - -AudioStream::AudioStream(ReadCallback callback) { - InitializeCriticalSection(&_lock); - - _readCallback = callback; - _curBuffer = nullptr; - _refCount = 1; - _mediaObject = nullptr; - _captureThread = nullptr; -} - -AudioStream::~AudioStream() { - // lock needs to be released before we delete this - LockHolder lock(this); - - // clean up all of the saved media buffers - - // buffers in flight should free themselves when they're released, we indicate this via - // clearing their parent which they'll check (and we'll check again if an external consumer - // of them Releases them while this is in flight) - for(auto front = this->_buffers.begin(); front != this->_buffers.end(); front++) { - (*front)->_parentStream = nullptr; - } - - // then delete the cached free buffers - while(!this->_freeBuffers.empty()) { - delete this->_freeBuffers.front(); - this->_freeBuffers.pop(); - } - - if(_mediaObject != nullptr) { - - _mediaObject->Release(); - } -} - -void AudioStream::FreeBuffer(MediaBuffer* buffer) { - LockHolder lock(this); - - // need to check parent stream here again because we now finally hold the lock - if(buffer->_parentStream == nullptr) { - delete buffer; - }else{ - _freeBuffers.push(buffer); - } -} - -MediaBuffer* AudioStream::GetBuffer() { - LockHolder lock(this); - - if(_freeBuffers.empty()) { - return new (nothrow) MediaBuffer(this); - }else{ - auto res = _freeBuffers.front(); - _freeBuffers.pop(); - res->ReInit(); - return res; - } -} - -DWORD WINAPI AudioStream::CaptureThread(LPVOID thisObj) { - CoInitializeEx(NULL, COINIT_MULTITHREADED); - - auto self = (AudioStream*)thisObj; - while(!self->_shouldExit) { - DMO_OUTPUT_DATA_BUFFER outputBuffer; - DWORD status; - auto buffer = self->GetBuffer(); - if(buffer == nullptr) { - continue; - } - - memset(&outputBuffer, 0, sizeof(DMO_OUTPUT_DATA_BUFFER )); - - outputBuffer.pBuffer = buffer; - - HRESULT hr = self->_mediaObject->ProcessOutput(0, 1, &outputBuffer, &status); - if(SUCCEEDED(hr)) { - LockHolder lock(self); - - self->_buffers.push_back(buffer); - } - } - - // we hold a ref to keep us alive, close us now. 
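
// Reference-counting handshake between the stream and its capture thread:
//   * the constructor above AddRef()s on behalf of the thread it spawns,
//   * AudioStream::Release() sets _shouldExit once only that reference remains,
//   * the thread then closes its handle and drops the final reference just below,
//     which lets Release() delete the stream.
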
- CloseHandle(self->_captureThread); - self->Release(); - return 0; -} - -HRESULT STDMETHODCALLTYPE AudioStream::SetNotifySink( - /* [in] */ ISpNotifySink *pNotifySink) { - return S_OK; -} - -HRESULT STDMETHODCALLTYPE AudioStream::SetNotifyWindowMessage( - /* [in] */ HWND hWnd, - /* [in] */ UINT Msg, - /* [in] */ WPARAM wParam, - /* [in] */ LPARAM lParam) { - return S_OK; -} - -HRESULT STDMETHODCALLTYPE AudioStream::SetNotifyCallbackFunction( - /* [in] */ SPNOTIFYCALLBACK *pfnCallback, - /* [in] */ WPARAM wParam, - /* [in] */ LPARAM lParam) { - return S_OK; -} - -HRESULT STDMETHODCALLTYPE AudioStream::SetNotifyCallbackInterface( - /* [in] */ ISpNotifyCallback *pSpCallback, - /* [in] */ WPARAM wParam, - /* [in] */ LPARAM lParam) { - return S_OK; -} - -HRESULT STDMETHODCALLTYPE AudioStream::SetNotifyWin32Event( void) { - return S_OK; -} - -HRESULT STDMETHODCALLTYPE AudioStream::WaitForNotifyEvent( - /* [in] */ DWORD dwMilliseconds) { - return S_OK; -} - -HANDLE STDMETHODCALLTYPE AudioStream::GetNotifyEventHandle( void) { - return S_OK; -} - -HRESULT STDMETHODCALLTYPE AudioStream::AddEvents( - /* [in] */ const SPEVENT *pEventArray, - /* [in] */ ULONG ulCount) { - return S_OK; -} - -HRESULT STDMETHODCALLTYPE AudioStream::GetEventInterest( - /* [out] */ ULONGLONG *pullEventInterest) { - pullEventInterest = 0; - return S_OK; -} - -HRESULT STDMETHODCALLTYPE AudioStream::SetInterest( - /* [in] */ ULONGLONG ullEventInterest, - /* [in] */ ULONGLONG ullQueuedInterest) { - return S_OK; -} - -HRESULT STDMETHODCALLTYPE AudioStream::GetEvents( - /* [in] */ ULONG ulCount, - /* [size_is][out] */ SPEVENT *pEventArray, - /* [out] */ ULONG *pulFetched){ - *pulFetched = 0; - return S_OK; -} - -HRESULT STDMETHODCALLTYPE AudioStream::GetInfo( - /* [out] */ SPEVENTSOURCEINFO *pInfo) { - return E_FAIL; -} - -HRESULT STDMETHODCALLTYPE AudioStream::GetFormat( - GUID *pguidFormatId, - WAVEFORMATEX **ppCoMemWaveFormatEx) { - *ppCoMemWaveFormatEx = (WAVEFORMATEX*)CoTaskMemAlloc(sizeof(WAVEFORMATEX)); - if(*ppCoMemWaveFormatEx == nullptr) { - return E_OUTOFMEMORY; - } - - auto format = WAVEFORMATEX(); - format.cbSize = 0; - format.nChannels = 1; - format.nSamplesPerSec = 16000; - format.nAvgBytesPerSec = 32000; - format.nBlockAlign = 2; - format.wBitsPerSample = 16; - format.wFormatTag = WAVE_FORMAT_PCM; - - memcpy(*ppCoMemWaveFormatEx, &format, sizeof(WAVEFORMATEX)); - - static const GUID waveFormatGuid = - { 0xC31ADBAE, 0x527F, 0x4ff5, { 0xA2, 0x30, 0xF6, 0x2B, 0xB6, 0x1F, 0xF7, 0x0C } }; - - *pguidFormatId = waveFormatGuid; - return S_OK; -} - -HRESULT STDMETHODCALLTYPE AudioStream::QueryInterface( - /* [in] */ REFIID riid, - /* [iid_is][out] */ __RPC__deref_out void __RPC_FAR *__RPC_FAR *ppvObject) { - if(riid == __uuidof(IUnknown)) { - AddRef(); - *ppvObject = static_cast(static_cast(this)); - return S_OK; - }else if(riid == __uuidof(ISpStreamFormat)) { - AddRef(); - *ppvObject = static_cast(this); - return S_OK; - }else if(riid == __uuidof(ISpEventSink)) { - AddRef(); - *ppvObject = static_cast(this); - return S_OK; - }else if(riid == __uuidof(ISpEventSource)) { - AddRef(); - *ppvObject = static_cast(this); - return S_OK; - }else{ - *ppvObject = nullptr; - return E_NOINTERFACE; - } -}; - -ULONG STDMETHODCALLTYPE AudioStream::AddRef( void) { - return InterlockedIncrement(&_refCount); -} - -ULONG STDMETHODCALLTYPE AudioStream::Release( void) { - long refCount = InterlockedDecrement(&_refCount); - if(refCount == 0) { - delete this; - - return 0; - }else if(refCount == 1 && _captureThread != nullptr) { - // our 
capture thread holds the last ref count and should - // now exit. - _shouldExit = true; - } - return refCount; -} - -HRESULT STDMETHODCALLTYPE AudioStream::Read(__out_bcount_part(cb, *pcbRead) void *pv, ULONG cb,__out_opt ULONG *pcbRead) { - if(_readCallback != nullptr) { - // reading from a Python file like object... - return _readCallback(cb, pv, pcbRead); - } - - // reading from our own MediaBuffer queue - *pcbRead = 0; - ULONG bytesRead = 0; - while(bytesRead != cb) { - if(_CurrentReadIndex != _CurrentCaptureLength) { - // copy any bytes we have - auto toRead = min(cb - bytesRead, _CurrentCaptureLength - _CurrentReadIndex); - memcpy((BYTE*)pv + bytesRead, &_curBuffer->_buffer[_CurrentReadIndex], toRead); - _CurrentReadIndex += toRead; - - bytesRead += toRead; - } - - if(bytesRead != cb) { - // read more bytes - LockHolder lock(this); - if(_buffers.begin() != _buffers.end()) { - if(_curBuffer != nullptr) { - _curBuffer->Release(); - } - - _curBuffer = _buffers.front(); - _buffers.pop_front(); - - _CurrentCaptureLength = _curBuffer->_length; - _CurrentReadIndex = 0; - } - } - } - - if(pcbRead != nullptr) { - // optional - *pcbRead = bytesRead; - } - return S_OK; -} - -/* [local] */ HRESULT STDMETHODCALLTYPE AudioStream::Write( - /* [annotation] */ - __in_bcount(cb) const void *pv, - /* [in] */ ULONG cb, - /* [annotation] */ - __out_opt ULONG *pcbWritten) { - return E_NOTIMPL; -} - -/* [local] */ HRESULT STDMETHODCALLTYPE AudioStream::Seek( - /* [in] */ LARGE_INTEGER dlibMove, - /* [in] */ DWORD dwOrigin, - /* [annotation] */ - __out_opt ULARGE_INTEGER *plibNewPosition) { - if(plibNewPosition != nullptr) { - plibNewPosition->QuadPart = 0; - } - return S_OK; -} - -HRESULT STDMETHODCALLTYPE AudioStream::SetSize( - /* [in] */ ULARGE_INTEGER libNewSize) { - return E_NOTIMPL; -} - -/* [local] */ HRESULT STDMETHODCALLTYPE AudioStream::CopyTo( - /* [unique][in] */ IStream *pstm, - /* [in] */ ULARGE_INTEGER cb, - /* [annotation] */ - __out_opt ULARGE_INTEGER *pcbRead, - /* [annotation] */ - __out_opt ULARGE_INTEGER *pcbWritten) { - return E_NOTIMPL; -} - -HRESULT STDMETHODCALLTYPE AudioStream::Commit( - /* [in] */ DWORD grfCommitFlags) { - return E_NOTIMPL; -} - -HRESULT STDMETHODCALLTYPE AudioStream::Revert( void) { - return E_NOTIMPL; -} - -HRESULT STDMETHODCALLTYPE AudioStream::LockRegion( - /* [in] */ ULARGE_INTEGER libOffset, - /* [in] */ ULARGE_INTEGER cb, - /* [in] */ DWORD dwLockType) { - return E_NOTIMPL; -} - -HRESULT STDMETHODCALLTYPE AudioStream::UnlockRegion( - /* [in] */ ULARGE_INTEGER libOffset, - /* [in] */ ULARGE_INTEGER cb, - /* [in] */ DWORD dwLockType) { - return E_NOTIMPL; -} - -HRESULT STDMETHODCALLTYPE AudioStream::Stat( - /* [out] */ __RPC__out STATSTG *pstatstg, - /* [in] */ DWORD grfStatFlag) { - pstatstg->cbSize.QuadPart = INFINITE; - return S_OK; -} - -HRESULT STDMETHODCALLTYPE AudioStream::Clone( - /* [out] */ __RPC__deref_out_opt IStream **ppstm) { - return E_NOTIMPL; -} diff --git a/Python/Product/PyKinect/PyKinect/src/AudioStream.h b/Python/Product/PyKinect/PyKinect/src/AudioStream.h deleted file mode 100644 index 0d99606ea4..0000000000 --- a/Python/Product/PyKinect/PyKinect/src/AudioStream.h +++ /dev/null @@ -1,184 +0,0 @@ -/* PyKinect -// Copyright(c) Microsoft Corporation -// All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the License); you may not use -// this file except in compliance with the License. 
You may obtain a copy of the -// License at http://www.apache.org/licenses/LICENSE-2.0 -// -// THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -// OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -// IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -// MERCHANTABLITY OR NON-INFRINGEMENT. -// -// See the Apache Version 2.0 License for specific language governing -// permissions and limitations under the License. -*/ - -#pragma once - -#include "stdafx.h" -#include -#include -#include // For configuring DMO properties (MFPKEY_WMAAECMA_SYSTEM_MODE, MFPKEY_WMAAECMA_DMO_SOURCE_MODE) -#include // MF_MT_* constants -#include -#include - -using namespace std; - -typedef HRESULT(__stdcall ReadCallback)(DWORD bytes, void* text, ULONG* pcbRead); - -class AudioStream : public ISpStreamFormat, public ISpEventSink, public ISpEventSource { -public: - IMediaObject* _mediaObject; - -private: - ULONG _refCount; - size_t _CurrentCaptureLength, _CurrentReadIndex; - MediaBuffer *_curBuffer; - int _count; - CRITICAL_SECTION _lock; - deque _buffers; - queue _freeBuffers; - bool _shouldExit; - DWORD _readStaleThreshold; - ReadCallback* _readCallback; - HANDLE _captureThread; - - class LockHolder { - AudioStream* _stream; - public: - LockHolder(AudioStream* stream) { - _stream = stream; - EnterCriticalSection(&_stream->_lock); - } - - ~LockHolder() { - LeaveCriticalSection(&_stream->_lock); - } - }; - -public: - AudioStream(IMediaObject* mediaObject, DWORD readStaleThreshold); - AudioStream(ReadCallback readCallback); - ~AudioStream(); - - // Frees a buffer, saving it in our queue of cached buffers if we're still running. - void FreeBuffer(MediaBuffer* buffer); - - // Gets a new buffer, pulling it from the cache if available, or creating a new buffer if not. 
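
// (The cache is the _freeBuffers queue above; MediaBuffer::Release() routes a
//  fully released buffer back through FreeBuffer() rather than deleting it while
//  its parent stream is still alive, so buffers are recycled across reads.)
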
- MediaBuffer* GetBuffer(); - - void Stop() { - _shouldExit = true; - } - - static DWORD WINAPI CaptureThread(LPVOID thisObj); - - virtual HRESULT STDMETHODCALLTYPE SetNotifySink(/* [in] */ ISpNotifySink *pNotifySink); - - virtual /* [local] */ HRESULT STDMETHODCALLTYPE SetNotifyWindowMessage( - /* [in] */ HWND hWnd, - /* [in] */ UINT Msg, - /* [in] */ WPARAM wParam, - /* [in] */ LPARAM lParam); - - virtual /* [local] */ HRESULT STDMETHODCALLTYPE SetNotifyCallbackFunction( - /* [in] */ SPNOTIFYCALLBACK *pfnCallback, - /* [in] */ WPARAM wParam, - /* [in] */ LPARAM lParam); - - virtual /* [local] */ HRESULT STDMETHODCALLTYPE SetNotifyCallbackInterface( - /* [in] */ ISpNotifyCallback *pSpCallback, - /* [in] */ WPARAM wParam, - /* [in] */ LPARAM lParam); - - - virtual /* [local] */ HRESULT STDMETHODCALLTYPE SetNotifyWin32Event(void); - - virtual /* [local] */ HRESULT STDMETHODCALLTYPE WaitForNotifyEvent( - /* [in] */ DWORD dwMilliseconds); - - virtual /* [local] */ HANDLE STDMETHODCALLTYPE GetNotifyEventHandle(void); - - virtual HRESULT STDMETHODCALLTYPE AddEvents( - /* [in] */ const SPEVENT *pEventArray, - /* [in] */ ULONG ulCount); - - virtual HRESULT STDMETHODCALLTYPE GetEventInterest( - /* [out] */ ULONGLONG *pullEventInterest); - - virtual HRESULT STDMETHODCALLTYPE SetInterest( - /* [in] */ ULONGLONG ullEventInterest, - /* [in] */ ULONGLONG ullQueuedInterest); - - virtual HRESULT STDMETHODCALLTYPE GetEvents( - /* [in] */ ULONG ulCount, - /* [size_is][out] */ SPEVENT *pEventArray, - /* [out] */ ULONG *pulFetched); - - virtual HRESULT STDMETHODCALLTYPE GetInfo( - /* [out] */ SPEVENTSOURCEINFO *pInfo); - - virtual HRESULT STDMETHODCALLTYPE GetFormat(GUID *pguidFormatId, WAVEFORMATEX **ppCoMemWaveFormatEx); - - virtual HRESULT STDMETHODCALLTYPE QueryInterface(__in REFIID riid, __RPC__deref_out void __RPC_FAR *__RPC_FAR *ppvObject); - - virtual ULONG STDMETHODCALLTYPE AddRef(void); - - virtual ULONG STDMETHODCALLTYPE Release(void); - - virtual /* [local] */ HRESULT STDMETHODCALLTYPE Read( - /* [annotation] */ - __out_bcount_part(cb, *pcbRead) void *pv, - /* [in] */ ULONG cb, - /* [annotation] */ - __out_opt ULONG *pcbRead); - - virtual /* [local] */ HRESULT STDMETHODCALLTYPE Write( - /* [annotation] */ - __in_bcount(cb) const void *pv, - /* [in] */ ULONG cb, - /* [annotation] */ - __out_opt ULONG *pcbWritten); - - virtual /* [local] */ HRESULT STDMETHODCALLTYPE Seek( - /* [in] */ LARGE_INTEGER dlibMove, - /* [in] */ DWORD dwOrigin, - /* [annotation] */ - __out_opt ULARGE_INTEGER *plibNewPosition); - - virtual HRESULT STDMETHODCALLTYPE SetSize( - /* [in] */ ULARGE_INTEGER libNewSize); - - virtual /* [local] */ HRESULT STDMETHODCALLTYPE CopyTo( - /* [unique][in] */ IStream *pstm, - /* [in] */ ULARGE_INTEGER cb, - /* [annotation] */ - __out_opt ULARGE_INTEGER *pcbRead, - /* [annotation] */ - __out_opt ULARGE_INTEGER *pcbWritten); - - virtual HRESULT STDMETHODCALLTYPE Commit( - /* [in] */ DWORD grfCommitFlags); - - virtual HRESULT STDMETHODCALLTYPE Revert(void); - - virtual HRESULT STDMETHODCALLTYPE LockRegion( - /* [in] */ ULARGE_INTEGER libOffset, - /* [in] */ ULARGE_INTEGER cb, - /* [in] */ DWORD dwLockType); - - virtual HRESULT STDMETHODCALLTYPE UnlockRegion( - /* [in] */ ULARGE_INTEGER libOffset, - /* [in] */ ULARGE_INTEGER cb, - /* [in] */ DWORD dwLockType); - - virtual HRESULT STDMETHODCALLTYPE Stat( - /* [out] */ __RPC__out STATSTG *pstatstg, - /* [in] */ DWORD grfStatFlag); - - virtual HRESULT STDMETHODCALLTYPE Clone( - /* [out] */ __RPC__deref_out_opt IStream **ppstm); -}; \ No newline 
at end of file diff --git a/Python/Product/PyKinect/PyKinect/src/MediaBuffer.cpp b/Python/Product/PyKinect/PyKinect/src/MediaBuffer.cpp deleted file mode 100644 index 8513b677b2..0000000000 --- a/Python/Product/PyKinect/PyKinect/src/MediaBuffer.cpp +++ /dev/null @@ -1,92 +0,0 @@ -/* PyKinect -// Copyright(c) Microsoft Corporation -// All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the License); you may not use -// this file except in compliance with the License. You may obtain a copy of the -// License at http://www.apache.org/licenses/LICENSE-2.0 -// -// THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -// OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -// IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -// MERCHANTABLITY OR NON-INFRINGEMENT. -// -// See the Apache Version 2.0 License for specific language governing -// permissions and limitations under the License. -*/ - -#include "stdafx.h" -#include "PyKinectAudio.h" - -// Implements IMediaBuffer - this just stores enough data for reading -MediaBuffer::MediaBuffer (AudioStream* parentStream) { - _parentStream = parentStream; - ReInit(); -} - -void MediaBuffer::ReInit() { - _refCount = 1; - _length = 0; -} - -HRESULT STDMETHODCALLTYPE MediaBuffer::SetLength( - DWORD cbLength) { - _length = cbLength; - return S_OK; -} - -HRESULT STDMETHODCALLTYPE MediaBuffer::GetMaxLength( - /* [annotation][out] */ - __out DWORD *pcbMaxLength) { - *pcbMaxLength = _max_buffer_length; - return S_OK; -} - -HRESULT STDMETHODCALLTYPE MediaBuffer::GetBufferAndLength( - /* [annotation][out] */ - __deref_opt_out_bcount(*pcbLength) BYTE **ppBuffer, - /* [annotation][out] */ - __out_opt DWORD *pcbLength) { - if(ppBuffer == nullptr || pcbLength == nullptr) { - return E_POINTER; - } - - *ppBuffer = _buffer; - *pcbLength = _length; - return S_OK; -} - -HRESULT STDMETHODCALLTYPE MediaBuffer::QueryInterface( - /* [in] */ REFIID riid, - /* [iid_is][out] */ __RPC__deref_out void __RPC_FAR *__RPC_FAR *ppvObject) { - if(riid == __uuidof(IUnknown)) { - AddRef(); - *ppvObject = static_cast(this); - return S_OK; - }else if(riid == __uuidof(IMediaBuffer)) { - AddRef(); - *ppvObject = static_cast(this); - return S_OK; - }else{ - *ppvObject = nullptr; - return E_NOINTERFACE; - } -}; - -ULONG STDMETHODCALLTYPE MediaBuffer::AddRef( void) { - return InterlockedIncrement(&_refCount); -} - -ULONG STDMETHODCALLTYPE MediaBuffer::Release( void) { - long refCount = InterlockedDecrement(&_refCount); - if(refCount == 0) { - auto parent = _parentStream; - if(parent == nullptr) { - delete this; - }else{ - parent->FreeBuffer(this); - } - return 0; - } - return refCount; -} diff --git a/Python/Product/PyKinect/PyKinect/src/MediaBuffer.h b/Python/Product/PyKinect/PyKinect/src/MediaBuffer.h deleted file mode 100644 index 939e5c140b..0000000000 --- a/Python/Product/PyKinect/PyKinect/src/MediaBuffer.h +++ /dev/null @@ -1,46 +0,0 @@ -/* PyKinect -// Copyright(c) Microsoft Corporation -// All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the License); you may not use -// this file except in compliance with the License. 
You may obtain a copy of the -// License at http://www.apache.org/licenses/LICENSE-2.0 -// -// THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -// OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -// IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -// MERCHANTABLITY OR NON-INFRINGEMENT. -// -// See the Apache Version 2.0 License for specific language governing -// permissions and limitations under the License. -*/ - -#pragma once - -#include "stdafx.h" -#include -#include - -class AudioStream; - -class MediaBuffer : public IMediaBuffer { - static const unsigned int _max_buffer_length = 4096; - - ULONG _refCount; - -public: - AudioStream* _parentStream; - BYTE _buffer[_max_buffer_length]; - DWORD _length; - - MediaBuffer (AudioStream* parentStream); - - void ReInit(); - - virtual HRESULT STDMETHODCALLTYPE SetLength(DWORD cbLength); - virtual HRESULT STDMETHODCALLTYPE GetMaxLength(__out DWORD *pcbMaxLength); - virtual HRESULT STDMETHODCALLTYPE GetBufferAndLength(__deref_opt_out_bcount(*pcbLength) BYTE **ppBuffer, __out_opt DWORD *pcbLength); - virtual HRESULT STDMETHODCALLTYPE QueryInterface(__in REFIID riid, __RPC__deref_out void __RPC_FAR *__RPC_FAR *ppvObject); - virtual ULONG STDMETHODCALLTYPE AddRef( void); - virtual ULONG STDMETHODCALLTYPE Release( void); -}; \ No newline at end of file diff --git a/Python/Product/PyKinect/PyKinect/src/PyKinectAudio.cpp b/Python/Product/PyKinect/PyKinect/src/PyKinectAudio.cpp deleted file mode 100644 index 1235823432..0000000000 --- a/Python/Product/PyKinect/PyKinect/src/PyKinectAudio.cpp +++ /dev/null @@ -1,472 +0,0 @@ -/* PyKinect -// Copyright(c) Microsoft Corporation -// All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the License); you may not use -// this file except in compliance with the License. You may obtain a copy of the -// License at http://www.apache.org/licenses/LICENSE-2.0 -// -// THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -// OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -// IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -// MERCHANTABLITY OR NON-INFRINGEMENT. -// -// See the Apache Version 2.0 License for specific language governing -// permissions and limitations under the License. -*/ - -#include "stdafx.h" -#include "PyKinectAudio.h" -#include -#include -#include - -using namespace std; - -typedef void (__stdcall _RecognizeCallback)(LPWSTR text); -typedef void (__stdcall _EnumRecognizersCallback)(LPWSTR id, LPWSTR description, ISpObjectToken* token); -#pragma comment(lib, "strmiids.lib") - -// Flat C API for exposing to Python -extern "C" { - __declspec(dllexport) HRESULT OpenKinectAudio(INuiSensor* pSensor, IMediaObject** ppDMO) { - IMediaObject* pDMO; - INuiAudioBeam* pAudioBeam; - HRESULT hr = pSensor->NuiGetAudioSource(&pAudioBeam); - if(FAILED(hr)) { - return hr; - } - - hr = pAudioBeam->QueryInterface(IID_IMediaObject, (void**)&pDMO); - if(FAILED(hr)) { - return hr; - } - - IPropertyStore* pPS = NULL; - hr = pDMO->QueryInterface(IID_IPropertyStore, (void**)&pPS); - if(FAILED(hr)) { - pDMO->Release(); - return hr; - } - - // Set MicArray DMO system mode with no echo cancellation. 
- // This must be set for the DMO to work properly - PROPVARIANT pvSysMode; - PropVariantInit(&pvSysMode); - pvSysMode.vt = VT_I4; - - pvSysMode.lVal = (LONG)(OPTIBEAM_ARRAY_ONLY); - hr = pPS->SetValue(MFPKEY_WMAAECMA_SYSTEM_MODE, pvSysMode); - PropVariantClear(&pvSysMode); - - // Put media object into filter mode so it can be used as a Media Foundation transform - PROPVARIANT pvSourceMode; - PropVariantInit(&pvSourceMode); - pvSourceMode.vt = VT_BOOL; - pvSourceMode.boolVal = VARIANT_TRUE; - hr = pPS->SetValue(MFPKEY_WMAAECMA_DMO_SOURCE_MODE, pvSourceMode); - pPS->Release(); - - if(FAILED(hr)) { - pDMO->Release(); - return hr; - } - - DMO_MEDIA_TYPE type; - memset(&type, 0, sizeof(DMO_MEDIA_TYPE)); - type.majortype = MFMediaType_Audio; - type.subtype = MFAudioFormat_PCM; - type.lSampleSize = 0; - type.bFixedSizeSamples = true; - type.bTemporalCompression = false; - type.formattype = FORMAT_WaveFormatEx; - type.cbFormat = sizeof(WAVEFORMATEX); - type.pbFormat = (BYTE*)CoTaskMemAlloc(sizeof(WAVEFORMATEX)); - if(type.pbFormat == nullptr) { - pDMO->Release(); - return E_OUTOFMEMORY; - } - - WAVEFORMATEX *waveformatex = (WAVEFORMATEX*)type.pbFormat; - waveformatex->wFormatTag = WAVE_FORMAT_PCM; - waveformatex->nChannels = 1; - waveformatex->nSamplesPerSec = 0x3e80; - waveformatex->nAvgBytesPerSec = 0x7d00; - waveformatex->nBlockAlign = 2; - waveformatex->wBitsPerSample = 0x10; - waveformatex->cbSize = 0x0; - - hr = pDMO->SetOutputType(0, &type, 0); - if(FAILED(hr)) { - pDMO->Release(); - return hr; - } - - hr = pDMO->AllocateStreamingResources(); - if(FAILED(hr)) { - pDMO->Release(); - return hr; - } - - *ppDMO = pDMO; - return S_OK; - } - - __declspec(dllexport) HRESULT OpenAudioStream(IMediaObject* pDMO, ISpStreamFormat** stream, DWORD readStaleThreshold) { - *stream = new AudioStream(pDMO, readStaleThreshold); - return S_OK; - } - - __declspec(dllexport) HRESULT ReadAudioStream(ISpStreamFormat* stream, void* data, ULONG cb, ULONG* pcbRead) { - return stream->Read(data, cb, pcbRead); - } - - __declspec(dllexport) void IUnknownRelease(IUnknown* obj) { - obj->Release(); - } - - __declspec(dllexport) HRESULT EnumRecognizers(_EnumRecognizersCallback callback) { - IEnumSpObjectTokens *enumTokens; - HRESULT hr = SpEnumTokens(SPCAT_RECOGNIZERS, NULL, NULL, &enumTokens); - if(FAILED(hr)) { - return hr; - } - - ISpObjectToken *token; - ULONG fetched; - while(SUCCEEDED(enumTokens->Next(1, &token, &fetched)) && fetched == 1) { - LPWSTR id = nullptr; - hr = token->GetId(&id); - - if(SUCCEEDED(hr)) { - LPWSTR description = nullptr; - hr = token->GetStringValue(L"", &description); - - if(SUCCEEDED(hr)) { - callback(id, description, token); - ::CoTaskMemFree(description); - }else{ - token->Release(); - } - - ::CoTaskMemFree(id); - - // token is now owned in Python - } else { - token->Release(); - } - - } - enumTokens->Release(); - return S_OK; - } - - __declspec(dllexport) HRESULT CreateRecognizer(ISpObjectToken* token, ISpRecoContext** ppContext) { - ISpRecognizer * reco; - HRESULT hr = CoCreateInstance(CLSID_SpInprocRecognizer, NULL, CLSCTX_INPROC_SERVER, IID_ISpRecognizer, (LPVOID*)&reco); - if(FAILED(hr)) { - return hr; - } - - if(token != nullptr) { - hr = reco->SetRecognizer(token); - if(FAILED(hr)) { - return hr; - } - } - - ISpRecoContext *context; - hr = reco->CreateRecoContext(&context); - if(FAILED(hr)) { - reco->Release(); - return hr; - } - - reco->Release(); - *ppContext = context; - return S_OK; - } - - __declspec(dllexport) HRESULT LoadGrammar(LPCWSTR filename, ISpRecoContext* context, 
ISpRecoGrammar** ppGrammar) { - ISpRecoGrammar* grammar; - HRESULT hr = context->CreateGrammar(1, &grammar); - if(FAILED(hr)) { - return hr; - } - ISpRecognizer* reco; - hr = context->GetRecognizer(&reco); - if(FAILED(hr)) { - return hr; - } - - hr = grammar->LoadCmdFromFile(filename, SPLO_STATIC); - if(FAILED(hr)) { - context->Release(); - reco->Release(); - return hr; - } - - hr = grammar->SetRuleState(NULL, NULL, SPRS_ACTIVE); - if(FAILED(hr)) { - context->Release(); - reco->Release(); - return hr; - } - - reco->Release(); - *ppGrammar = grammar; - return S_OK; - } - - __declspec(dllexport) HRESULT RecognizeOne(ISpRecoContext* pContext, DWORD timeout, _RecognizeCallback callback, _RecognizeCallback altCallback) { - HRESULT hr = pContext->WaitForNotifyEvent(timeout); - if(FAILED(hr)) { - return hr; - } - - SPEVENT curEvent; - ULONG fetched; - hr = pContext->GetEvents(1, &curEvent, &fetched); - if(FAILED(hr)) { - return hr; - } - - if(curEvent.eEventId == SPEI_RECOGNITION) { - - ISpRecoResult* result = reinterpret_cast<ISpRecoResult*>(curEvent.lParam); - const USHORT MAX_ALTERNATES = 100; - ISpPhraseAlt* pcpPhraseAlt[MAX_ALTERNATES]; - ULONG altCount; - - SPPHRASE* phrase; - hr = result->GetPhrase(&phrase); - if(FAILED(hr)) { - return hr; - } - - WCHAR *pwszText = nullptr; - hr = result->GetText(SP_GETWHOLEPHRASE, SP_GETWHOLEPHRASE, TRUE, &pwszText, NULL); - if(!FAILED(hr)) { - callback(pwszText); - ::CoTaskMemFree(pwszText); - } - - hr = result->GetAlternates(phrase->Rule.ulFirstElement, - phrase->Rule.ulCountOfElements, - MAX_ALTERNATES, - pcpPhraseAlt, - &altCount); - - - if(SUCCEEDED(hr)) { - for(ULONG i = 0; i < altCount; i++) { - hr = pcpPhraseAlt[i]->GetText(SP_GETWHOLEPHRASE, SP_GETWHOLEPHRASE, TRUE, &pwszText, NULL); - if (!FAILED(hr)) { - altCallback(pwszText); - // TODO: Could hold onto the phrase and send it back to Python so it can be committed.
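// [Editorial sketch, not part of the original source.] The TODO above could be addressed by keeping the ISpPhraseAlt pointer alive instead of letting it go out of scope here, handing an opaque handle back to Python, and exposing a small exported helper that calls Commit() on the chosen alternate and then Release()s it. Note also that the alternates returned by GetAlternates() are COM objects, so each entry of pcpPhraseAlt would normally need a matching Release() once the loop is finished.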
- ::CoTaskMemFree(pwszText); - } - } - - ::CoTaskMemFree(phrase); - } - - altCallback(nullptr); - - return S_OK; - } - - return S_FALSE; - } - - class CallbackInfo { - public: - _RecognizeCallback *Callback, *AltCallback; - ISpRecoContext* pContext; - HANDLE cancelHandle; - HANDLE waitHandle; - bool multiple; - }; - - DWORD WINAPI AsyncRecognizeThread(LPVOID param) { - CallbackInfo* cbInfo = (CallbackInfo*)param; - HANDLE handles[2] = {cbInfo->cancelHandle, cbInfo->waitHandle}; - - do { - auto waitIndex = ::WaitForMultipleObjects(2, handles, FALSE, INFINITE); - if(waitIndex == WAIT_OBJECT_0) { - return 0; - }else if(waitIndex == WAIT_OBJECT_0 + 1) { - RecognizeOne(cbInfo->pContext, 0, cbInfo->Callback, cbInfo->AltCallback); - } - }while(cbInfo->multiple); - - return 0; - } - - __declspec(dllexport) HRESULT RecognizeAsync(ISpRecoContext* pContext, bool multiple, _RecognizeCallback callback, _RecognizeCallback altCallback, HANDLE* pCancelHandle) { - HANDLE waitHandle = pContext->GetNotifyEventHandle(); - if(waitHandle == INVALID_HANDLE_VALUE) { - // "interface is not initialized" according to http://msdn.microsoft.com/en-us/library/ee450842(v=vs.85).aspx - return E_FAIL; - } - - HANDLE cancelHandle = ::CreateEventA(NULL, TRUE, FALSE, NULL); - if(cancelHandle == nullptr) { - return HRESULT_FROM_WIN32(GetLastError()); - } - - CallbackInfo* cbInfo = new (nothrow) CallbackInfo; - if(cbInfo == nullptr) { - ::CloseHandle(cancelHandle); - return E_OUTOFMEMORY; - } - - cbInfo->Callback = callback; - cbInfo->AltCallback = altCallback; - cbInfo->cancelHandle = cancelHandle; - cbInfo->pContext = pContext; - cbInfo->waitHandle = waitHandle; - cbInfo->multiple = multiple; - - DWORD threadId; - HANDLE threadHandle = ::CreateThread(NULL, 0, AsyncRecognizeThread, cbInfo, 0, &threadId); - if(threadHandle == nullptr) { - delete cbInfo; - ::CloseHandle(cancelHandle); - return HRESULT_FROM_WIN32(GetLastError()); - } - CloseHandle(threadHandle); - - *pCancelHandle = cancelHandle; - return S_OK; - } - - __declspec(dllexport) HRESULT StopRecognizeAsync(HANDLE cancelHandle) { - if(!::SetEvent(cancelHandle)) { - return HRESULT_FROM_WIN32(GetLastError()); - } - return S_OK; - } - - __declspec(dllexport) HRESULT SetInputFile(ISpRecoContext* pContext, ReadCallback readCallback) { - ISpRecognizer* reco; - HRESULT hr = pContext->GetRecognizer(&reco); - if(FAILED(hr)) { - return hr; - } - AudioStream* stream = new AudioStream(readCallback); - - hr = reco->SetInput(static_cast<ISpStreamFormat*>(stream), FALSE); - if(FAILED(hr)) { - reco->Release(); - stream->Release(); - return hr; - } - //reco->Release(); - return hr; - } - - __declspec(dllexport) HRESULT SetInputStream(ISpRecoContext* pContext, ISpStreamFormat* stream) { - ISpRecognizer* reco; - HRESULT hr = pContext->GetRecognizer(&reco); - if(FAILED(hr)) { - return hr; - } - - hr = reco->SetInput(stream, FALSE); - if(FAILED(hr)) { - reco->Release(); - return hr; - } - reco->Release(); - return hr; - } - - __declspec(dllexport) HRESULT SetDeviceProperty_Bool(IMediaObject* pDMO, DWORD index, bool value) { - IPropertyStore* pPS = NULL; - HRESULT hr = pDMO->QueryInterface(IID_IPropertyStore, (void**)&pPS); - if(FAILED(hr)) { - return hr; - } - - PROPVARIANT pvSourceMode; - PropVariantInit(&pvSourceMode); - pvSourceMode.vt = VT_BOOL; - pvSourceMode.boolVal = value ?
VARIANT_TRUE : VARIANT_FALSE; - - PROPERTYKEY key = { { 0x6f52c567, 0x360, 0x4bd2, { 0x96, 0x17, 0xcc, 0xbf, 0x14, 0x21, 0xc9, 0x39 } }, index}; - - auto res = pPS->SetValue(key, pvSourceMode); - - pPS->Release(); - - return res; - } - - __declspec(dllexport) HRESULT SetDeviceProperty_Int(IMediaObject* pDMO, DWORD index, int value) { - IPropertyStore* pPS = NULL; - HRESULT hr = pDMO->QueryInterface(IID_IPropertyStore, (void**)&pPS); - if(FAILED(hr)) { - return hr; - } - - PROPVARIANT pvSourceMode; - PropVariantInit(&pvSourceMode); - pvSourceMode.vt = VT_I4; - pvSourceMode.iVal = value; - - PROPERTYKEY key = { { 0x6f52c567, 0x360, 0x4bd2, { 0x96, 0x17, 0xcc, 0xbf, 0x14, 0x21, 0xc9, 0x39 } }, index}; - - auto res = pPS->SetValue(key, pvSourceMode); - - pPS->Release(); - - return res; - } - - __declspec(dllexport) HRESULT GetDeviceProperty_Bool(IMediaObject* pDMO, DWORD index, bool* value) { - IPropertyStore* pPS = NULL; - HRESULT hr = pDMO->QueryInterface(IID_IPropertyStore, (void**)&pPS); - if(FAILED(hr)) { - return hr; - } - - PROPVARIANT pvSourceMode; - PropVariantInit(&pvSourceMode); - - PROPERTYKEY key = { { 0x6f52c567, 0x360, 0x4bd2, { 0x96, 0x17, 0xcc, 0xbf, 0x14, 0x21, 0xc9, 0x39 } }, index}; - - auto res = pPS->GetValue(key, &pvSourceMode); - pPS->Release(); - - if(SUCCEEDED(res)) { - *value = pvSourceMode.boolVal == VARIANT_TRUE; - } - return res; - } - - __declspec(dllexport) HRESULT GetDeviceProperty_Int(IMediaObject* pDMO, DWORD index, int* value) { - IPropertyStore* pPS = NULL; - HRESULT hr = pDMO->QueryInterface(IID_IPropertyStore, (void**)&pPS); - if(FAILED(hr)) { - return hr; - } - - PROPVARIANT pvSourceMode; - PropVariantInit(&pvSourceMode); - - PROPERTYKEY key = { { 0x6f52c567, 0x360, 0x4bd2, { 0x96, 0x17, 0xcc, 0xbf, 0x14, 0x21, 0xc9, 0x39 } }, index}; - - auto res = pPS->GetValue(key, &pvSourceMode); - - pPS->Release(); - - if(SUCCEEDED(res)) { - *value = pvSourceMode.intVal; - } - - return res; - } - -} diff --git a/Python/Product/PyKinect/PyKinect/src/PyKinectAudio.h b/Python/Product/PyKinect/PyKinect/src/PyKinectAudio.h deleted file mode 100644 index 1993d380aa..0000000000 --- a/Python/Product/PyKinect/PyKinect/src/PyKinectAudio.h +++ /dev/null @@ -1,23 +0,0 @@ -/* PyKinect -// Copyright(c) Microsoft Corporation -// All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the License); you may not use -// this file except in compliance with the License. You may obtain a copy of the -// License at http://www.apache.org/licenses/LICENSE-2.0 -// -// THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -// OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -// IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -// MERCHANTABLITY OR NON-INFRINGEMENT. -// -// See the Apache Version 2.0 License for specific language governing -// permissions and limitations under the License. -*/ - -#pragma once - -#include "stdafx.h" - -#include "MediaBuffer.h" -#include "AudioStream.h" \ No newline at end of file diff --git a/Python/Product/PyKinect/PyKinect/src/stdafx.cpp b/Python/Product/PyKinect/PyKinect/src/stdafx.cpp deleted file mode 100644 index e018cb9830..0000000000 --- a/Python/Product/PyKinect/PyKinect/src/stdafx.cpp +++ /dev/null @@ -1,25 +0,0 @@ -/* PyKinect -// Copyright(c) Microsoft Corporation -// All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the License); you may not use -// this file except in compliance with the License. 
You may obtain a copy of the -// License at http://www.apache.org/licenses/LICENSE-2.0 -// -// THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -// OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -// IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -// MERCHANTABLITY OR NON-INFRINGEMENT. -// -// See the Apache Version 2.0 License for specific language governing -// permissions and limitations under the License. -*/ - -// stdafx.cpp : source file that includes just the standard includes -// KinectAudio.pch will be the pre-compiled header -// stdafx.obj will contain the pre-compiled type information - -#include "stdafx.h" - -// TODO: reference any additional headers you need in STDAFX.H -// and not in this file diff --git a/Python/Product/PyKinect/PyKinect/src/stdafx.h b/Python/Product/PyKinect/PyKinect/src/stdafx.h deleted file mode 100644 index f04ace710b..0000000000 --- a/Python/Product/PyKinect/PyKinect/src/stdafx.h +++ /dev/null @@ -1,40 +0,0 @@ -/* PyKinect -// Copyright(c) Microsoft Corporation -// All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the License); you may not use -// this file except in compliance with the License. You may obtain a copy of the -// License at http://www.apache.org/licenses/LICENSE-2.0 -// -// THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -// OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -// IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -// MERCHANTABLITY OR NON-INFRINGEMENT. -// -// See the Apache Version 2.0 License for specific language governing -// permissions and limitations under the License. -*/ -// stdafx.h : include file for standard system include files, -// or project specific include files that are used frequently, but -// are changed infrequently -// - -#pragma once - -#include "targetver.h" - -#include -#include - - - -// TODO: reference additional headers your program requires here - -template void SafeRelease(T **ppT) -{ - if (*ppT) - { - (*ppT)->Release(); - *ppT = NULL; - } -} diff --git a/Python/Product/PyKinect/PyKinect/src/targetver.h b/Python/Product/PyKinect/PyKinect/src/targetver.h deleted file mode 100644 index 780fb19fc7..0000000000 --- a/Python/Product/PyKinect/PyKinect/src/targetver.h +++ /dev/null @@ -1,25 +0,0 @@ -/* PyKinect -// Copyright(c) Microsoft Corporation -// All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the License); you may not use -// this file except in compliance with the License. You may obtain a copy of the -// License at http://www.apache.org/licenses/LICENSE-2.0 -// -// THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -// OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -// IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -// MERCHANTABLITY OR NON-INFRINGEMENT. -// -// See the Apache Version 2.0 License for specific language governing -// permissions and limitations under the License. -*/ - -#pragma once - -// Including SDKDDKVer.h defines the highest available Windows platform. - -// If you wish to build your application for a previous Windows platform, include WinSDKVer.h and -// set the _WIN32_WINNT macro to the platform you wish to support before including SDKDDKVer.h. 
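// [Editorial sketch, not in the original file.] Targeting an older Windows version, as the comment above describes, would look roughly like this (using Windows 7, _WIN32_WINNT 0x0601, purely as an example value):
//
//   #include <WinSDKVer.h>
//   #define _WIN32_WINNT 0x0601
//   #include <SDKDDKVer.h>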
- -#include diff --git a/Python/Product/PyKinect/PyKinect/test.pcm b/Python/Product/PyKinect/PyKinect/test.pcm deleted file mode 100644 index f3fb29d0a9..0000000000 Binary files a/Python/Product/PyKinect/PyKinect/test.pcm and /dev/null differ diff --git a/Python/Product/PyKinect/PyKinect/tests.py b/Python/Product/PyKinect/PyKinect/tests.py deleted file mode 100644 index ced4c1b3de..0000000000 --- a/Python/Product/PyKinect/PyKinect/tests.py +++ /dev/null @@ -1,309 +0,0 @@ -# PyKinect -# Copyright(c) Microsoft Corporation -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the License); you may not use -# this file except in compliance with the License. You may obtain a copy of the -# License at http://www.apache.org/licenses/LICENSE-2.0 -# -# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -# MERCHANTABLITY OR NON-INFRINGEMENT. -# -# See the Apache Version 2.0 License for specific language governing -# permissions and limitations under the License. - -import unittest -import ctypes -from unittest.main import TestProgram -from pykinect.nui import (Device, Runtime, KinectError, ImageResolution, - ImageStreamType, ImageType, ImageStream) -from pykinect.audio import KinectAudioSource, GetKinectDeviceInfo -from winspeech.recognition import SpeechRecognitionEngine, Grammar -import time -from pykinect.nui.structs import (SkeletonData, SkeletonTrackingState, Vector, - JointId, SkeletonQuality, JointTrackingState, - ImageViewArea, SkeletonFrameQuality, NUI_SKELETON_COUNT, - SkeletonFrame, TransformSmoothParameters) - - -class KinectTestCases(unittest.TestCase): - def test_device(self): - # our test cases run with only a single Kinect installed - d = Device() - self.assertEqual(d.count, 1) - - # device is a singleton - d2 = Device() - self.assertIs(d, d2) - - def test_runtime_creation(self): - # create the runtime, check the instance - with Runtime() as nui: - self.assertEqual(nui.instance_index, 0) - - # we should be able to create a 2nd runtime after the first is disposed - with Runtime() as nui2: - self.assertEqual(nui2.instance_index, 0) - - # accessing a disposed runtime should throw - self.assertRaises(KinectError, lambda: nui2.instance_index) - - #with Runtime() as nui: - # # creating a 2nd runtime w/ the 1st existing should throw - # self.assertRaises(KinectError, Runtime) - - def test_video_stream(self): - with Runtime() as nui: - nui.video_stream.open(ImageStreamType.Depth, 2, ImageResolution.Resolution640x480, ImageType.Color) - - # we can only open a single stream at a time - self.assertRaises( - KinectError, - nui.video_stream.open, - ImageStreamType.Video, - 2, - ImageResolution.Resolution1280x1024, - ImageType.Color - ) - - - valid_resolutions = { - ImageType.Color : (ImageResolution.Resolution1280x1024, ImageResolution.Resolution640x480), - ImageType.ColorYuv : (ImageResolution.Resolution640x480, ), - ImageType.Color : (ImageResolution.Resolution640x480, ), - ImageType.DepthAndPlayerIndex : (ImageResolution.Resolution320x240, ImageResolution.Resolution80x60), - ImageType.Depth : (ImageResolution.Resolution320x240, ImageResolution.Resolution640x480, ImageResolution.Resolution80x60), - } - - for image_type, resolution_list in valid_resolutions.items(): - for resolution in resolution_list: - with Runtime() as nui: - nui.video_stream.open(ImageStreamType.Video, 2, resolution, 
image_type) - - with Runtime() as nui: - invalid_resolutions = { - ImageType.Color : (ImageResolution.Resolution320x240, ImageResolution.Resolution80x60), - ImageType.DepthAndPlayerIndex : (ImageResolution.Resolution1280x1024, ImageResolution.Resolution640x480), - ImageType.ColorYuv : (ImageResolution.Resolution1280x1024, ImageResolution.Resolution320x240, ImageResolution.Resolution80x60), - ImageType.Color : (ImageResolution.Resolution320x240, ImageResolution.Resolution80x60), - ImageType.Depth : (ImageResolution.Resolution1280x1024, ), - } - - for image_type, resolution_list in invalid_resolutions.items(): - for resolution in resolution_list: - self.assertRaises( - KinectError, - nui.video_stream.open, - ImageStreamType.Video, - 2, - resolution, - image_type - ) - - def test_image_stream_get_valid_resolutions(self): - self.assertEqual(ImageStream.get_valid_resolutions(ImageType.DepthAndPlayerIndex), (ImageResolution.Resolution320x240, )) - self.assertEqual(ImageStream.get_valid_resolutions(ImageType.Color), (ImageResolution.Resolution1280x1024, ImageResolution.Resolution640x480)) - self.assertEqual(ImageStream.get_valid_resolutions(ImageType.ColorYuv), (ImageResolution.Resolution640x480, )) - self.assertEqual(ImageStream.get_valid_resolutions(ImageType.ColorYuvRaw), (ImageResolution.Resolution640x480, )) - self.assertEqual(ImageStream.get_valid_resolutions(ImageType.Depth), (ImageResolution.Resolution640x480, )) - self.assertRaises(KinectError, lambda: ImageStream.get_valid_resolutions(1000)) - - def test_skeleton_engine(self): - with Runtime() as nui: - self.assertEqual(nui.skeleton_engine.enabled, False) - nui.skeleton_engine.enabled = True - self.assertEqual(nui.skeleton_engine.enabled, True) - frame = nui.skeleton_engine.get_next_frame() - - def test_audio_source_file(self): - source = KinectAudioSource() - audio_file = source.start() - data = audio_file.read() - self.assertEqual(len(data), 4096) - - def test_audio_source_file_close(self): - with KinectAudioSource() as source: - audio_file = source.start() - audio_file.close() - - self.assertRaises(IOError, lambda: audio_file.read()) - - def test_audio_source_source_stop(self): - with KinectAudioSource() as source: - audio_file = source.start() - source.stop() - - self.assertRaises(IOError, lambda: audio_file.read()) - - def test_audio_source_properties(self): - with KinectAudioSource() as source: - source.feature_mode = True - self.assertEqual(source.feature_mode, True) - - attrs = [ - # name, default value new value - ('acoustic_echo_suppression', 1, 0), - ('automatic_gain_control', False, True), - ('center_clip', False, True), - ('echo_length', 256, 128), - ('frame_size', 256, 128), - ('gain_bounder', True, False), - ('mic_array_mode', 512, 256), - ('mic_array_preprocess', True, False), - ('noise_fill', False, True), - ('noise_suppression', 1, 0), - ('source_mode', True, False), - ('system_mode', 2, 1), - ('voice_activity_detector', 0, 0), - ] - - for name, default, new in attrs: - self.assertEqual(getattr(source, name), default) - setattr(source, name, new) - self.assertEqual(getattr(source, name), new) - - def test_recognize_audio(self): - pass - """ - with KinectAudioSource() as source: - audio_file = source.start() - - rec = SpeechRecognitionEngine() - grammar = rec.load_grammar('Grammar.xml') - - rec.set_input_to_audio_file(audio_file) - - res = rec.recognize_sync()""" - - def test_get_kinect_info(self): - devices = GetKinectDeviceInfo() - self.assertEqual(len(devices), 1) - device = devices[0] - 
self.assertEqual(device.device_name, 'Microphone Array (Kinect USB Audio)') - self.assertEqual(device.device_index, 1) - - def test_installed_recognizers(self): - recognizers = SpeechRecognitionEngine.installed_recognizers() - - recognizer = SpeechRecognitionEngine(recognizers[0]) - - def test_default_recognizer(self): - rec = SpeechRecognitionEngine() - grammar = rec.load_grammar('Grammar.xml') - input = file('test.pcm', 'rb') - rec.set_input_to_audio_file(input) - - self.assertEqual(rec.recognize_sync().text, 'down') - self.assertEqual(rec.recognize_sync().text, 'left') - self.assertEqual(rec.recognize_sync().text, 'right') - - def test_default_recognizer_async_one(self): - rec = SpeechRecognitionEngine() - grammar = rec.load_grammar('Grammar.xml') - input = file('test.pcm', 'rb') - rec.set_input_to_audio_file(input) - - recognized_values = [] - def recognized(result): - recognized_values.append(result.result.text) - - rec.speech_recognized += recognized - rec.recognize_async() - - time.sleep(5) - self.assertEqual(len(recognized_values), 1) - self.assertEqual(recognized_values[0], 'down') - - def test_default_recognizer_async_multiple(self): - rec = SpeechRecognitionEngine() - grammar = rec.load_grammar('Grammar.xml') - input = file('test.pcm', 'rb') - rec.set_input_to_audio_file(input) - - recognized_values = [] - def recognized(result): - recognized_values.append(result.result.text) - - rec.speech_recognized += recognized - rec.recognize_async(multiple=True) - - time.sleep(5) - self.assertEqual(len(recognized_values), 3) - self.assertEqual(recognized_values[0], 'down') - self.assertEqual(recognized_values[1], 'left') - self.assertEqual(recognized_values[2], 'right') - - def assertCtypesEquals(self, value1, value2): - if type(type(value1)) is type(ctypes.Array): - self.assertEqual(len(value1), len(value2)) - for i in range(len(value1)): - self.assertCtypesEquals(value1[i], value2[i]) - else: - self.assertEqual(value1, value2) - - def interop_prop_test(self, interop_obj, tests): - for friendly_name, interop_name, value in tests: - print friendly_name - setattr(interop_obj, friendly_name, value) - - self.assertCtypesEquals(getattr(interop_obj, friendly_name), value) - self.assertCtypesEquals(getattr(interop_obj, interop_name), value) - - def test_skeleton_data(self): - pos_arr = ctypes.ARRAY(Vector, JointId.count.value)() - pos_arr[0] = Vector(2,4,6,8) - joint_arr = ctypes.ARRAY(JointTrackingState, JointId.count.value)() - joint_arr[0] = JointTrackingState.inferred - - tests = [('tracking_state', 'eTrackingState', SkeletonTrackingState.tracked), - ('tracking_id', 'dwTrackingID', 1), - ('enrollment_index', 'dwEnrollmentIndex', 1), - ('user_index', 'dwUserIndex', 1), - ('position', 'Position', Vector(1, 2, 3, 4)), - ('skeleton_positions', 'SkeletonPositions', pos_arr), - ('skeleton_position_tracking_states', 'eSkeletonPositionTrackingState', joint_arr), - ('skeleton_quality', 'Quality', SkeletonQuality.clipped_bottom), - ] - - self.interop_prop_test(SkeletonData(), tests) - - def test_image_view_area(self): - tests = [('zoom', 'Zoom', 1), - ('center_x', 'CenterX', 1), - ('center_y', 'CenterY', 1), - ] - - self.interop_prop_test(ImageViewArea(), tests) - - def test_skeleton_frame(self): - skel_data = ctypes.ARRAY(SkeletonData, NUI_SKELETON_COUNT)() - sd = SkeletonData() - sd.user_index = 5 - skel_data[0] = sd - tests = [('timestamp', 'liTimeStamp', 1), - ('frame_number', 'dwFrameNumber', 2), - ('quality', 'Quality', SkeletonFrameQuality.camera_motion), - ('floor_clip_plane', 
'vFloorClipPlane', Vector(2,4,6,8)), - ('normal_to_gravity', 'vNormalToGravity', Vector(1,2,3,4)), - ('skeleton_data', 'SkeletonData', skel_data), - ] - - self.interop_prop_test(SkeletonFrame(), tests) - - def test_TransformSmoothParameters(self): - tests = [('smoothing', 'fSmoothing', 1), - ('correction', 'fCorrection', 2), - ('prediction', 'fPrediction', 3), - ('jitter_radius', 'fJitterRadius', 4), - ('max_deviation_radius', 'fMaxDeviationRadius', 5), - ] - - self.interop_prop_test(TransformSmoothParameters(), tests) - pass - -if __name__ == '__main__': - unittest.main() - - diff --git a/Python/Product/PyKinect/PyKinect/winspeech/LICENSE.txt b/Python/Product/PyKinect/PyKinect/winspeech/LICENSE.txt deleted file mode 100644 index d9a10c0d8e..0000000000 --- a/Python/Product/PyKinect/PyKinect/winspeech/LICENSE.txt +++ /dev/null @@ -1,176 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS diff --git a/Python/Product/PyKinect/PyKinect/winspeech/__init__.py b/Python/Product/PyKinect/PyKinect/winspeech/__init__.py deleted file mode 100644 index 4008e63a82..0000000000 --- a/Python/Product/PyKinect/PyKinect/winspeech/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# PyKinect -# Copyright(c) Microsoft Corporation -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the License); you may not use -# this file except in compliance with the License. You may obtain a copy of the -# License at http://www.apache.org/licenses/LICENSE-2.0 -# -# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -# MERCHANTABLITY OR NON-INFRINGEMENT. -# -# See the Apache Version 2.0 License for specific language governing -# permissions and limitations under the License. diff --git a/Python/Product/PyKinect/PyKinect/winspeech/recognition.py b/Python/Product/PyKinect/PyKinect/winspeech/recognition.py deleted file mode 100644 index db480d2109..0000000000 --- a/Python/Product/PyKinect/PyKinect/winspeech/recognition.py +++ /dev/null @@ -1,239 +0,0 @@ -# PyKinect -# Copyright(c) Microsoft Corporation -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the License); you may not use -# this file except in compliance with the License. You may obtain a copy of the -# License at http://www.apache.org/licenses/LICENSE-2.0 -# -# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -# MERCHANTABLITY OR NON-INFRINGEMENT. -# -# See the Apache Version 2.0 License for specific language governing -# permissions and limitations under the License. 
- -import os -import ctypes -from os import path - -_audio_path = path.join(path.dirname(__file__), '..', 'pykinect', 'audio', 'PyKinectAudio.dll') -if not os.path.exists(_audio_path): - _audio_path = path.join(path.dirname(__file__), '..', '..', '..', '..', '..', '..', 'Binaries', 'Debug', 'PyKinectAudio.dll') - if not path.exists(_audio_path): - raise Exception('Cannot find PyKinectAudio.dll') - - -_PYAUDIODLL = ctypes.CDLL(_audio_path) - -_CreateRecognizer = _PYAUDIODLL.CreateRecognizer -_CreateRecognizer.argtypes = [ctypes.c_voidp, ctypes.POINTER(ctypes.c_voidp)] -_CreateRecognizer.restype = ctypes.HRESULT - -_SetInputFile = _PYAUDIODLL.SetInputFile -_SetInputFile.argtypes = [ctypes.c_voidp, ctypes.c_voidp] -_SetInputFile.restype = ctypes.HRESULT - -_SetInputStream = _PYAUDIODLL.SetInputStream -_SetInputStream.argtypes = [ctypes.c_voidp, ctypes.c_voidp] -_SetInputStream.restype = ctypes.HRESULT - -_IUnknownRelease = _PYAUDIODLL.IUnknownRelease -_IUnknownRelease.argtypes = [ctypes.c_voidp] -_IUnknownRelease.restype = None - -_LoadGrammar = _PYAUDIODLL.LoadGrammar -_LoadGrammar.argtypes = [ctypes.c_wchar_p, ctypes.c_voidp, ctypes.POINTER(ctypes.c_voidp)] -_LoadGrammar.restype = ctypes.HRESULT - -_EnumRecognizers = _PYAUDIODLL.EnumRecognizers - -_ReadCallback = ctypes.WINFUNCTYPE(ctypes.HRESULT, ctypes.c_uint32, ctypes.c_voidp, ctypes.POINTER(ctypes.c_uint32)) -_Recognize_Callback = ctypes.WINFUNCTYPE(None, ctypes.c_wchar_p) - -_RecognizeOne = _PYAUDIODLL.RecognizeOne -_RecognizeOne.argtypes = [ctypes.c_voidp, ctypes.c_uint32, _Recognize_Callback, _Recognize_Callback] -_RecognizeOne.restype = ctypes.HRESULT - -_RecognizeAsync = _PYAUDIODLL.RecognizeAsync -_RecognizeAsync.argtypes = [ctypes.c_voidp, ctypes.c_uint, _Recognize_Callback, _Recognize_Callback, ctypes.POINTER(ctypes.c_voidp)] -_RecognizeAsync.restype = ctypes.HRESULT - -_StopRecognizeAsync = _PYAUDIODLL.StopRecognizeAsync -_StopRecognizeAsync.argtypes = [ctypes.c_voidp] -_StopRecognizeAsync.restype = ctypes.HRESULT - -_EnumRecognizersCallback = ctypes.WINFUNCTYPE(None, ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_voidp) - -class Grammar(object): - """Represents a speech grammar constructed from an XML file""" - def __init__(self, filename): - self.filename = filename - - def __del__(self): - #_IUnknownRelease(self._reco_ctx) - _IUnknownRelease(self._grammar) - - -class RecognizerInfo(object): - def __init__(self, id, description, token): - self.id = id - self.description = description - self._token = token - - def __del__(self): - _IUnknownRelease(self._token) - - def __repr__(self): - return 'RecognizerInfo(%r, %r, ...)' % (self.id, self.description) - - -class RecognitionResult(object): - def __init__(self, text, alternates = None): - self.text = text - if alternates: - self.alternates = tuple(RecognitionResult(alt) for alt in alternates) - else: - self.alternates = () - - -class _event(object): - """class used for adding/removing/invoking a set of listener functions""" - __slots__ = ['handlers'] - - def __init__(self): - self.handlers = [] - - def __iadd__(self, other): - self.handlers.append(other) - return self - - def __isub__(self, other): - self.handlers.remove(other) - return self - - def fire(self, *args): - for handler in self.handlers: - handler(*args) - - -class RecognitionEventArgs(object): - """Provides information about speech recognition events.""" - - def __init__(self, result): - self.result = result - - -class SpeechRecognitionEngine(object): - """Provides the means to access and manage an in-process speech 
recognition engine.""" - - def __init__(self, recognizer = None): - self.speech_recognized = _event() - self._async_handle = None - - if isinstance(recognizer, str): - # TODO: Lookup by ID - pass - elif isinstance(recognizer, RecognizerInfo): - rec = ctypes.c_voidp() - _CreateRecognizer(recognizer._token, ctypes.byref(rec)) - self._rec = rec - elif recognizer is None: - rec = ctypes.c_voidp() - _CreateRecognizer(None, ctypes.byref(rec)) - self._rec = rec - else: - raise TypeError('Bad type for recognizer: ' + repr(recognizer)) - - def __del__(self): - # TODO: Need to shut down any listening threads - self.recognize_async_stop() - _IUnknownRelease(self._rec) - - def load_grammar(self, grammar): - if isinstance(grammar, str): - grammar_obj = Grammar(grammar) - else: - grammar_obj = grammar - - comGrammar = ctypes.c_voidp() - _LoadGrammar(grammar_obj.filename, self._rec, ctypes.byref(comGrammar)) - grammar_obj._grammar = comGrammar - return grammar_obj - - def set_input_to_audio_file(self, stream): - """sets the input to a Python file-like object which implements read""" - - stream_obj = getattr(stream, '__ISpStreamFormat__', None) - if stream_obj is not None: - # optimization: we can avoid going through Python to do the reading by passing - # the original ISpStreamFormat object through - _SetInputStream(self._rec, stream_obj) - else: - def reader(byteCount, buffer, bytesRead): - bytes = stream.read(byteCount) - ctypes.memmove(buffer, bytes, len(bytes)) - bytesRead.contents.value = len(bytes) - return 0 - - self._reader = _ReadCallback(reader) - _SetInputFile(self._rec, self._reader) - - def recognize_sync(self, timeout = 30000): - """attempts to recognize speech and returns the recognized text. - -By default times out after 30 seconds""" - res = [] - alts = [] - def callback(text): - res.append(text) - - def alt_callback(text): - if text is not None: - alts.append(text) - - _RecognizeOne(self._rec, timeout, _Recognize_Callback(callback), _Recognize_Callback(alt_callback)) - if res: - return RecognitionResult(res[0], alts) - - return None - - def recognize_async(self, multiple = False): - cur_result = [] - def callback(text): - cur_result.append(text) - - def alt_callback(text): - if text == None: - # send the event - result = RecognitionResult(cur_result[0], cur_result[1:]) - event_args = RecognitionEventArgs(result) - self.speech_recognized.fire(event_args) - del cur_result[:] - else: - cur_result.append(text) - - stop_listening_handle = ctypes.c_voidp() - - # keep alive our function pointers on ourselves... 
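# [Editorial note, not in the original source.] ctypes only keeps the native thunk behind a WINFUNCTYPE callback alive while a Python reference to the callback object exists. RecognizeAsync keeps invoking these callbacks from a worker thread long after this method returns, so stashing the wrappers on self (next two assignments) is what prevents them from being garbage collected and the native side from calling into freed memory.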
- self._async_callback = async_callback =_Recognize_Callback(callback) - self._async_alt_callback = async_alt_callback = _Recognize_Callback(alt_callback) - - _RecognizeAsync(self._rec, multiple, async_callback, async_alt_callback, ctypes.byref(stop_listening_handle)) - self._async_handle = stop_listening_handle - - def recognize_async_stop(self): - if self._async_handle is not None: - _StopRecognizeAsync(self._async_handle) - self._async_handle = None - - @staticmethod - def installed_recognizers(): - ids = [] - def callback(id, description, token): - ids.append(RecognizerInfo(id, description, token)) - _EnumRecognizers(_EnumRecognizersCallback(callback)) - - return ids - diff --git a/Python/Product/PyKinect/SkeletalViewer/Program.py b/Python/Product/PyKinect/SkeletalViewer/Program.py deleted file mode 100644 index 1ce8f9651a..0000000000 --- a/Python/Product/PyKinect/SkeletalViewer/Program.py +++ /dev/null @@ -1,16 +0,0 @@ -import pykinect -from pykinect import nui - - -pykinect.nui.NuiInitialize( - nui.NUI_INITIALIZE_FLAG_USES_DEPTH_AND_PLAYER_INDEX | - nui.NUI_INITIALIZE_FLAG_USES_SKELETON | - nui.NUI_INITIALIZE_FLAG_USES_COLOR) - -from pykinect.nui import imagecamera - - -pykinect.nui.NuiShutdown() - - -print('goodbye') \ No newline at end of file diff --git a/Python/Product/PyKinect/SkeletalViewer/SkeletalViewer.pyproj b/Python/Product/PyKinect/SkeletalViewer/SkeletalViewer.pyproj deleted file mode 100644 index 91ca201345..0000000000 --- a/Python/Product/PyKinect/SkeletalViewer/SkeletalViewer.pyproj +++ /dev/null @@ -1,29 +0,0 @@ - - - - Debug - 2.0 - {ed0300eb-5381-4745-af28-f83a3e914717} - . - Program.py - ..\PyKinect - . - SkeletalViewer - SkeletalViewer - SkeletalViewer - False - 2af0f10d-7135-4994-9156-5d01c9c11b7e - 2.7 - - - true - false - - - true - false - - - - - \ No newline at end of file diff --git a/Python/Product/PyKinect/TestPypiServer.ps1 b/Python/Product/PyKinect/TestPypiServer.ps1 deleted file mode 100644 index 226679d0cb..0000000000 --- a/Python/Product/PyKinect/TestPypiServer.ps1 +++ /dev/null @@ -1,71 +0,0 @@ -# Python Tools for Visual Studio -# Copyright(c) Microsoft Corporation -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the License); you may not use -# this file except in compliance with the License. You may obtain a copy of the -# License at http://www.apache.org/licenses/LICENSE-2.0 -# -# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS -# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY -# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, -# MERCHANTABLITY OR NON-INFRINGEMENT. -# -# See the Apache Version 2.0 License for specific language governing -# permissions and limitations under the License. 
- -$delete_pip = $false -$restore_pip_ini = $false -$delete_pip_ini = $false -$restore_pypirc = $false -$delete_pypirc = $false - -py -2.7 -m pip install pypiserver passlib - -try { - if (-not (Test-Path ~\pip)) { - mkdir ~\pip | Out-Null - $delete_pip = $true - } - - if (Test-Path ~\pip\pip.ini) { - Move-Item ~\pip\pip.ini ~\pip\pip.ini.bak - $restore_pip_ini = $true - } - - Copy-Item pip.ini ~\pip\pip.ini - $delete_pip_ini = $true - - if (Test-Path ~\.pypirc) { - Move-Item ~\.pypirc ~\.pypirc.bak - $restore_pypirc = $true - } - - Copy-Item .pypirc ~\.pypirc - $delete_pypirc = $true - - if (-not (Test-Path .\TestPackages)) { - mkdir .\TestPackages | Out-Null - } - - py -2.7 -m pypiserver -o --disable-fallback -p 8080 -P .htpasswd .\TestPackages - -} finally { - if ($delete_pypirc) { - Remove-Item ~\.pypirc -EA 0 - } - if ($restore_pypirc) { - Move-Item ~\.pypirc.bak ~\.pypirc -force -EA 0 - } - - if ($delete_pip_ini) { - Remove-Item ~\pip\pip.ini -EA 0 - } - if ($restore_pip_ini) { - Move-Item ~\pip\pip.ini.bak ~\pip\pip.ini -force -EA 0 - } - - if ($delete_pip) { - Remove-Item ~\pip -force -r - } -} \ No newline at end of file diff --git a/Python/Product/PyKinect/pip.ini b/Python/Product/PyKinect/pip.ini deleted file mode 100644 index b665488bfd..0000000000 --- a/Python/Product/PyKinect/pip.ini +++ /dev/null @@ -1,2 +0,0 @@ -[global] -index-url = http://localhost:8080/simple/
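Editor's note: the deleted winspeech package above is easiest to understand from its tests. A minimal usage sketch, reconstructed only from the deleted tests.py and recognition.py (Python 2.7, and assuming a Grammar.xml and test.pcm like the ones referenced there), would look roughly like this:

    # sketch based on the deleted tests; file names are the ones used there
    from winspeech.recognition import SpeechRecognitionEngine

    rec = SpeechRecognitionEngine()          # default in-process recognizer
    rec.load_grammar('Grammar.xml')          # XML grammar, as in the tests
    rec.set_input_to_audio_file(open('test.pcm', 'rb'))

    result = rec.recognize_sync()            # blocks, 30s default timeout
    if result is not None:
        print result.text                    # e.g. 'down' in the test data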