//------------------------------------------------------------------------------
// <copyright file="MainWindow.xaml.cs" company="Microsoft">
// Copyright (c) Microsoft Corporation. All rights reserved.
// </copyright>
//------------------------------------------------------------------------------
namespace Microsoft.Samples.Kinect.FaceBasics
{
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Diagnostics;
using System.Globalization;
using System.IO;
using System.Windows;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Media.Media3D;
using Microsoft.Kinect;
using Microsoft.Kinect.Face;
using OSCeleton;
/// <summary>
/// Interaction logic for MainWindow
/// </summary>
public partial class MainWindow : Window, INotifyPropertyChanged
{
private OSCeleton osceleton = new OSCeleton();
/// <summary>
/// Thickness of face bounding box and face points
/// </summary>
private const double DrawFaceShapeThickness = 8;
/// <summary>
/// Font size of face property text
/// </summary>
private const double DrawTextFontSize = 30;
/// <summary>
/// Radius of face point circle
/// </summary>
private const double FacePointRadius = 1.0;
/// <summary>
/// Text layout offset in X axis
/// </summary>
private const float TextLayoutOffsetX = -0.1f;
/// <summary>
/// Text layout offset in Y axis
/// </summary>
private const float TextLayoutOffsetY = -0.15f;
/// <summary>
/// Face rotation display angle increment in degrees
/// </summary>
private const double FaceRotationIncrementInDegrees = 5.0;
/// <summary>
/// Formatted text to indicate that there are no bodies/faces tracked in the FOV
/// </summary>
private FormattedText textFaceNotTracked = new FormattedText(
"No bodies or faces are tracked ...",
CultureInfo.GetCultureInfo("en-us"),
FlowDirection.LeftToRight,
new Typeface("Georgia"),
DrawTextFontSize,
Brushes.White);
/// <summary>
/// Text layout for the no face tracked message
/// </summary>
private Point textLayoutFaceNotTracked = new Point(10.0, 10.0);
/// <summary>
/// Drawing group for body rendering output
/// </summary>
private DrawingGroup drawingGroup;
/// <summary>
/// Drawing image that we will display
/// </summary>
private DrawingImage imageSource;
/// <summary>
/// Active Kinect sensor
/// </summary>
private KinectSensor kinectSensor = null;
/// <summary>
/// Coordinate mapper to map one type of point to another
/// </summary>
private CoordinateMapper coordinateMapper = null;
/// <summary>
/// Reader for body frames
/// </summary>
private BodyFrameReader bodyFrameReader = null;
/// <summary>
/// Array to store bodies
/// </summary>
private Body[] bodies = null;
/// <summary>
/// Number of bodies tracked
/// </summary>
private int bodyCount;
/// <summary>
/// Face frame sources
/// </summary>
private FaceFrameSource[] faceFrameSources = null;
/// <summary>
/// Face frame readers
/// </summary>
private FaceFrameReader[] faceFrameReaders = null;
/// <summary>
/// Storage for face frame results
/// </summary>
private FaceFrameResult[] faceFrameResults = null;
/// <summary>
/// Width of display (screen space)
/// </summary>
private int displayWidth;
/// <summary>
/// Height of display (screen space)
/// </summary>
private int displayHeight;
/// <summary>
/// Display rectangle
/// </summary>
private Rect displayRect;
/// <summary>
/// Width of display (depth space)
/// </summary>
private int displayDepthWidth;
/// <summary>
/// Height of display (depth space)
/// </summary>
private int displayDepthHeight;
/// <summary>
/// List of brushes for each face tracked
/// </summary>
private List<Brush> faceBrush;
/// <summary>
/// Current status text to display
/// </summary>
private string statusText = null;
// Body
private const double HandSize = 30;
private const double JointThickness = 3;
private const double ClipBoundsThickness = 10;
private const float InferredZPositionClamp = 0.1f;
private readonly Brush handClosedBrush = new SolidColorBrush(Color.FromArgb(128, 255, 0, 0));
private readonly Brush handOpenBrush = new SolidColorBrush(Color.FromArgb(128, 0, 255, 0));
private readonly Brush handLassoBrush = new SolidColorBrush(Color.FromArgb(128, 0, 0, 255));
private readonly Brush trackedJointBrush = new SolidColorBrush(Color.FromArgb(255, 68, 192, 68));
private readonly Brush inferredJointBrush = Brushes.Yellow;
private readonly Pen inferredBonePen = new Pen(Brushes.Gray, 1);
private List<Tuple<JointType, JointType>> bones;
private List<Pen> bodyColors;
// Depth
private const int MapDepthToByte = 8000 / 256;
private DepthFrameReader depthFrameReader = null;
private FrameDescription depthFrameDescription = null;
private WriteableBitmap depthBitmap = null;
private byte[] depthPixels = null;
// RGB
private ColorFrameReader colorFrameReader = null;
private WriteableBitmap colorBitmap = null;
// IR
private const float InfraredSourceValueMaximum = (float)ushort.MaxValue;
private const float InfraredSourceScale = 0.75f;
private const float InfraredOutputValueMinimum = 0.01f;
private const float InfraredOutputValueMaximum = 1.0f;
private InfraredFrameReader infraredFrameReader = null;
private FrameDescription infraredFrameDescription = null;
private WriteableBitmap infraredBitmap = null;
// Options
enum showModes
{
None, Color, Depth, IR
}
private bool drawBody = true;
private bool trackFace = false;
private showModes showMode = showModes.Depth;
/// <summary>
/// Initializes a new instance of the MainWindow class.
/// </summary>
public MainWindow()
{
// one sensor is currently supported
this.kinectSensor = KinectSensor.GetDefault();
// get the coordinate mapper
this.coordinateMapper = this.kinectSensor.CoordinateMapper;
// open the reader for the body frames
this.bodyFrameReader = this.kinectSensor.BodyFrameSource.OpenReader();
// wire handler for body frame arrival
this.bodyFrameReader.FrameArrived += this.Reader_FrameArrived;
// set the maximum number of bodies that would be tracked by Kinect
this.bodyCount = this.kinectSensor.BodyFrameSource.BodyCount;
// allocate storage to store body objects
this.bodies = new Body[this.bodyCount];
// create a face frame source + reader to track each face in the FOV
this.faceFrameSources = new FaceFrameSource[this.bodyCount];
this.faceFrameReaders = new FaceFrameReader[this.bodyCount];
// allocate storage to store face frame results for each face in the FOV
this.faceFrameResults = new FaceFrameResult[this.bodyCount];
// populate face result colors - one for each face index
this.faceBrush = new List<Brush>()
{
Brushes.White,
Brushes.Orange,
Brushes.Green,
Brushes.Red,
Brushes.LightBlue,
Brushes.Yellow
};
// set IsAvailableChanged event notifier
this.kinectSensor.IsAvailableChanged += this.Sensor_IsAvailableChanged;
// open the sensor
this.kinectSensor.Open();
// set the status text
this.StatusText = this.kinectSensor.IsAvailable ? Properties.Resources.RunningStatusText
: Properties.Resources.NoSensorStatusText;
// Create the drawing group we'll use for drawing
this.drawingGroup = new DrawingGroup();
// Create an image source that we can use in our image control
this.imageSource = new DrawingImage(this.drawingGroup);
// use the window object as the view model in this simple example
this.DataContext = this;
// initialize the components (controls) of the window
this.InitializeComponent();
}
/// <summary>
/// INotifyPropertyChanged PropertyChanged event to allow window controls to bind to changeable data
/// </summary>
public event PropertyChangedEventHandler PropertyChanged;
/// <summary>
/// Gets the bitmap to display
/// </summary>
public ImageSource ImageSource
{
get
{
return this.imageSource;
}
}
public ImageSource GetImageSource()
{
if (this.showMode == showModes.Depth)
{
return this.depthBitmap;
}
if (this.showMode == showModes.Color)
{
return this.colorBitmap;
}
if (this.showMode == showModes.IR)
{
return this.infraredBitmap;
}
return null;
}
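// Note (descriptive only): the bitmaps returned above are created lazily in UpdateObservers()
// for the currently selected display mode, so callers should expect a null return until a
// mode has been activated at least once.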
/// <summary>
/// Gets or sets the current status text to display
/// </summary>
public string StatusText
{
get
{
return this.statusText;
}
set
{
if (this.statusText != value)
{
this.statusText = value;
// notify any bound elements that the text has changed
if (this.PropertyChanged != null)
{
this.PropertyChanged(this, new PropertyChangedEventArgs("StatusText"));
}
}
}
}
/// <summary>
/// Converts rotation quaternion to Euler angles
/// and then maps them to a specified range of values to control the refresh rate
/// </summary>
/// <param name="rotQuaternion">face rotation quaternion</param>
/// <param name="pitch">rotation about the X-axis</param>
/// <param name="yaw">rotation about the Y-axis</param>
/// <param name="roll">rotation about the Z-axis</param>
public static void ExtractFaceRotationInDegrees(Vector4 rotQuaternion, out int pitch, out int yaw, out int roll)
{
double x = rotQuaternion.X;
double y = rotQuaternion.Y;
double z = rotQuaternion.Z;
double w = rotQuaternion.W;
// convert face rotation quaternion to Euler angles in degrees
double yawD, pitchD, rollD;
pitchD = Math.Atan2(2 * ((y * z) + (w * x)), (w * w) - (x * x) - (y * y) + (z * z)) / Math.PI * 180.0;
yawD = Math.Asin(2 * ((w * y) - (x * z))) / Math.PI * 180.0;
rollD = Math.Atan2(2 * ((x * y) + (w * z)), (w * w) + (x * x) - (y * y) - (z * z)) / Math.PI * 180.0;
// snap the values to a multiple of the specified increment to control the refresh rate
double increment = FaceRotationIncrementInDegrees;
pitch = (int)(Math.Floor((pitchD + ((increment / 2.0) * (pitchD > 0 ? 1.0 : -1.0))) / increment) * increment);
yaw = (int)(Math.Floor((yawD + ((increment / 2.0) * (yawD > 0 ? 1.0 : -1.0))) / increment) * increment);
roll = (int)(Math.Floor((rollD + ((increment / 2.0) * (rollD > 0 ? 1.0 : -1.0))) / increment) * increment);
}
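// Worked example (illustrative only, using the 5 degree increment defined above):
//   pitchD = 12.4  =>  floor((12.4 + 2.5) / 5) * 5 = floor(2.98) * 5 = 10
//   pitchD = 13.0  =>  floor((13.0 + 2.5) / 5) * 5 = floor(3.10) * 5 = 15
// For positive angles this snaps to the nearest multiple of FaceRotationIncrementInDegrees,
// so the reported value only changes once the face has rotated by roughly the increment.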
/// <summary>
/// Execute start up tasks
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void MainWindow_Loaded(object sender, RoutedEventArgs e)
{
this.initOSCeleton();
this.initBodies();
}
/// <summary>
/// Execute shutdown tasks
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void MainWindow_Closing(object sender, CancelEventArgs e)
{
this.osceleton.Stop();
this.showMode = showModes.None;
UpdateObservers();
if (this.bodyFrameReader != null)
{
// BodyFrameReader is IDisposable
this.bodyFrameReader.Dispose();
this.bodyFrameReader = null;
}
if (this.kinectSensor != null)
{
this.kinectSensor.Close();
this.kinectSensor = null;
}
}
/// <summary>
/// Handles the face frame data arriving from the sensor
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void Reader_FaceFrameArrived(object sender, FaceFrameArrivedEventArgs e)
{
using (FaceFrame faceFrame = e.FrameReference.AcquireFrame())
{
if (faceFrame != null)
{
// get the index of the face source from the face source array
int index = this.GetFaceSourceIndex(faceFrame.FaceFrameSource);
// check if this face frame has valid face frame results
if (this.ValidateFaceBoxAndPoints(faceFrame.FaceFrameResult))
{
// store this face frame result to draw later
this.faceFrameResults[index] = faceFrame.FaceFrameResult;
}
else
{
// indicates that the latest face frame result from this reader is invalid
this.faceFrameResults[index] = null;
}
}
}
}
/// <summary>
/// Returns the index of the face frame source
/// </summary>
/// <param name="faceFrameSource">the face frame source</param>
/// <returns>the index of the face source in the face source array</returns>
private int GetFaceSourceIndex(FaceFrameSource faceFrameSource)
{
int index = -1;
for (int i = 0; i < this.bodyCount; i++)
{
if (this.faceFrameSources[i] == faceFrameSource)
{
index = i;
break;
}
}
return index;
}
/// <summary>
/// Handles the body frame data arriving from the sensor and updates face tracking for each body
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void Reader_FaceFrameArrived(object sender, BodyFrameArrivedEventArgs e)
{
using (var bodyFrame = e.FrameReference.AcquireFrame())
{
if (bodyFrame != null)
{
// update body data
bodyFrame.GetAndRefreshBodyData(this.bodies);
if (!this.trackFace)
return;
// iterate through each face source
for (int i = 0; i < this.bodyCount; i++)
{
// check if a valid face is tracked in this face source
if (this.faceFrameSources[i].IsTrackingIdValid)
{
// check if we have valid face frame results
if (this.faceFrameResults[i] != null)
{
// queue face frame results
this.osceleton.EnqueueFaceTracking(0, i, this.faceFrameResults[i]);
}
}
else
{
// check if the corresponding body is tracked
if (this.bodies[i].IsTracked)
{
// update the face frame source to track this body
this.faceFrameSources[i].TrackingId = this.bodies[i].TrackingId;
}
}
}
if (this.showMode == showModes.Color)
{
using (DrawingContext dc = this.drawingGroup.Open())
{
// Draw the background
ImageSource img = GetImageSource();
if (img == null)
dc.DrawRectangle(Brushes.Black, null, this.displayRect);
else
dc.DrawImage(img, this.displayRect);
bool drawFaceResult = false;
// iterate through each face source
for (int i = 0; i < this.bodyCount; i++)
{
// check if a valid face is tracked in this face source
if (this.faceFrameSources[i].IsTrackingIdValid)
{
// check if we have valid face frame results
if (this.faceFrameResults[i] != null)
{
// draw face frame results
this.DrawFaceFrameResults(i, this.faceFrameResults[i], dc);
drawFaceResult = true;
}
}
}
if (!drawFaceResult)
{
// if no faces were drawn then this indicates one of the following:
// a body was not tracked
// a body was tracked but the corresponding face was not tracked
// a body and the corresponding face were tracked, though the face box or the face points were not valid
dc.DrawText(
this.textFaceNotTracked,
this.textLayoutFaceNotTracked);
}
this.drawingGroup.ClipGeometry = new RectangleGeometry(this.displayRect);
}
}
}
}
}
/// <summary>
/// Draws face frame results
/// </summary>
/// <param name="faceIndex">the index of the face frame corresponding to a specific body in the FOV</param>
/// <param name="faceResult">container of all face frame results</param>
/// <param name="drawingContext">drawing context to render to</param>
private void DrawFaceFrameResults(int faceIndex, FaceFrameResult faceResult, DrawingContext drawingContext)
{
// choose the brush based on the face index
Brush drawingBrush = this.faceBrush[0];
if (faceIndex < this.bodyCount)
{
drawingBrush = this.faceBrush[faceIndex];
}
Pen drawingPen = new Pen(drawingBrush, DrawFaceShapeThickness);
// draw the face bounding box
var faceBoxSource = faceResult.FaceBoundingBoxInColorSpace;
Rect faceBox = new Rect(faceBoxSource.Left, faceBoxSource.Top, faceBoxSource.Right - faceBoxSource.Left, faceBoxSource.Bottom - faceBoxSource.Top);
drawingContext.DrawRectangle(null, drawingPen, faceBox);
if (faceResult.FacePointsInColorSpace != null)
{
// draw each face point
foreach (PointF pointF in faceResult.FacePointsInColorSpace.Values)
{
drawingContext.DrawEllipse(null, drawingPen, new Point(pointF.X, pointF.Y), FacePointRadius, FacePointRadius);
}
}
string faceText = string.Empty;
// extract each face property information and store it in faceText
if (faceResult.FaceProperties != null)
{
foreach (var item in faceResult.FaceProperties)
{
faceText += item.Key.ToString() + " : ";
// consider a "maybe" as a "no" to restrict
// the detection result refresh rate
if (item.Value == DetectionResult.Maybe)
{
faceText += DetectionResult.No + "\n";
}
else
{
faceText += item.Value.ToString() + "\n";
}
}
}
// extract face rotation in degrees as Euler angles
if (faceResult.FaceRotationQuaternion != null)
{
int pitch, yaw, roll;
ExtractFaceRotationInDegrees(faceResult.FaceRotationQuaternion, out pitch, out yaw, out roll);
faceText += "FaceYaw : " + yaw + "\n" +
"FacePitch : " + pitch + "\n" +
"FacenRoll : " + roll + "\n";
}
// render the face property and face rotation information
Point faceTextLayout;
if (this.GetFaceTextPositionInColorSpace(faceIndex, out faceTextLayout))
{
drawingContext.DrawText(
new FormattedText(
faceText,
CultureInfo.GetCultureInfo("en-us"),
FlowDirection.LeftToRight,
new Typeface("Georgia"),
DrawTextFontSize,
drawingBrush),
faceTextLayout);
}
}
/// <summary>
/// Computes the face result text position by adding an offset to the corresponding
/// body's head joint in camera space and then by projecting it to screen space
/// </summary>
/// <param name="faceIndex">the index of the face frame corresponding to a specific body in the FOV</param>
/// <param name="faceTextLayout">the text layout position in screen space</param>
/// <returns>success or failure</returns>
private bool GetFaceTextPositionInColorSpace(int faceIndex, out Point faceTextLayout)
{
faceTextLayout = new Point();
bool isLayoutValid = false;
Body body = this.bodies[faceIndex];
if (body.IsTracked)
{
var headJoint = body.Joints[JointType.Head].Position;
CameraSpacePoint textPoint = new CameraSpacePoint()
{
X = headJoint.X + TextLayoutOffsetX,
Y = headJoint.Y + TextLayoutOffsetY,
Z = headJoint.Z
};
ColorSpacePoint textPointInColor = this.coordinateMapper.MapCameraPointToColorSpace(textPoint);
faceTextLayout.X = textPointInColor.X;
faceTextLayout.Y = textPointInColor.Y;
isLayoutValid = true;
}
return isLayoutValid;
}
/// <summary>
/// Validates face bounding box and face points to be within screen space
/// </summary>
/// <param name="faceResult">the face frame result containing face box and points</param>
/// <returns>success or failure</returns>
private bool ValidateFaceBoxAndPoints(FaceFrameResult faceResult)
{
bool isFaceValid = faceResult != null;
if (isFaceValid)
{
var faceBox = faceResult.FaceBoundingBoxInColorSpace;
if (faceBox != null)
{
// check if we have a valid rectangle within the bounds of the screen space
isFaceValid = (faceBox.Right - faceBox.Left) > 0 &&
(faceBox.Bottom - faceBox.Top) > 0 &&
faceBox.Right <= this.displayWidth &&
faceBox.Bottom <= this.displayHeight;
if (isFaceValid)
{
var facePoints = faceResult.FacePointsInColorSpace;
if (facePoints != null)
{
foreach (PointF pointF in facePoints.Values)
{
// check if we have a valid face point within the bounds of the screen space
bool isFacePointValid = pointF.X > 0.0f &&
pointF.Y > 0.0f &&
pointF.X < this.displayWidth &&
pointF.Y < this.displayHeight;
if (!isFacePointValid)
{
isFaceValid = false;
break;
}
}
}
}
}
}
return isFaceValid;
}
/// <summary>
/// Handles the event raised when the sensor becomes unavailable (e.g. paused, closed, unplugged).
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void Sensor_IsAvailableChanged(object sender, IsAvailableChangedEventArgs e)
{
if (this.kinectSensor != null)
{
// on failure, set the status text
this.StatusText = this.kinectSensor.IsAvailable ? Properties.Resources.RunningStatusText
: Properties.Resources.SensorNotAvailableStatusText;
}
}
// ===============================================================
// ===============================================================
// ===================== Copy from MS Projs ======================
// ===============================================================
// ===============================================================
private void initBodies()
{
// a bone defined as a line between two joints
this.bones = new List<Tuple<JointType, JointType>>();
// Torso
this.bones.Add(new Tuple<JointType, JointType>(JointType.Head, JointType.Neck));
this.bones.Add(new Tuple<JointType, JointType>(JointType.Neck, JointType.SpineShoulder));
this.bones.Add(new Tuple<JointType, JointType>(JointType.SpineShoulder, JointType.SpineMid));
this.bones.Add(new Tuple<JointType, JointType>(JointType.SpineMid, JointType.SpineBase));
this.bones.Add(new Tuple<JointType, JointType>(JointType.SpineShoulder, JointType.ShoulderRight));
this.bones.Add(new Tuple<JointType, JointType>(JointType.SpineShoulder, JointType.ShoulderLeft));
this.bones.Add(new Tuple<JointType, JointType>(JointType.SpineBase, JointType.HipRight));
this.bones.Add(new Tuple<JointType, JointType>(JointType.SpineBase, JointType.HipLeft));
// Right Arm
this.bones.Add(new Tuple<JointType, JointType>(JointType.ShoulderRight, JointType.ElbowRight));
this.bones.Add(new Tuple<JointType, JointType>(JointType.ElbowRight, JointType.WristRight));
this.bones.Add(new Tuple<JointType, JointType>(JointType.WristRight, JointType.HandRight));
this.bones.Add(new Tuple<JointType, JointType>(JointType.HandRight, JointType.HandTipRight));
this.bones.Add(new Tuple<JointType, JointType>(JointType.WristRight, JointType.ThumbRight));
// Left Arm
this.bones.Add(new Tuple<JointType, JointType>(JointType.ShoulderLeft, JointType.ElbowLeft));
this.bones.Add(new Tuple<JointType, JointType>(JointType.ElbowLeft, JointType.WristLeft));
this.bones.Add(new Tuple<JointType, JointType>(JointType.WristLeft, JointType.HandLeft));
this.bones.Add(new Tuple<JointType, JointType>(JointType.HandLeft, JointType.HandTipLeft));
this.bones.Add(new Tuple<JointType, JointType>(JointType.WristLeft, JointType.ThumbLeft));
// Right Leg
this.bones.Add(new Tuple<JointType, JointType>(JointType.HipRight, JointType.KneeRight));
this.bones.Add(new Tuple<JointType, JointType>(JointType.KneeRight, JointType.AnkleRight));
this.bones.Add(new Tuple<JointType, JointType>(JointType.AnkleRight, JointType.FootRight));
// Left Leg
this.bones.Add(new Tuple<JointType, JointType>(JointType.HipLeft, JointType.KneeLeft));
this.bones.Add(new Tuple<JointType, JointType>(JointType.KneeLeft, JointType.AnkleLeft));
this.bones.Add(new Tuple<JointType, JointType>(JointType.AnkleLeft, JointType.FootLeft));
// populate body colors, one for each BodyIndex
this.bodyColors = new List<Pen>();
this.bodyColors.Add(new Pen(Brushes.Red, 6));
this.bodyColors.Add(new Pen(Brushes.Orange, 6));
this.bodyColors.Add(new Pen(Brushes.Green, 6));
this.bodyColors.Add(new Pen(Brushes.Blue, 6));
this.bodyColors.Add(new Pen(Brushes.Indigo, 6));
this.bodyColors.Add(new Pen(Brushes.Violet, 6));
}
/// <summary>
/// Handles the body frame data arriving from the sensor
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void Reader_FrameArrived(object sender, BodyFrameArrivedEventArgs e)
{
bool dataReceived = false;
using (BodyFrame bodyFrame = e.FrameReference.AcquireFrame())
{
if (bodyFrame != null)
{
if (this.bodies == null)
{
this.bodies = new Body[bodyFrame.BodyCount];
}
// The first time GetAndRefreshBodyData is called, Kinect will allocate each Body in the array.
// As long as those body objects are not disposed and not set to null in the array,
// those body objects will be re-used.
bodyFrame.GetAndRefreshBodyData(this.bodies);
for (int i = 0; i < this.bodyCount; i++)
this.osceleton.EnqueueBody(0, i, this.bodies[i]);
dataReceived = true;
}
}
if (!this.drawBody)
return;
using (DrawingContext dc = this.drawingGroup.Open())
{
// Draw the background
ImageSource img = GetImageSource();
if (img == null)
dc.DrawRectangle(Brushes.Black, null, this.displayRect);
else
dc.DrawImage(img, this.displayRect);
if (dataReceived)
{
int penIndex = 0;
foreach (Body body in this.bodies)
{
Pen drawPen = this.bodyColors[penIndex++];
if (body.IsTracked)
{
this.DrawClippedEdges(body, dc);
IReadOnlyDictionary<JointType, Joint> joints = body.Joints;
// convert the joint points to depth (display) space
Dictionary<JointType, Point> jointPoints = new Dictionary<JointType, Point>();
foreach (JointType jointType in joints.Keys)
{
// sometimes the depth(Z) of an inferred joint may show as negative
// clamp down to 0.1f to prevent coordinatemapper from returning (-Infinity, -Infinity)
CameraSpacePoint position = joints[jointType].Position;
if (position.Z < 0)
{
position.Z = InferredZPositionClamp;
}
if (this.showMode == showModes.None || this.showMode == showModes.Depth || this.showMode == showModes.IR)
{
DepthSpacePoint depthSpacePoint = this.coordinateMapper.MapCameraPointToDepthSpace(position);
jointPoints[jointType] = new Point(depthSpacePoint.X, depthSpacePoint.Y);
}
else
{
ColorSpacePoint colorSpacePoint = this.coordinateMapper.MapCameraPointToColorSpace(position);
jointPoints[jointType] = new Point(colorSpacePoint.X, colorSpacePoint.Y);
}
}
this.DrawBody(joints, jointPoints, dc, drawPen);
this.DrawHand(body.HandLeftState, jointPoints[JointType.HandLeft], dc);
this.DrawHand(body.HandRightState, jointPoints[JointType.HandRight], dc);
}
}
// prevent drawing outside of our render area
this.drawingGroup.ClipGeometry = new RectangleGeometry(this.displayRect);
}
}
}
/// <summary>
/// Draws a body
/// </summary>
/// <param name="joints">joints to draw</param>
/// <param name="jointPoints">translated positions of joints to draw</param>
/// <param name="drawingContext">drawing context to draw to</param>
/// <param name="drawingPen">specifies color to draw a specific body</param>
private void DrawBody(IReadOnlyDictionary<JointType, Joint> joints, IDictionary<JointType, Point> jointPoints, DrawingContext drawingContext, Pen drawingPen)
{
// Draw the bones
foreach (var bone in this.bones)
{
this.DrawBone(joints, jointPoints, bone.Item1, bone.Item2, drawingContext, drawingPen);
}
// Draw the joints
foreach (JointType jointType in joints.Keys)
{
Brush drawBrush = null;
TrackingState trackingState = joints[jointType].TrackingState;
if (trackingState == TrackingState.Tracked)
{
drawBrush = this.trackedJointBrush;
}
else if (trackingState == TrackingState.Inferred)
{
drawBrush = this.inferredJointBrush;
}
if (drawBrush != null)
{
drawingContext.DrawEllipse(drawBrush, null, jointPoints[jointType], JointThickness, JointThickness);
}
}
}
/// <summary>
/// Draws one bone of a body (joint to joint)
/// </summary>
/// <param name="joints">joints to draw</param>
/// <param name="jointPoints">translated positions of joints to draw</param>
/// <param name="jointType0">first joint of bone to draw</param>
/// <param name="jointType1">second joint of bone to draw</param>
/// <param name="drawingContext">drawing context to draw to</param>
/// /// <param name="drawingPen">specifies color to draw a specific bone</param>
private void DrawBone(IReadOnlyDictionary<JointType, Joint> joints, IDictionary<JointType, Point> jointPoints, JointType jointType0, JointType jointType1, DrawingContext drawingContext, Pen drawingPen)
{
Joint joint0 = joints[jointType0];
Joint joint1 = joints[jointType1];
// If we can't find either of these joints, exit
if (joint0.TrackingState == TrackingState.NotTracked ||
joint1.TrackingState == TrackingState.NotTracked)
{
return;
}
// We assume all drawn bones are inferred unless BOTH joints are tracked
Pen drawPen = this.inferredBonePen;
if ((joint0.TrackingState == TrackingState.Tracked) && (joint1.TrackingState == TrackingState.Tracked))
{
drawPen = drawingPen;
}
drawingContext.DrawLine(drawPen, jointPoints[jointType0], jointPoints[jointType1]);
}
/// <summary>
/// Draws a hand symbol if the hand is tracked: red circle = closed, green circle = open, blue circle = lasso
/// </summary>
/// <param name="handState">state of the hand</param>
/// <param name="handPosition">position of the hand</param>
/// <param name="drawingContext">drawing context to draw to</param>
private void DrawHand(HandState handState, Point handPosition, DrawingContext drawingContext)
{
switch (handState)
{
case HandState.Closed:
drawingContext.DrawEllipse(this.handClosedBrush, null, handPosition, HandSize, HandSize);
break;
case HandState.Open:
drawingContext.DrawEllipse(this.handOpenBrush, null, handPosition, HandSize, HandSize);
break;
case HandState.Lasso:
drawingContext.DrawEllipse(this.handLassoBrush, null, handPosition, HandSize, HandSize);
break;
}
}
/// <summary>
/// Draws indicators to show which edges are clipping body data
/// </summary>
/// <param name="body">body to draw clipping information for</param>
/// <param name="drawingContext">drawing context to draw to</param>
private void DrawClippedEdges(Body body, DrawingContext drawingContext)
{
FrameEdges clippedEdges = body.ClippedEdges;
if (clippedEdges.HasFlag(FrameEdges.Bottom))
{
drawingContext.DrawRectangle(
Brushes.Red,
null,
new Rect(0, this.displayHeight - ClipBoundsThickness, this.displayWidth, ClipBoundsThickness));
}
if (clippedEdges.HasFlag(FrameEdges.Top))
{
drawingContext.DrawRectangle(
Brushes.Red,
null,
new Rect(0, 0, this.displayWidth, ClipBoundsThickness));
}
if (clippedEdges.HasFlag(FrameEdges.Left))
{
drawingContext.DrawRectangle(
Brushes.Red,
null,
new Rect(0, 0, ClipBoundsThickness, this.displayHeight));
}
if (clippedEdges.HasFlag(FrameEdges.Right))
{
drawingContext.DrawRectangle(
Brushes.Red,
null,
new Rect(this.displayWidth - ClipBoundsThickness, 0, ClipBoundsThickness, this.displayHeight));
}
}
/// <summary>
/// Handles the color frame data arriving from the sensor
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void Reader_ColorFrameArrived(object sender, ColorFrameArrivedEventArgs e)
{
// ColorFrame is IDisposable
using (ColorFrame colorFrame = e.FrameReference.AcquireFrame())
{
if (colorFrame != null)
{
FrameDescription colorFrameDescription = colorFrame.FrameDescription;
using (KinectBuffer colorBuffer = colorFrame.LockRawImageBuffer())
{
this.colorBitmap.Lock();
// verify data and write the new color frame data to the display bitmap
if ((colorFrameDescription.Width == this.colorBitmap.PixelWidth) && (colorFrameDescription.Height == this.colorBitmap.PixelHeight))
{
colorFrame.CopyConvertedFrameDataToIntPtr(
this.colorBitmap.BackBuffer,
(uint)(colorFrameDescription.Width * colorFrameDescription.Height * 4),
ColorImageFormat.Bgra);
this.colorBitmap.AddDirtyRect(new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight));
}
this.colorBitmap.Unlock();
}
}
}
}
/// <summary>
/// Handles the depth frame data arriving from the sensor
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void Reader_FrameArrived(object sender, DepthFrameArrivedEventArgs e)
{
bool depthFrameProcessed = false;
if (showMode != showModes.Depth) return;
using (DepthFrame depthFrame = e.FrameReference.AcquireFrame())
{
if (depthFrame != null)
{
// the fastest way to process the body index data is to directly access
// the underlying buffer
using (Microsoft.Kinect.KinectBuffer depthBuffer = depthFrame.LockImageBuffer())
{
// verify data and write the depth data to the display bitmap
if (((this.depthFrameDescription.Width * this.depthFrameDescription.Height) == (depthBuffer.Size / this.depthFrameDescription.BytesPerPixel)) &&
(this.depthFrameDescription.Width == this.depthBitmap.PixelWidth) && (this.depthFrameDescription.Height == this.depthBitmap.PixelHeight))
{
// Note: In order to see the full range of depth (including the less reliable far field depth)
// we are setting maxDepth to the extreme potential depth threshold
ushort maxDepth = ushort.MaxValue;
// If you wish to filter by reliable depth distance, uncomment the following line:
//// maxDepth = depthFrame.DepthMaxReliableDistance
this.ProcessDepthFrameData(depthBuffer.UnderlyingBuffer, depthBuffer.Size, depthFrame.DepthMinReliableDistance, maxDepth);
depthFrameProcessed = true;
}
}
}
}
if (depthFrameProcessed)
{
this.RenderDepthPixels();
}
}
/// <summary>
/// Directly accesses the underlying image buffer of the DepthFrame to
/// create a displayable bitmap.
/// This function requires the /unsafe compiler option as we make use of direct
/// access to the native memory pointed to by the depthFrameData pointer.
/// </summary>
/// <param name="depthFrameData">Pointer to the DepthFrame image data</param>
/// <param name="depthFrameDataSize">Size of the DepthFrame image data</param>
/// <param name="minDepth">The minimum reliable depth value for the frame</param>
/// <param name="maxDepth">The maximum reliable depth value for the frame</param>
private unsafe void ProcessDepthFrameData(IntPtr depthFrameData, uint depthFrameDataSize, ushort minDepth, ushort maxDepth)
{
// depth frame data is a 16 bit value
ushort* frameData = (ushort*)depthFrameData;
// convert depth to a visual representation
for (int i = 0; i < (int)(depthFrameDataSize / this.depthFrameDescription.BytesPerPixel); ++i)
{
// Get the depth for this pixel
ushort depth = frameData[i];
// To convert to a byte, we're mapping the depth value to the byte range.
// Values outside the reliable depth range are mapped to 0 (black).
this.depthPixels[i] = (byte)(depth >= minDepth && depth <= maxDepth ? (depth / MapDepthToByte) : 0);
}
}
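// Worked example (illustrative only): MapDepthToByte = 8000 / 256 = 31 (integer division),
// so a pixel at 1550 mm maps to 1550 / 31 = 50 (dark grey) and one at 7900 mm maps to
// 7900 / 31 = 254 (near white). Values outside [minDepth, maxDepth] render as 0 (black);
// with maxDepth left at ushort.MaxValue, raw readings above about 7905 mm would wrap
// around when cast to byte.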
/// <summary>
/// Renders depth pixels into the writeableBitmap.
/// </summary>
private void RenderDepthPixels()
{
this.depthBitmap.WritePixels(
new Int32Rect(0, 0, this.depthBitmap.PixelWidth, this.depthBitmap.PixelHeight),
this.depthPixels,
this.depthBitmap.PixelWidth,
0);
}
/// <summary>
/// Handles the infrared frame data arriving from the sensor
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void Reader_InfraredFrameArrived(object sender, InfraredFrameArrivedEventArgs e)
{
// InfraredFrame is IDisposable
using (InfraredFrame infraredFrame = e.FrameReference.AcquireFrame())
{
if (infraredFrame != null)
{
if (showMode != showModes.IR) return;
// the fastest way to process the infrared frame data is to directly access
// the underlying buffer
using (Microsoft.Kinect.KinectBuffer infraredBuffer = infraredFrame.LockImageBuffer())
{
// verify data and write the new infrared frame data to the display bitmap
if (((this.infraredFrameDescription.Width * this.infraredFrameDescription.Height) == (infraredBuffer.Size / this.infraredFrameDescription.BytesPerPixel)) &&
(this.infraredFrameDescription.Width == this.infraredBitmap.PixelWidth) && (this.infraredFrameDescription.Height == this.infraredBitmap.PixelHeight))
{
this.ProcessInfraredFrameData(infraredBuffer.UnderlyingBuffer, infraredBuffer.Size);
}
}
}
}
}
/// <summary>
/// Directly accesses the underlying image buffer of the InfraredFrame to
/// create a displayable bitmap.
/// This function requires the /unsafe compiler option as we make use of direct
/// access to the native memory pointed to by the infraredFrameData pointer.
/// </summary>
/// <param name="infraredFrameData">Pointer to the InfraredFrame image data</param>
/// <param name="infraredFrameDataSize">Size of the InfraredFrame image data</param>
private unsafe void ProcessInfraredFrameData(IntPtr infraredFrameData, uint infraredFrameDataSize)
{
// infrared frame data is a 16 bit value
ushort* frameData = (ushort*)infraredFrameData;
// lock the target bitmap
this.infraredBitmap.Lock();
// get the pointer to the bitmap's back buffer
float* backBuffer = (float*)this.infraredBitmap.BackBuffer;
// process the infrared data
for (int i = 0; i < (int)(infraredFrameDataSize / this.infraredFrameDescription.BytesPerPixel); ++i)
{
// since we are displaying the image as a normalized grey scale image, we need to convert from
// the ushort data (as provided by the InfraredFrame) to a value from [InfraredOutputValueMinimum, InfraredOutputValueMaximum]
backBuffer[i] = Math.Min(InfraredOutputValueMaximum, (((float)frameData[i] / InfraredSourceValueMaximum * InfraredSourceScale) * (1.0f - InfraredOutputValueMinimum)) + InfraredOutputValueMinimum);
}
// mark the entire bitmap as needing to be drawn
this.infraredBitmap.AddDirtyRect(new Int32Rect(0, 0, this.infraredBitmap.PixelWidth, this.infraredBitmap.PixelHeight));
// unlock the bitmap
this.infraredBitmap.Unlock();
}
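// Worked example (illustrative only): for a raw IR reading of 32768,
// 32768 / 65535 * 0.75 = 0.375, then 0.375 * (1 - 0.01) + 0.01 ≈ 0.381,
// which the Gray32Float bitmap renders as a mid grey; the Math.Min simply caps the result at 1.0.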
// ===============================================================
// ===============================================================
// ===================== OSCeleton additions =====================
// ===============================================================
// ===============================================================
/// <summary>
/// Handles keypresses from the keyboard
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void MainWindow_KeyUp(object sender, System.Windows.Input.KeyEventArgs e)
{
if (e.Key == System.Windows.Input.Key.Space)
{
osceleton.OpenNewCSVFile();
}
}
/// <summary>
/// Handles changes to the show skeleton checkbox
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void CheckBoxShowSkeletonChanged(object sender, System.Windows.RoutedEventArgs e)
{
this.drawBody = this.checkBoxShowSkeleton.IsChecked.GetValueOrDefault();
}
/// <summary>
/// Handles changes to the use speech commands checkbox
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void CheckBoxSpeechCommandsChanged(object sender, System.Windows.RoutedEventArgs e)
{
if (this.checkBoxSpeechCommands.IsChecked.Value)
StartSpeechRecognizer();
else
StopSpeechRecognizer();
}
/// <summary>
/// Handles changes to the hands only checkbox
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void CheckBoxTrackHandsOnlyChanged(object sender, System.Windows.RoutedEventArgs e)
{
osceleton.SwitchHandsOnly(this.checkBoxTrackHandsOnly.IsChecked.Value);
}
/// <summary>
/// Handles changes to the track face checkbox
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void CheckBoxTrackFaceChanged(object sender, System.Windows.RoutedEventArgs e)
{
this.trackFace = this.checkBoxTrackFace.IsChecked.Value;
UpdateFaceTracking();
// switch to color view
if (this.trackFace)
this.ComboBoxDisplay.SelectedIndex = 1;
}
private void StartSpeechRecognizer()
{
// TODO
// // grab the audio stream
// IReadOnlyList<AudioBeam> audioBeamList = this.kinectSensor.AudioSource.AudioBeams;
// System.IO.Stream audioStream = audioBeamList[0].OpenInputStream();
// // create the convert stream
// this.convertStream = new KinectAudioStream(audioStream);
// RecognizerInfo ri = TryGetKinectRecognizer();
// if (null != ri)
// {
// this.recognitionSpans = new List<Span> { forwardSpan, backSpan, rightSpan, leftSpan };
// this.speechEngine = new SpeechRecognitionEngine(ri.Id);
// /****************************************************************
// *
// * Use this code to create grammar programmatically rather than from
// * a grammar file.
// *
// * var directions = new Choices();
// * directions.Add(new SemanticResultValue("forward", "FORWARD"));
// * directions.Add(new SemanticResultValue("forwards", "FORWARD"));
// * directions.Add(new SemanticResultValue("straight", "FORWARD"));
// * directions.Add(new SemanticResultValue("backward", "BACKWARD"));
// * directions.Add(new SemanticResultValue("backwards", "BACKWARD"));
// * directions.Add(new SemanticResultValue("back", "BACKWARD"));
// * directions.Add(new SemanticResultValue("turn left", "LEFT"));
// * directions.Add(new SemanticResultValue("turn right", "RIGHT"));
// *
// * var gb = new GrammarBuilder { Culture = ri.Culture };
// * gb.Append(directions);
// *
// * var g = new Grammar(gb);
// *
// ****************************************************************/
// // Create a grammar from grammar definition XML file.
// using (var memoryStream = new MemoryStream(Encoding.ASCII.GetBytes(Properties.Resources.SpeechGrammar)))
// {
// var g = new Grammar(memoryStream);
// this.speechEngine.LoadGrammar(g);
// }
// this.speechEngine.SpeechRecognized += this.SpeechRecognized;
// this.speechEngine.SpeechRecognitionRejected += this.SpeechRejected;
// // let the convertStream know speech is going active
// this.convertStream.SpeechActive = true;
// // For long recognition sessions (a few hours or more), it may be beneficial to turn off adaptation of the acoustic model.
// // This will prevent recognition accuracy from degrading over time.
// ////speechEngine.UpdateRecognizerSetting("AdaptationOn", 0);
// this.speechEngine.SetInputToAudioStream(
// this.convertStream, new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
// this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
}
private void StopSpeechRecognizer()
{
// TODO
}
private void ComboBoxDisplaySelectionChanged(object sender, System.Windows.Controls.SelectionChangedEventArgs e)
{
switch (this.ComboBoxDisplay.SelectedIndex)
{
case 0:
this.showMode = showModes.None;
break;
case 1:
this.showMode = showModes.Color;
break;
case 2:
this.showMode = showModes.Depth;
break;
case 3:
this.showMode = showModes.IR;
break;
}
UpdateObservers();
}
private void initFaceDB()
{
// Copy NuiDatabase
string source = Environment.GetEnvironmentVariable("KINECTSDK20_DIR");
if (source == null)
{
MessageBox.Show("KINECTSDK20_DIR environment variable not found. Please install the Kinect SDK v2.0.", "OSCeleton-KinectSDK2 dependency error");
Application.Current.Shutdown();
return;
}
System.Uri uri = new Uri(System.Reflection.Assembly.GetAssembly(typeof(MainWindow)).CodeBase);
string target = Path.GetDirectoryName(uri.LocalPath);
Process proc = new Process
{
StartInfo = new ProcessStartInfo
{
FileName = @"C:\WINDOWS\system32\xcopy.exe",
Arguments = "\"" + source + "\\Redist\\Face\\x64\\NuiDatabase\" \"" + target + "\\NuiDatabase\" /S /R /Y /I",
UseShellExecute = false,
RedirectStandardOutput = false,
CreateNoWindow = true
}
};
proc.Start();
proc.WaitForExit();
}
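// Example of the command this builds (assuming the SDK is installed to its default location,
// so KINECTSDK20_DIR points at "C:\Program Files\Microsoft SDKs\Kinect\v2.0_1409\"):
//   xcopy "C:\Program Files\Microsoft SDKs\Kinect\v2.0_1409\Redist\Face\x64\NuiDatabase"
//         "<application directory>\NuiDatabase" /S /R /Y /I
// This copies the face model database next to the executable, where the face tracking
// runtime expects to find it at startup.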
private void initOSCeleton()
{
initFaceDB();
this.osceleton.Initialise();
this.depthFrameDescription = this.kinectSensor.DepthFrameSource.FrameDescription;
this.displayDepthWidth = depthFrameDescription.Width;
this.displayDepthHeight = depthFrameDescription.Height;
this.checkBoxTrackHandsOnly.IsChecked = osceleton.GetHandsOnly();
this.checkBoxTrackFace.IsChecked = osceleton.GetFaceTracking();
UpdateObservers();
}
private void UpdateFaceTracking()
{
if (this.trackFace)
{
for (int i = 0; i < this.bodyCount; i++)
{
if (this.faceFrameSources[i] == null || this.faceFrameReaders[i] == null)
{
// specify the required face frame results
FaceFrameFeatures faceFrameFeatures =
FaceFrameFeatures.BoundingBoxInColorSpace
| FaceFrameFeatures.PointsInColorSpace
| FaceFrameFeatures.RotationOrientation
| FaceFrameFeatures.FaceEngagement
| FaceFrameFeatures.Glasses
| FaceFrameFeatures.Happy
| FaceFrameFeatures.LeftEyeClosed
| FaceFrameFeatures.RightEyeClosed
| FaceFrameFeatures.LookingAway
| FaceFrameFeatures.MouthMoved
| FaceFrameFeatures.MouthOpen;
// create the face frame source with the required face frame features and an initial tracking Id of 0
this.faceFrameSources[i] = new FaceFrameSource(this.kinectSensor, 0, faceFrameFeatures);
// open the corresponding reader
this.faceFrameReaders[i] = this.faceFrameSources[i].OpenReader();
}
// wire handler for face frame arrival
this.faceFrameReaders[i].FrameArrived += this.Reader_FaceFrameArrived;
}
this.bodyFrameReader.FrameArrived += this.Reader_FaceFrameArrived;
}
else
{
this.bodyFrameReader.FrameArrived -= this.Reader_FaceFrameArrived;
for (int i = 0; i < this.bodyCount; i++)
{
if (this.faceFrameReaders[i] != null)
{
// unwire the face frame arrival handler (the reader itself is kept for re-use)
this.faceFrameReaders[i].FrameArrived -= this.Reader_FaceFrameArrived;
}
}
}
}
private void UpdateObservers()
{
if (showMode == showModes.None)
{
this.depthFrameDescription = this.kinectSensor.DepthFrameSource.FrameDescription;
this.displayWidth = depthFrameDescription.Width;
this.displayHeight = depthFrameDescription.Height;
this.displayRect = new Rect(0.0, 0.0, this.displayWidth, this.displayHeight);
}
if (this.showMode == showModes.Color)
{
this.colorFrameReader = this.kinectSensor.ColorFrameSource.OpenReader();
this.colorFrameReader.FrameArrived += this.Reader_ColorFrameArrived;
FrameDescription colorFrameDescription = this.kinectSensor.ColorFrameSource.CreateFrameDescription(ColorImageFormat.Bgra);
this.displayWidth = colorFrameDescription.Width;
this.displayHeight = colorFrameDescription.Height;
this.displayRect = new Rect(0.0, 0.0, this.displayWidth, this.displayHeight);
this.colorBitmap = new WriteableBitmap(colorFrameDescription.Width, colorFrameDescription.Height, 96.0, 96.0, PixelFormats.Bgr32, null);
}
else if (this.colorFrameReader != null)
{
this.colorFrameReader.FrameArrived -= this.Reader_ColorFrameArrived;
this.colorFrameReader.Dispose();
this.colorFrameReader = null;
}
if (this.showMode == showModes.Depth)
{
this.depthFrameReader = this.kinectSensor.DepthFrameSource.OpenReader();
this.depthFrameReader.FrameArrived += this.Reader_FrameArrived;
this.depthFrameDescription = this.kinectSensor.DepthFrameSource.FrameDescription;
this.displayWidth = depthFrameDescription.Width;
this.displayHeight = depthFrameDescription.Height;
this.displayRect = new Rect(0.0, 0.0, this.displayWidth, this.displayHeight);
this.depthPixels = new byte[this.depthFrameDescription.Width * this.depthFrameDescription.Height];
this.depthBitmap = new WriteableBitmap(this.depthFrameDescription.Width, this.depthFrameDescription.Height, 96.0, 96.0, PixelFormats.Gray8, null);
this.DataContext = this;
}
else if (this.depthFrameReader != null)
{
this.depthFrameReader.FrameArrived -= this.Reader_FrameArrived;
this.depthFrameReader.Dispose();
this.depthFrameReader = null;
}
if (this.showMode == showModes.IR)
{
this.infraredFrameReader = this.kinectSensor.InfraredFrameSource.OpenReader();
this.infraredFrameReader.FrameArrived += this.Reader_InfraredFrameArrived;
this.infraredFrameDescription = this.kinectSensor.InfraredFrameSource.FrameDescription;
this.displayWidth = infraredFrameDescription.Width;
this.displayHeight = infraredFrameDescription.Height;
this.displayRect = new Rect(0.0, 0.0, this.displayWidth, this.displayHeight);
this.infraredBitmap = new WriteableBitmap(this.infraredFrameDescription.Width, this.infraredFrameDescription.Height, 96.0, 96.0, PixelFormats.Gray32Float, null);
}
else if (this.infraredFrameReader != null)
{
this.infraredFrameReader.FrameArrived -= this.Reader_InfraredFrameArrived;
this.infraredFrameReader.Dispose();
this.infraredFrameReader = null;
}
}
}
}