Skip to content

MLCompute tvOS xcode13.0 beta1

Manuel de la Pena edited this page Aug 4, 2021 · 3 revisions

# MLCompute.framework https://github.com/xamarin/xamarin-macios/pull/12348

diff -ruN /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCAdamOptimizer.h /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCAdamOptimizer.h
--- /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCAdamOptimizer.h	2021-03-16 13:56:16.000000000 -0400
+++ /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCAdamOptimizer.h	2021-06-02 05:35:08.000000000 -0400
@@ -34,10 +34,17 @@
     @abstract   A term added to improve numerical stability.
     @discussion The default is 1e-8.
  */
+
 @property (readonly, nonatomic) float epsilon;
 
+/*! @property   usesAMSGrad
+    @abstract   Whether to use the AMSGrad variant of this algorithm
+    @discussion The default is false
+ */
+@property (readonly, nonatomic) BOOL usesAMSGrad;
+
 /*! @property   timeStep
-    @abstract   The current timestep used for the update.  
+    @abstract   The current timestep used for the update.
     @discussion The default is 1.
  */
 @property (readonly, nonatomic) NSUInteger timeStep;
@@ -62,6 +69,23 @@
                                 epsilon:(float)epsilon
                                timeStep:(NSUInteger)timeStep;
 
+/*! @abstract   Create a MLCAdamOptimizer object
+    @param      optimizerDescriptor    The optimizer descriptor object
+    @param      beta1                                   The beta1 value
+    @param      beta2                                   The beta2 value
+    @param      epsilon                              The epsilon value to use to improve numerical stability
+    @param      usesAMSGrad                     Whether to use the AMSGrad variant of this algorithm from the paper (https://arxiv.org/abs/1904.09237)
+    @param      timeStep                            The initial timestep to use for the update
+    @return     A new MLCAdamOptimizer object.
+ */
++ (instancetype)optimizerWithDescriptor:(MLCOptimizerDescriptor *)optimizerDescriptor
+                                  beta1:(float)beta1
+                                  beta2:(float)beta2
+                                epsilon:(float)epsilon
+                            usesAMSGrad:(BOOL)usesAMSGrad
+                               timeStep:(NSUInteger)timeStep
+    MLCOMPUTE_AVAILABLE_STARTING(macos(12.0), ios(15), tvos(15));
+
 @end
 
 NS_ASSUME_NONNULL_END
diff -ruN /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCAdamWOptimizer.h /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCAdamWOptimizer.h
--- /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCAdamWOptimizer.h	1969-12-31 19:00:00.000000000 -0500
+++ /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCAdamWOptimizer.h	2021-06-02 05:35:10.000000000 -0400
@@ -0,0 +1,75 @@
+//
+//  MLCAdamWOptimizer.h
+//  MLCompute
+//
+//  Copyright © 2021 Apple. All rights reserved.
+//
+
+#import <MLCompute/MLCompute.h>
+
+@class MLCDevice;
+@class MLCOptimizerDescriptor;
+
+NS_ASSUME_NONNULL_BEGIN
+
+/*! @class      MLCAdamWOptimizer
+    @discussion The MLCAdamWOptimizer specifies the AdamW optimizer.
+ */
+MLCOMPUTE_CLASS_AVAILABLE_STARTING(macos(12.0), ios(15.0), tvos(15.0))
+@interface  MLCAdamWOptimizer : MLCOptimizer<NSCopying>
+
+/*! @property   beta1
+    @abstract   Coefficient used for computing running averages of gradient.
+    @discussion The default is 0.9.
+ */
+@property (readonly, nonatomic) float beta1;
+
+/*! @property   beta2
+    @abstract   Coefficient used for computing running averages of square of gradient.
+    @discussion The default is 0.999.
+ */
+@property (readonly, nonatomic) float beta2;
+
+/*! @property   epsilon
+    @abstract   A term added to improve numerical stability.
+    @discussion The default is 1e-8.
+ */
+@property (readonly, nonatomic) float epsilon;
+
+/*! @property   usesAMSGrad
+    @abstract   Whether to use the AMSGrad variant of this algorithm
+    @discussion The default is false
+ */
+@property (readonly, nonatomic) BOOL usesAMSGrad;
+
+/*! @property   timeStep
+    @abstract   The current timestep used for the update.
+    @discussion The default is 1.
+ */
+@property (readonly, nonatomic) NSUInteger timeStep;
+
+
+/*! @abstract   Create an MLCAdamWOptimizer object with defaults
+    @return     A new MLCAdamWOptimizer object.
+ */
++ (instancetype)optimizerWithDescriptor:(MLCOptimizerDescriptor *)optimizerDescriptor;
+
+/*! @abstract   Create an MLCAdamWOptimizer object
+    @param      optimizerDescriptor    The optimizer descriptor object
+    @param      beta1                  The beta1 value
+    @param      beta2                  The beta2 value
+    @param      epsilon              The epsilon value to use to improve numerical stability
+    @param      usesAMSGrad     Whether to use the AMSGrad variant of this algorithm from the paper (https://arxiv.org/abs/1904.09237)
+    @param      timeStep            The initial timestep to use for the update
+    @return     A new MLCAdamWOptimizer object.
+ */
++ (instancetype)optimizerWithDescriptor:(MLCOptimizerDescriptor *)optimizerDescriptor
+                                  beta1:(float)beta1
+                                  beta2:(float)beta2
+                                epsilon:(float)epsilon
+                            usesAMSGrad:(BOOL)usesAMSGrad
+                               timeStep:(NSUInteger)timeStep;
+
+@end
+
+NS_ASSUME_NONNULL_END
diff -ruN /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCDevice.h /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCDevice.h
--- /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCDevice.h	2021-03-16 13:56:18.000000000 -0400
+++ /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCDevice.h	2021-06-02 05:35:11.000000000 -0400
@@ -25,7 +25,7 @@
 @interface MLCDevice : NSObject<NSCopying>
 
 /*! @property   type
-    @abstract   The device type.
+    @abstract   The type specified when the device is created
     @discussion Recommend that developers use MLCDeviceTypeAny as the device type.
                 This will ensure that MLCompute will select the best device to execute the neural network.
                 If developers want to be able to control device selection, they can select CPU or GPU and
@@ -33,6 +33,14 @@
  */
 @property (readonly, nonatomic) MLCDeviceType type;
 
+/*! @property   actualDeviceType
+    @abstract   The specific device selected.
+    @discussion This can be CPU, GPU or ANE.  If type is MLCDeviceTypeAny, this property
+                can be used to find out the specific device type that is selected.
+ */
+@property (readonly, nonatomic) MLCDeviceType actualDeviceType
+    MLCOMPUTE_AVAILABLE_STARTING(macos(12.0), ios(15.0), tvos(15.0));
+
 @property (readonly, nonatomic) NSArray<id<MTLDevice>> *gpuDevices;
 
 /*! @abstract   Creates a device which uses the CPU.
@@ -45,6 +53,12 @@
  */
 + (instancetype _Nullable)gpuDevice;
 
+/*! @abstract   Creates a device which uses the Apple Neural Engine, if any.
+    @return     A new device, or `nil` if no ANE exists.
+ */
++ (instancetype _Nullable)aneDevice
+    MLCOMPUTE_AVAILABLE_STARTING(macos(12.0), ios(15.0), tvos(15.0));
+
 /*! @abstract   Create a MLCDevice object
     @param      type    A device type
     @return     A new device object
diff -ruN /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCLSTMDescriptor.h /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCLSTMDescriptor.h
--- /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCLSTMDescriptor.h	2021-03-16 13:56:18.000000000 -0400
+++ /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCLSTMDescriptor.h	2021-06-02 12:46:50.000000000 -0400
@@ -42,8 +42,7 @@
 @property (readonly, nonatomic) BOOL usesBiases;
 
 /*! @property   batchFirst
-    @abstract   if YES, the input and output will be in shape [batch, feature, 1, time steps], else it will be in shape [time steps, batch, feature].
-                Note that [time steps, batch, feature] will be the more efficient implementation. Default is YES.
+    @abstract   LSTM only supports batchFirst=YES. This means the input and output will have shape [batch size, time steps, feature]. Default is YES.
  */
 @property (readonly, nonatomic) BOOL batchFirst;
 
@@ -120,8 +119,7 @@
  @param      hiddenSize The number of features in the hidden state
  @param      layerCount Number of recurrent layers
  @param      usesBiases  If NO, the layer does not use bias weights.  Default: YES
- @param      batchFirst if YES, the input and output will be in shape [batch, feature, 1, time steps], else it will be in shape [time steps, batch, feature].
- Note that [time steps, batch, feature] will be the more efficient implementation. Default is YES.
+ @param      batchFirst LSTM only supports batchFirst=YES. This means the input and output will have shape [batch size, time steps, feature]. Default is YES.
  @param      isBidirectional  If YES, becomes a bi-directional LSTM.  Default: NO
  @param      returnsSequences if YES return output for all sequences else return output only for the last sequences. Default: YES
  @param      dropout  If non-zero, introduces a dropout layer on the outputs of each LSTM layer except the last layer
diff -ruN /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCLayer.h /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCLayer.h
--- /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCLayer.h	2021-03-16 13:56:18.000000000 -0400
+++ /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCLayer.h	2021-06-02 05:35:10.000000000 -0400
@@ -48,6 +48,15 @@
  */
 + (BOOL)supportsDataType:(MLCDataType)dataType onDevice:(MLCDevice *)device;
 
+/*! @property   deviceType
+    @abstract   The device type where this layer will be executed
+    @discussion Typically the MLCDevice passed to compileWithOptions will be the device used to execute layers in the graph.
+                If MLCDeviceTypeANE is selected, it is possible that some of the layers of the graph may not be executed on the ANE
+                but instead on the CPU or GPU.  This property can be used to determine which device type the layer will be executed on.
+ */
+@property (readonly, nonatomic) MLCDeviceType deviceType
+    MLCOMPUTE_AVAILABLE_STARTING(macos(12.0), ios(15.0), tvos(15.0));
+
 /* This is a virtual base class. Make MLCLayer subclass objects instead. */
 + (instancetype)new NS_UNAVAILABLE;
 - (instancetype)init NS_UNAVAILABLE;
diff -ruN /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCMultiheadAttentionDescriptor.h /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCMultiheadAttentionDescriptor.h
--- /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCMultiheadAttentionDescriptor.h	2021-03-16 13:56:18.000000000 -0400
+++ /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCMultiheadAttentionDescriptor.h	2021-06-02 05:35:10.000000000 -0400
@@ -24,10 +24,10 @@
 /*! @brief model or embedding dimension */
 @property (readonly, nonatomic) NSUInteger modelDimension;
 
-/*! @brief total dimension of key space, must be divisible by number of heads. Default = modelDimension */
+/*! @brief total dimension of key space, Default = modelDimension */
 @property (readonly, nonatomic) NSUInteger keyDimension;
 
-/*! @brief total dimension of value space, must be divisible by number of heads. Default = modelDimension */
+/*! @brief total dimension of value space, Default = modelDimension */
 @property (readonly, nonatomic) NSUInteger valueDimension;
 
 /*! @brief number of parallel attention heads */
diff -ruN /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCMultiheadAttentionLayer.h /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCMultiheadAttentionLayer.h
--- /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCMultiheadAttentionLayer.h	2021-03-16 13:56:18.000000000 -0400
+++ /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCMultiheadAttentionLayer.h	2021-06-02 05:35:10.000000000 -0400
@@ -22,10 +22,11 @@
 /*! @class      MLCMultiheadAttentionLayer
     @abstract   A multi-head attention layer
     @discussion A multi-head "Scaled Dot-Product Attention" layer which attends to one or more entries in the input key-value pairs
-                N=Batch, S=source length, L=target length, E = model(embedding) dimension
-                The sources to this layer are of shapes: Query:(N,L,E), Key:(N,S,E), Value:(N,S,E), KeyMask:(N,S), AttentionMask(1,L,S)
-                where KeyMask and AttentionMask are optional and none, either or both of them can be passed.
-                Output is of shape:(N,L,E). Only the case of modelDim = keyDim = valueDim is currently supported,
+                N=Batch, S=source length, L=target length, E = model(embedding) dimension, K = Key dimension, V = value
+                dimension H = headCount. The sources to this layer are of shapes: Query:(N,L,E), Key:(N,S,K), Value:(N,S,V),
+                KeyMask:(N,S), AttentionMask:(1,L,S) or (NxH,L,S). KeyMask and AttentionMask are optional and either, both
+                or none of them can be passed. KeyMask is of Boolean type and AttentionMask can be of Float or Boolean type.
+                Output is of shape:(N,L,E).
                 For details refer to: https://pytorch.org/docs/stable/nn.html#multiheadattention
  */
 MLCOMPUTE_CLASS_AVAILABLE_STARTING(macos(11.0), ios(14.0), tvos(14.0))
diff -ruN /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCOptimizer.h /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCOptimizer.h
--- /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCOptimizer.h	2021-03-16 13:56:17.000000000 -0400
+++ /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCOptimizer.h	2021-06-02 05:35:09.000000000 -0400
@@ -57,6 +57,25 @@
  */
 @property (readonly, nonatomic) MLCRegularizationType regularizationType;
 
+/*! @property   gradientClippingType
+    @abstract   The type of clipping applied to gradient
+ */
+@property (readonly, nonatomic) MLCGradientClippingType gradientClippingType
+    MLCOMPUTE_AVAILABLE_STARTING(macos(12.0), ios(15.0), tvos(15.0));
+
+/*! @property   maximumClippingNorm
+    @abstract   The maximum clipping value
+ */
+@property (readonly, nonatomic) float maximumClippingNorm
+    MLCOMPUTE_AVAILABLE_STARTING(macos(12.0), ios(15.0), tvos(15.0));
+
+/*! @property   customGlobalNorm
+    @abstract   Used only with MLCGradientClippingTypeByGlobalNorm. If non zero, this norm will be used in place of global norm.
+ */
+@property (readonly, nonatomic) float customGlobalNorm
+    MLCOMPUTE_AVAILABLE_STARTING(macos(12.0), ios(15.0), tvos(15.0));
+
+
 + (instancetype)new NS_UNAVAILABLE;
 - (instancetype)init NS_UNAVAILABLE;
 
diff -ruN /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCOptimizerDescriptor.h /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCOptimizerDescriptor.h
--- /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCOptimizerDescriptor.h	2021-03-16 13:56:17.000000000 -0400
+++ /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCOptimizerDescriptor.h	2021-06-02 11:06:22.000000000 -0400
@@ -19,7 +19,7 @@
 @interface  MLCOptimizerDescriptor : NSObject <NSCopying>
 
 /*! @property   learningRate
-    @abstract   The learning rate.  This property is 'readwrite' so that callers can implement a 'decay' during training
+    @abstract   The learning rate
  */
 @property (readonly, nonatomic) float learningRate;
 
@@ -54,6 +54,24 @@
  */
 @property (readonly, nonatomic) MLCRegularizationType regularizationType;
 
+/*! @property   gradientClippingType
+    @abstract   The type of clipping applied to gradient
+ */
+@property (readonly, nonatomic) MLCGradientClippingType gradientClippingType
+    MLCOMPUTE_AVAILABLE_STARTING(macos(12.0), ios(15.0), tvos(15.0));
+
+/*! @property   maximumClippingNorm
+    @abstract   The maximum clipping value
+ */
+@property (readonly, nonatomic) float maximumClippingNorm
+    MLCOMPUTE_AVAILABLE_STARTING(macos(12.0), ios(15.0), tvos(15.0));
+
+/*! @property   customGlobalNorm
+    @abstract   Used only with MLCGradientClippingTypeByGlobalNorm. If non zero, this norm will be used in place of global norm.
+ */
+@property (readonly, nonatomic) float customGlobalNorm
+    MLCOMPUTE_AVAILABLE_STARTING(macos(12.0), ios(15.0), tvos(15.0));
+
 /*! @abstract   Create a MLCOptimizerDescriptor object
     @param      learningRate                    The learning rate
     @param      gradientRescale              The gradient rescale value
@@ -84,6 +102,31 @@
                         regularizationType:(MLCRegularizationType)regularizationType
                        regularizationScale:(float)regularizationScale;
 
+/*! @abstract   Create an MLCOptimizerDescriptor object
+    @param      learningRate                            The learning rate
+    @param      gradientRescale                      The gradient rescale value
+    @param      appliesGradientClipping    Whether to apply gradient clipping
+    @param      gradientClippingType           The type of clipping applied to gradients
+    @param      gradientClipMax                      The maximum gradient value to be used with gradient clipping
+    @param      gradientClipMin                      The minimum gradient value to be used with gradient clipping
+    @param      maximumClippingNorm             The maximum norm to be used with gradient clipping
+    @param      customGlobalNorm                    If non-zero, the norm to be used instead of calculating the global norm
+    @param      regularizationType               The regularization type
+    @param      regularizationScale             The regularization scale
+    @return     A new MLCOptimizerDescriptor object.
+ */
++ (instancetype)descriptorWithLearningRate:(float)learningRate
+                           gradientRescale:(float)gradientRescale
+                   appliesGradientClipping:(BOOL)appliesGradientClipping
+                      gradientClippingType:(MLCGradientClippingType)gradientClippingType
+                           gradientClipMax:(float)gradientClipMax
+                           gradientClipMin:(float)gradientClipMin
+                       maximumClippingNorm:(float)maximumClippingNorm
+                          customGlobalNorm:(float)customGlobalNorm
+                        regularizationType:(MLCRegularizationType)regularizationType
+                       regularizationScale:(float)regularizationScale
+MLCOMPUTE_AVAILABLE_STARTING(macos(12.0), ios(15.0), tvos(15.0));
+
 @end
 
 
diff -ruN /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCPlatform.h /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCPlatform.h
--- /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCPlatform.h	1969-12-31 19:00:00.000000000 -0500
+++ /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCPlatform.h	2021-06-02 05:35:11.000000000 -0400
@@ -0,0 +1,35 @@
+//
+//  MLCPlatform.h
+//  MLCompute
+//
+//  Copyright © 2021 Apple. All rights reserved.
+//
+
+#import <Foundation/Foundation.h>
+#import <MLCompute/MLCDefines.h>
+
+/*!
+ @class     MLCPlatform
+ @abstract  Utility class to set MLCompute global properties
+ */
+NS_ASSUME_NONNULL_BEGIN
+
+MLCOMPUTE_CLASS_AVAILABLE_STARTING(macos(12.0), ios(15.0), tvos(15.0))
+@interface MLCPlatform : NSObject
+
+/*!
+@method    setRNGSeedTo
+@abstract  sets the RNG seed. The seed should be of type long int.
+*/
++ (void)setRNGSeedTo:(NSNumber *)seed;
+
+/*!
+@method    getRNGseed
+@abstract  gets the RNG seed value. If the value is not set it would return nil
+*/
++ (NSNumber * _Nullable)getRNGseed;
+
+@end
+
+NS_ASSUME_NONNULL_END
+
diff -ruN /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCSelectionLayer.h /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCSelectionLayer.h
--- /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCSelectionLayer.h	2021-03-16 13:56:18.000000000 -0400
+++ /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCSelectionLayer.h	2021-06-02 05:35:10.000000000 -0400
@@ -13,6 +13,7 @@
 /*! @abstract   Selection layer is used to select elements from two tensors
  *  @discussion The selection layer takes a condition tensor which acts as a mask that chooses whether the corresponding element / row
  *              in the output should be taken from tensor x (if the element in condition is true) or tensor y (if it is false).
+ *              The order of source tensors of the layer must be condition tensor, tensor x, and tensor y.
  */
 MLCOMPUTE_CLASS_AVAILABLE_STARTING(macos(11.3), ios(14.5), tvos(14.5))
 @interface MLCSelectionLayer : MLCLayer
diff -ruN /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCTensor.h /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCTensor.h
--- /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCTensor.h	2021-03-16 09:53:53.000000000 -0400
+++ /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCTensor.h	2021-06-02 12:46:48.000000000 -0400
@@ -125,6 +125,19 @@
     NS_REFINED_FOR_SWIFT;
 
 /*! @abstract   Create a MLCTensor object
+    @discussion Create a tensor object initialized with a random initializer such as Glorot Uniform.
+                The tensor data type is specified by the dataType parameter
+    @param      shape                                       The tensor shape
+    @param      randomInitializerType   The random initializer type
+    @param      dataType                    The tensor data type
+    @return     A new MLCTensor object
+ */
++ (instancetype)tensorWithShape:(NSArray<NSNumber *> *)shape
+          randomInitializerType:(MLCRandomInitializerType)randomInitializerType
+                       dataType:(MLCDataType)dataType
+    NS_REFINED_FOR_SWIFT;
+
+/*! @abstract   Create a MLCTensor object
     @discussion Create a tensor object without any data
     @param      shape                           The tensor shape
     @param      dataType                    The tensor data type
@@ -383,6 +396,63 @@
 - (BOOL)bindOptimizerData:(NSArray<MLCTensorData *> *)data
                deviceData:(NSArray<MLCTensorOptimizerDeviceData *> * _Nullable)deviceData;
 
+
+/*! @abstract   Converts a 32-bit floating-point tensor with given scale and a zero point
+                Returns a quantized tensor
+    @param      type  The quantized data type.  Must be MLCDataTypeInt8, MLCDataTypeUInt8 or MLCDataTypeInt32
+    @param      scale  The scale to apply in quantization
+    @param      bias The offset value that maps to float zero
+    @return     A quantized tensor
+*/
+- (MLCTensor * _Nullable)tensorByQuantizingToType:(MLCDataType)type
+                                            scale:(float)scale
+                                             bias:(NSInteger)bias
+    NS_SWIFT_NAME(quantized(to:scale:bias:))
+    MLCOMPUTE_AVAILABLE_STARTING(macos(12.0), ios(15), tvos(15));
+
+/*! @abstract   Converts a 32-bit floating-point tensor with given scale and a zero point
+                Returns a quantized tensor
+    @param      type  The quantized data type.  Must be MLCDataTypeInt8, MLCDataTypeUInt8 or MLCDataTypeInt32
+    @param      scale  The scale to apply in quantization
+    @param      bias The offset value that maps to float zero
+    @param      axis The dimension on which to apply per-channel quantization
+    @return     A quantized tensor
+*/
+- (MLCTensor * _Nullable)tensorByQuantizingToType:(MLCDataType)type
+                                            scale:(MLCTensor *)scale
+                                             bias:(MLCTensor *)bias
+                                             axis:(NSInteger)axis
+    NS_SWIFT_NAME(quantized(to:scale:bias:axis:))
+    MLCOMPUTE_AVAILABLE_STARTING(macos(12.0), ios(15), tvos(15));
+
+/*! @abstract   Converts a quantized tensor to a 32-bit floating-point tensor
+                Returns a de-quantized tensor
+    @param      type  The de-quantized data type.  Must be MLCFloat32
+    @param      scale  The scale that was used for the quantized data
+    @param      bias The offset value that maps to float zero used for the quantized data
+    @return     A quantized tensor
+*/
+- (MLCTensor * _Nullable)tensorByDequantizingToType:(MLCDataType)type
+                                              scale:(MLCTensor *)scale
+                                               bias:(MLCTensor *)bias
+    NS_SWIFT_NAME(dequantized(to:scale:zeroPoint:))
+    MLCOMPUTE_AVAILABLE_STARTING(macos(12.0), ios(15), tvos(15));
+
+/*! @abstract   Converts a quantized tensor to a 32-bit floating-point tensor
+                Returns a de-quantized tensor
+    @param      type  The de-quantized data type.  Must be MLCFloat32
+    @param      scale  The scale that was used for the quantized data
+    @param      bias The offset value that maps to float zero used for the quantized data
+    @param      axis The dimension on which to apply per-channel quantization
+    @return     A quantized tensor
+*/
+- (MLCTensor * _Nullable)tensorByDequantizingToType:(MLCDataType)type
+                                              scale:(MLCTensor *)scale
+                                               bias:(MLCTensor *)bias
+                                               axis:(NSInteger)axis
+    NS_SWIFT_NAME(dequantized(to:scale:bias:axis:))
+    MLCOMPUTE_AVAILABLE_STARTING(macos(12.0), ios(15), tvos(15));
+
 @end
 
 NS_ASSUME_NONNULL_END
diff -ruN /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCTypes.h /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCTypes.h
--- /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCTypes.h	2021-03-16 13:56:18.000000000 -0400
+++ /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCTypes.h	2021-06-02 12:46:50.000000000 -0400
@@ -35,6 +35,9 @@
     /*! The 32-bit floating-point data type.
      */
     MLCDataTypeFloat32 = 1,
+    /*! The 16-bit floating-point data type.
+     */
+    MLCDataTypeFloat16 MLCOMPUTE_ENUM_AVAILABLE_STARTING(macos(12.0), ios(15.0), tvos(15.0)) = 3,
     /*! The Boolean data type.
      */
     MLCDataTypeBoolean = 4,
@@ -45,6 +48,13 @@
      */
     MLCDataTypeInt32   = 7,
 
+    /*! The 8-bit integer data type.
+     */
+    MLCDataTypeInt8 MLCOMPUTE_ENUM_AVAILABLE_STARTING(macos(12.0), ios(15.0), tvos(15.0)) = 8,
+    /*! The 8-bit unsigned integer data type.
+     */
+    MLCDataTypeUInt8 MLCOMPUTE_ENUM_AVAILABLE_STARTING(macos(12.0), ios(15.0), tvos(15.0)) NS_SWIFT_NAME(uint8) = 9,
+
     // Must be last
     MLCDataTypeCount   NS_SWIFT_UNAVAILABLE(""), // holds the number of MLCDataTypes
 };
@@ -76,16 +86,22 @@
  */
 MLCOMPUTE_ENUM_AVAILABLE_STARTING(macos(11.0), ios(14.0), tvos(14.0))
 typedef NS_ENUM(int32_t, MLCDeviceType) {
-    /*! The CPU device type.
+    /*! The CPU device
      */
     MLCDeviceTypeCPU NS_SWIFT_NAME(cpu) = 0,
-    /*! The GPU device type.  When selected, the framework will use a GPU.
+    /*! The GPU device.
      */
     MLCDeviceTypeGPU NS_SWIFT_NAME(gpu) = 1,
     /*! The any device type.  When selected, the framework will automatically use the appropriate devices to achieve the best
      *  performance.
      */
     MLCDeviceTypeAny = 2,
+    /*! The Apple Neural Engine device.  When selected, the framework will use the Neural Engine to execute all layers that can be executed on it.
+     *  Layers that cannot be executed on the ANE will run on the CPU or GPU.   The Neural Engine device must be explicitly selected.  MLDeviceTypeAny
+     *  will not select the Neural Engine device.  In addition, this device can be used with inference graphs only.  This device cannot be used with a
+     *  training graph or an inference graph that shares layers with a training graph.
+     */
+    MLCDeviceTypeANE MLCOMPUTE_ENUM_AVAILABLE_STARTING(macos(12.0), ios(15.0), tvos(15.0)) NS_SWIFT_NAME(ane) = 3,
 
     // Must be last
     MLCDeviceTypeCount  NS_SWIFT_UNAVAILABLE(""), // holds the number of MLCDeviceType
@@ -154,7 +170,7 @@
 
     /*! The option to return profiling information in the callback before returning from execution.
      *
-     *  @discussion Inlcude this option to return profliling information in the graph execute completion handler callback, including
+     *  @discussion Include this option to return profiling information in the graph execute completion handler callback, including
      *      device execution time.
      */
     MLCExecutionOptionsProfiling = 0x04,
@@ -168,7 +184,15 @@
      *      If you include this option and execute a training graph using one of the executeForward methods, such as
      *      \p -executeForwardWithBatchSize:options:completionHandler:), the framework executes the forward pass for inference only.
      */
-    MLCExecutionOptionsForwardForInference = 0x08
+    MLCExecutionOptionsForwardForInference = 0x08,
+
+    /*! The option to enable additional per layer profiling information currently emitted using signposts.
+     *
+     *  @discussion The option to enable per layer profiling information emitted as signposts. The per layer information
+     *      can be visualized using the Logging Instrument in Xcode's Instruments. This information may not be available for all MLCDevice.
+     */
+    MLCExecutionOptionsPerLayerProfiling MLCOMPUTE_ENUM_AVAILABLE_STARTING(macos(12.0), ios(15.0), tvos(15.0)) = 0x10,
+
 };
 
 #pragma mark - Layers
@@ -670,6 +694,16 @@
     MLCComparisonOperationCount          NS_SWIFT_UNAVAILABLE(""),
 };
 
+/*!
+ *  @enum       MLCGradientClippingType
+ *  @abstract   The type of clipping applied to gradient
+ */
+MLCOMPUTE_ENUM_AVAILABLE_STARTING(macos(12.0), ios(15.0), tvos(15.0))
+typedef NS_ENUM(int32_t, MLCGradientClippingType) {
+    MLCGradientClippingTypeByValue      = 0,
+    MLCGradientClippingTypeByNorm       = 1,
+    MLCGradientClippingTypeByGlobalNorm = 2
+};
 
 NS_ASSUME_NONNULL_BEGIN
 /*! @abstract Returns a textual description of the activation type, suitable for debugging.
@@ -706,10 +740,14 @@
  */
 NSString *MLCLSTMResultModeDebugDescription(MLCLSTMResultMode mode)
     NS_SWIFT_NAME(getter:MLCLSTMResultMode.debugDescription(self:));
-/*! @abstract Returns a textual description of the compare operation, suitable for debugging.
+/*! @abstract Returns a textual description of the comparison operation, suitable for debugging.
  */
 NSString *MLCComparisonOperationDebugDescription(MLCComparisonOperation operation)
     NS_SWIFT_NAME(getter:MLCComparisonOperation.debugDescription(self:)) MLCOMPUTE_AVAILABLE_STARTING(macos(11.3), ios(14.5), tvos(14.5));
+/*! @abstract Returns a textual description of the gradient clipping type, suitable for debugging.
+ */
+NSString *MLCGradientClippingTypeDebugDescription(MLCGradientClippingType gradientClippingType)
+    NS_SWIFT_NAME(getter:MLCGradientClippingType.debugDescription(self:)) MLCOMPUTE_AVAILABLE_STARTING(macos(12.0), ios(15.0), tvos(15.0));
 
 NS_ASSUME_NONNULL_END
 
diff -ruN /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCompute.h /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCompute.h
--- /Applications/Xcode_12.5.0.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCompute.h	2021-03-16 13:52:11.000000000 -0400
+++ /Applications/Xcode_13.0.0-beta.app/Contents/Developer/Platforms/AppleTVOS.platform/Developer/SDKs/AppleTVOS.sdk/System/Library/Frameworks/MLCompute.framework/Headers/MLCompute.h	2021-05-22 06:33:54.000000000 -0400
@@ -18,6 +18,7 @@
 #import <MLCompute/MLCGraph.h>
 #import <MLCompute/MLCTrainingGraph.h>
 #import <MLCompute/MLCInferenceGraph.h>
+#import <MLCompute/MLCPlatform.h>
 
 // Descriptors
 #import <MLCompute/MLCActivationDescriptor.h>
@@ -69,4 +70,5 @@
 #import <MLCompute/MLCOptimizer.h>
 #import <MLCompute/MLCSGDOptimizer.h>
 #import <MLCompute/MLCAdamOptimizer.h>
+#import <MLCompute/MLCAdamWOptimizer.h>
 #import <MLCompute/MLCRMSPropOptimizer.h>
Clone this wiki locally