
Andibeethoven/MicronisedSwift


import Foundation
import Accelerate // For efficient matrix operations

// === Token Embedding Function ===
func tokenizeAndEmbed(tokens: [Double], weightMatrix: [[Double]], bias: [Double]) -> [Double] {
    // Matrix-vector product plus bias: W * tokens + b, one output per weight row
    let embedded = zip(weightMatrix, bias).map { (weights, b) -> Double in
        return zip(weights, tokens).map(*).reduce(0, +) + b
    }
    return embedded
}
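
// Accelerate is imported above but not otherwise used. As an illustrative
// sketch (not part of the original file), the same embedding could be backed
// by vDSP; tokenizeAndEmbedVDSP is a hypothetical name, and the code assumes
// a row-major weight layout plus macOS 10.15+/iOS 13+ for vDSP.add.
func tokenizeAndEmbedVDSP(tokens: [Double], weightMatrix: [[Double]], bias: [Double]) -> [Double] {
    let rows = weightMatrix.count
    let cols = tokens.count
    let flatWeights = weightMatrix.flatMap { $0 }        // row-major W
    var product = [Double](repeating: 0.0, count: rows)  // holds W * tokens
    // Treat tokens as a cols x 1 matrix: (rows x cols) * (cols x 1) -> (rows x 1)
    vDSP_mmulD(flatWeights, 1, tokens, 1, &product, 1,
               vDSP_Length(rows), 1, vDSP_Length(cols))
    return vDSP.add(product, bias)                       // element-wise + b
}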

// === Attention Mechanism ===
func scaledDotProductAttention(queries: [[Double]], keys: [[Double]], values: [[Double]]) -> [[Double]] {
    // Compute Q * K^T: dot product of every query row with every key row
    let scores = queries.map { q in
        keys.map { k in zip(q, k).map(*).reduce(0, +) }
    }

    // Scale scores by sqrt(d_k), the key dimension
    let scalingFactor = sqrt(Double(keys[0].count))
    let scaledScores = scores.map { $0.map { $0 / scalingFactor } }

    // Softmax normalization (shift by the row max for numerical stability)
    let attentionWeights = scaledScores.map { row -> [Double] in
        let maxScore = row.max() ?? 0
        let expRow = row.map { exp($0 - maxScore) }
        let sumExp = expRow.reduce(0, +)
        return expRow.map { $0 / sumExp }
    }

    // Weighted sum of value rows: output[i][j] = Σ_k weights[k] * values[k][j]
    // (The original paired each weight with a value row element-wise, which
    // computes the wrong product.)
    let output = attentionWeights.map { weights in
        (0..<values[0].count).map { j in
            zip(weights, values).map { $0 * $1[j] }.reduce(0, +)
        }
    }

    return output
}
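
// Illustrative sanity check (not in the original file): when every key scores
// the query identically, softmax yields uniform weights and the attention
// output is the plain average of the value rows.
func attentionSanityCheck() {
    let out = scaledDotProductAttention(
        queries: [[1.0, 0.0]],
        keys: [[1.0, 0.0], [1.0, 0.0]],
        values: [[2.0, 4.0], [6.0, 8.0]]
    )
    // Both keys score equally -> weights [0.5, 0.5] -> expected [[4.0, 6.0]]
    print("Attention sanity check:", out)
}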

// === ReLU Activation ===
func relu(_ input: [Double]) -> [Double] {
    return input.map { max(0, $0) }
}
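
// A backward-pass counterpart (hypothetical helper, not in the original):
// ReLU's derivative is 1 where the input was positive and 0 elsewhere, so the
// upstream gradient is simply masked.
func reluGradient(_ input: [Double], upstream: [Double]) -> [Double] {
    return zip(input, upstream).map { $0 > 0 ? $1 : 0 }
}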

// === Gradient Descent ===
func gradientUpdate(weightMatrix: inout [[Double]], input: [Double], lossGradient: [Double], learningRate: Double) {
    // Outer-product update: W[i][j] -= lr * dL/dy[i] * x[j]
    for i in 0..<weightMatrix.count {
        for j in 0..<weightMatrix[i].count {
            weightMatrix[i][j] -= learningRate * lossGradient[i] * input[j]
        }
    }
}
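
// The linear layer's bias should learn too; a matching update (hypothetical
// helper, not in the original), since dL/db is just the loss gradient itself:
func gradientUpdateBias(bias: inout [Double], lossGradient: [Double], learningRate: Double) {
    for i in 0..<bias.count {
        bias[i] -= learningRate * lossGradient[i]
    }
}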

// === Main Workflow ===
func micronizedAIWorkflow() {
    // Example inputs
    let tokens: [Double] = [0.5, 1.2, 0.8]
    var weightMatrix: [[Double]] = [
        [0.2, 0.8, 0.4],
        [0.5, 0.3, 0.9],
        [0.7, 0.1, 0.6]
    ]
    let bias: [Double] = [0.1, 0.2, 0.3]

    // Token Embedding
    let embeddings = tokenizeAndEmbed(tokens: tokens, weightMatrix: weightMatrix, bias: bias)
    print("Token Embeddings:", embeddings)

    // Attention Mechanism
    let queries: [[Double]] = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]
    let keys: [[Double]] = [[0.3, 0.2, 0.1], [0.6, 0.5, 0.4], [0.9, 0.8, 0.7]]
    let values: [[Double]] = [[0.4, 0.5, 0.6], [0.7, 0.8, 0.9], [1.0, 1.1, 1.2]]

    let attentionOutput = scaledDotProductAttention(queries: queries, keys: keys, values: values)
    print("Attention Output:", attentionOutput)

    // Forward Propagation
    let layerOutput = relu(embeddings)
    print("Layer Output (ReLU):", layerOutput)

    // Gradient Descent
    let lossGradient: [Double] = [0.1, 0.2, 0.3]
    gradientUpdate(weightMatrix: &weightMatrix, input: tokens, lossGradient: lossGradient, learningRate: 0.01)
    print("Updated Weights:", weightMatrix)

}

// Run the Workflow
micronizedAIWorkflow()
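
// Also run the illustrative attention check sketched above.
attentionSanityCheck()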

About

Open Source Micronised Swift
