Add simple docs
EricLBuehler committed Sep 11, 2023
1 parent e0cf62a commit 9db450c
Showing 8 changed files with 10 additions and 0 deletions.
README.md (2 additions, 0 deletions)
@@ -12,6 +12,8 @@ candle-lora is able to convert:
- `Conv2d` -> `LoraConv2d`
- `Embedding` -> `LoraEmbedding`

All conversions are done as implemented in HuggingFace's official LoRA implementation.
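
For reference, the update these conversions implement is the standard LoRA low-rank decomposition: the pretrained weight stays frozen and a trainable product of two small matrices, scaled by `alpha / rank`, is added to its output. Below is a minimal sketch of that forward pass for the linear case using candle's `Tensor` API; the function name and shapes are illustrative, not part of candle-lora's API.

```rust
// Sketch of the LoRA update for a linear layer:
//   y = x W^T + (alpha / rank) * x A^T B^T
// W is the frozen pretrained weight; only A and B are trained.
use candle_core::{DType, Device, Result, Tensor};

fn lora_linear_forward(
    x: &Tensor, // (batch, in_features)
    w: &Tensor, // (out_features, in_features), frozen
    a: &Tensor, // (rank, in_features), trainable
    b: &Tensor, // (out_features, rank), trainable, usually zero-initialized
    alpha: f64,
    rank: usize,
) -> Result<Tensor> {
    let base = x.matmul(&w.t()?)?; // frozen path
    let delta = x.matmul(&a.t()?)?.matmul(&b.t()?)?; // low-rank path
    base + delta.affine(alpha / rank as f64, 0.0)? // scaled combination
}

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let x = Tensor::randn(0f32, 1f32, (2, 4), &dev)?;
    let w = Tensor::randn(0f32, 1f32, (3, 4), &dev)?;
    let a = Tensor::randn(0f32, 1f32, (2, 4), &dev)?; // rank = 2
    let b = Tensor::zeros((3, 2), DType::F32, &dev)?; // B starts at zero
    let y = lora_linear_forward(&x, &w, &a, &b, 16.0, 2)?;
    println!("{:?}", y.shape()); // (2, 3)
    Ok(())
}
```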

Current working example:
```rust
use std::{collections::HashMap, hash::Hash};
src/frozenconv.rs (2 additions, 0 deletions)
@@ -3,6 +3,7 @@ use candle_nn::{Conv1dConfig, Conv2dConfig};

use crate::{Conv1dLayerLike, Conv2dLayerLike};

/// Conv1d, but with a `new` implementation that ensures the weights are detached (frozen).
#[derive(Debug)]
pub(crate) struct FrozenConv1d {
weight: Tensor,
@@ -67,6 +68,7 @@ impl Conv1dLayerLike for FrozenConv1d {
}
}

/// Conv2d, but with a `new` implementation that ensures the weights are detached (frozen).
#[derive(Debug)]
pub(crate) struct FrozenConv2d {
weight: Tensor,
src/frozenembed.rs (1 addition, 0 deletions)
@@ -2,6 +2,7 @@ use candle_core::{Result, Tensor};

use crate::EmbeddingLayerLike;

/// Embedding, but with a `new` implementation that ensures the embeddings are detached (frozen).
#[derive(Debug)]
pub(crate) struct FrozenEmbedding {
embeddings: Tensor,
src/frozenlinear.rs (1 addition, 0 deletions)
@@ -2,6 +2,7 @@ use candle_core::{Module, Result, Shape, Tensor};

use crate::LinearLayerLike;

/// Linear, but with a `new` implementation that ensures the weight and/or biases are detached (frozen).
#[derive(Debug, Clone)]
pub(crate) struct FrozenLinear {
weight: Tensor,
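
The doc comments added to the `Frozen*` types above all describe the same pattern: the pretrained tensors are detached so that no gradient ever reaches them during fine-tuning. A rough sketch of that constructor pattern follows; the type and field names are illustrative, not the crate's internals, and it assumes a candle version where `Tensor::detach` is fallible.

```rust
// Sketch of a "frozen" wrapper: detach severs the tensors from the autograd
// graph, so the pretrained weights receive no gradient updates.
use candle_core::{DType, Device, Result, Tensor};

struct FrozenWeights {
    weight: Tensor,
    bias: Option<Tensor>,
}

impl FrozenWeights {
    fn new(weight: Tensor, bias: Option<Tensor>) -> Result<Self> {
        Ok(Self {
            // Assumes detach() returns Result<Tensor>; drop the `?` on candle
            // versions where it returns Tensor directly.
            weight: weight.detach()?,
            bias: match bias {
                Some(b) => Some(b.detach()?),
                None => None,
            },
        })
    }
}

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let w = Tensor::zeros((4, 4), DType::F32, &dev)?;
    let frozen = FrozenWeights::new(w, None)?;
    println!("frozen weight shape: {:?}", frozen.weight.shape());
    assert!(frozen.bias.is_none());
    Ok(())
}
```
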
src/loraconv1d.rs (1 addition, 0 deletions)
@@ -14,6 +14,7 @@ pub struct LoraConv1d {
dropout: Option<Dropout>,
}

/// Configuration for LoraConv1d. Other configurations are inherited from the `Conv1d` struct.
pub struct LoraConv1dConfig<'a> {
pub rank: usize,
pub alpha: f64,
src/loraconv2d.rs (1 addition, 0 deletions)
@@ -14,6 +14,7 @@ pub struct LoraConv2d {
dropout: Option<Dropout>,
}

/// Configuration for LoraConv2d. Other configurations are inherited from the `Conv2d` struct.
pub struct LoraConv2dConfig<'a> {
pub rank: usize,
pub alpha: f64,
src/loraembed.rs (1 addition, 0 deletions)
@@ -11,6 +11,7 @@ pub struct LoraEmbedding {
scale: Option<f64>,
}

/// Configuration for LoraEmbedding, with `num_embeddings` vectors of size `embedding_dim`.
pub struct LoraEmbeddingConfig<'a> {
pub rank: usize,
pub alpha: f64,
src/loralinear.rs (1 addition, 0 deletions)
@@ -14,6 +14,7 @@ pub struct LoraLinear {
dropout: Option<Dropout>,
}

/// Configuration for LoraLinear.
pub struct LoraLinearConfig<'a> {
pub rank: usize,
pub alpha: f64,
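
Each of the `Lora*Config` structs above exposes at least `rank` and `alpha`; their remaining fields are truncated in this view. The hypothetical config type below (not the crate's) shows what those two values typically control: the `alpha / rank` scaling applied to the low-rank update, and how many extra trainable parameters a given rank adds.

```rust
// Hypothetical config sketch; the real Lora*Config structs carry more fields.
struct LoraConfigSketch {
    rank: usize, // width of the low-rank matrices A and B
    alpha: f64,  // numerator of the update scaling
}

impl LoraConfigSketch {
    /// Scaling applied to the low-rank update.
    fn scale(&self) -> f64 {
        self.alpha / self.rank as f64
    }

    /// Trainable parameters added to a Linear with weight (out, in):
    /// A is (rank, in) and B is (out, rank).
    fn extra_params_linear(&self, in_features: usize, out_features: usize) -> usize {
        self.rank * in_features + out_features * self.rank
    }
}

fn main() {
    let cfg = LoraConfigSketch { rank: 8, alpha: 16.0 };
    // A 4096x4096 Linear has ~16.8M frozen weights; rank-8 LoRA adds only ~65k.
    println!("scale = {}", cfg.scale());
    println!("extra params = {}", cfg.extra_params_linear(4096, 4096));
}
```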
