Skip to content

Commit

Permalink
Fix failing large note imports
Browse files Browse the repository at this point in the history
When importing to Vaultwarden (or Bitwarden), notes larger than 10_000
encrypted characters are invalid. This is because, for one, it isn't
compatible with Bitwarden, and some clients tend to break on very large
notes.

We already added a check for this limit when adding a single cipher, but
this caused issues during import and could result in a partially
imported vault. Bitwarden performs some validations before actually
running the data through the import process and generates a special
error message that helps the user identify which items are invalid
during the import.

This PR adds that validation check and returns the same kind of error.
Fixes #3048
  • Loading branch information
BlackDex committed Jan 9, 2023
1 parent 988d249 commit 6be26f0
Show file tree
Hide file tree
Showing 4 changed files with 42 additions and 3 deletions.
8 changes: 7 additions & 1 deletion src/api/core/ciphers.rs
Expand Up @@ -205,7 +205,7 @@ pub struct CipherData {
*/
pub Type: i32,
pub Name: String,
Notes: Option<String>,
pub Notes: Option<String>,
Fields: Option<Value>,

// Only one of these should exist, depending on type
Expand Down Expand Up @@ -542,6 +542,12 @@ async fn post_ciphers_import(

let data: ImportData = data.into_inner().data;

// Validate the import before continuing
// Bitwarden does not process the import if there is one item invalid.
// Since we check for the size of the encrypted note length, we need to do that here to pre-validate it.
// TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
Cipher::validate_notes(&data.Ciphers)?;

// Read and create the folders
let mut folders: Vec<_> = Vec::new();
for folder in data.Folders.into_iter() {
Expand Down
2 changes: 1 addition & 1 deletion src/api/core/mod.rs
Expand Up @@ -7,7 +7,7 @@ mod organizations;
mod sends;
pub mod two_factor;

pub use ciphers::{purge_trashed_ciphers, CipherSyncData, CipherSyncType};
pub use ciphers::{purge_trashed_ciphers, CipherData, CipherSyncData, CipherSyncType};
pub use emergency_access::{emergency_notification_reminder_job, emergency_request_timeout_job};
pub use events::{event_cleanup_job, log_event, log_user_event};
pub use sends::purge_sends;
Expand Down
6 changes: 6 additions & 0 deletions src/api/core/organizations.rs
Expand Up @@ -1378,6 +1378,12 @@ async fn post_org_import(
let data: ImportData = data.into_inner().data;
let org_id = query.organization_id;

// Validate the import before continuing
// Bitwarden does not process the import if there is one item invalid.
// Since we check for the size of the encrypted note length, we need to do that here to pre-validate it.
// TODO: See if we can optimize the whole cipher adding/importing and prevent duplicate code and checks.
Cipher::validate_notes(&data.Ciphers)?;

let mut collections = Vec::new();
for coll in data.Collections {
let collection = Collection::new(org_id.clone(), coll.Name);
Expand Down
29 changes: 28 additions & 1 deletion src/db/models/cipher.rs
Expand Up @@ -6,7 +6,7 @@ use super::{
Attachment, CollectionCipher, Favorite, FolderCipher, Group, User, UserOrgStatus, UserOrgType, UserOrganization,
};

use crate::api::core::CipherSyncData;
use crate::api::core::{CipherData, CipherSyncData};

use std::borrow::Cow;

Expand Down Expand Up @@ -73,6 +73,33 @@ impl Cipher {
reprompt: None,
}
}

/// Pre-validates the notes of a batch of ciphers before an import is processed.
///
/// Bitwarden rejects an entire import if any single item is invalid, so this
/// check runs up front to avoid a partially imported vault. Any note whose
/// encrypted value exceeds 10_000 characters produces one validation entry,
/// keyed the way Bitwarden clients expect (`Ciphers[i].Notes`) so the client
/// can point the user at the offending items.
///
/// Returns `Ok(())` when every note is within the limit, otherwise an error
/// carrying a Bitwarden-compatible "model state is invalid" JSON payload.
pub fn validate_notes(cipher_data: &[CipherData]) -> EmptyResult {
    // Collect one entry per over-long note; the index keeps the key aligned
    // with the item's position in the submitted import payload.
    let validation_errors: serde_json::Map<String, serde_json::Value> = cipher_data
        .iter()
        .enumerate()
        .filter(|(_, cipher)| matches!(&cipher.Notes, Some(note) if note.len() > 10_000))
        .map(|(index, _)| {
            (
                format!("Ciphers[{index}].Notes"),
                // The message is wrapped in an array to mirror Bitwarden's
                // validation-error response shape.
                serde_json::to_value([
                    "The field Notes exceeds the maximum encrypted value length of 10000 characters.",
                ])
                .unwrap(),
            )
        })
        .collect();

    if validation_errors.is_empty() {
        return Ok(());
    }

    // Same top-level error envelope Bitwarden returns for failed model validation.
    let err_json = json!({
        "message": "The model state is invalid.",
        "validationErrors" : validation_errors,
        "object": "error"
    });
    err_json!(err_json, "Import validation errors")
}
}

use crate::db::DbConn;
Expand Down

0 comments on commit 6be26f0

Please sign in to comment.