This repository has been archived by the owner on Aug 3, 2023. It is now read-only.

Merge pull request #1145 from cloudflare/avery/shrink-asset-manifest
Asset Manifest QOL improvements
EverlastingBugstopper committed Mar 18, 2020
2 parents c9c760b + 590cdf7 commit cc847a0
Showing 4 changed files with 18 additions and 50 deletions.
src/commands/kv/bucket/mod.rs: 42 changes (9 additions & 33 deletions)
@@ -16,6 +16,7 @@ use data_encoding::HEXLOWER;
use failure::format_err;
use ignore::overrides::{Override, OverrideBuilder};
use ignore::{Walk, WalkBuilder};
+ use indicatif::{ProgressBar, ProgressStyle};
use sha2::{Digest, Sha256};

use cloudflare::endpoints::workerskv::write_bulk::KeyValuePair;
@@ -31,22 +32,22 @@ pub const VALUE_MAX_SIZE: u64 = 10 * 1024 * 1024;
pub fn directory_keys_values(
target: &Target,
directory: &Path,
- verbose: bool,
) -> Result<(Vec<KeyValuePair>, AssetManifest), failure::Error> {
match &fs::metadata(directory) {
Ok(file_type) if file_type.is_dir() => {
let mut upload_vec: Vec<KeyValuePair> = Vec::new();
- let mut asset_manifest: AssetManifest = AssetManifest::new();
+ let mut asset_manifest = AssetManifest::new();

let dir_walker = get_dir_iterator(target, directory)?;

+ let spinner_style =
+     ProgressStyle::default_spinner().template("{spinner} Preparing {msg}...");
+ let spinner = ProgressBar::new_spinner().with_style(spinner_style);
for entry in dir_walker {
+ spinner.tick();
let entry = entry.unwrap();
let path = entry.path();
if path.is_file() {
- if verbose {
-     message::working(&format!("Preparing {}", path.display()));
- }
+ spinner.set_message(&format!("{}", path.display()));

validate_file_size(&path)?;
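The hunk above swaps the verbose-gated `message::working` calls for an indicatif spinner that ticks once per file. A minimal sketch of the same pattern, assuming the older indicatif API used here (roughly the 0.14 series, where `template` returns a `ProgressStyle` directly and `set_message` takes a `&str`); the file names are hypothetical stand-ins for the directory walk:

```rust
use indicatif::{ProgressBar, ProgressStyle};

fn main() {
    // Hypothetical file names standing in for the directory walk.
    let files = ["index.html", "style.css", "app.js"];

    let spinner_style =
        ProgressStyle::default_spinner().template("{spinner} Preparing {msg}...");
    let spinner = ProgressBar::new_spinner().with_style(spinner_style);

    for &file in files.iter() {
        // Advance the spinner one frame and show which file is being prepared.
        spinner.tick();
        spinner.set_message(file);
        // ...read, hash, and stage the file here...
    }

    spinner.finish_and_clear();
}
```

Because the spinner always runs, the `verbose` flag is no longer needed in this function, which is why the parameter disappears here and at every call site further down.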

@@ -82,31 +83,6 @@ pub fn directory_keys_values(
}
}

- // Returns only the hashed keys for a directory's files.
- fn directory_keys_only(target: &Target, directory: &Path) -> Result<Vec<String>, failure::Error> {
-     let mut key_vec: Vec<String> = Vec::new();
-
-     let dir_walker = get_dir_iterator(target, directory)?;
-
-     for entry in dir_walker {
-         let entry = entry.unwrap();
-         let path = entry.path();
-         if path.is_file() {
-             let value = std::fs::read(path)?;
-
-             // Need to base64 encode value
-             let b64_value = base64::encode(&value);
-
-             let (_, key) = generate_path_and_key(path, directory, Some(b64_value))?;
-
-             validate_key_size(&key)?;
-
-             key_vec.push(key);
-         }
-     }
-     Ok(key_vec)
- }

// Ensure that all files in upload directory do not exceed the MAX_VALUE_SIZE (this ensures that
// no partial uploads happen). I don't like this functionality (and the similar key length checking
// logic in validate_key_size()) because it duplicates the size checking the API already does--but
@@ -238,7 +214,7 @@ fn get_digest(value: String) -> Result<String, failure::Error> {
let mut hasher = Sha256::new();
hasher.input(value);
let digest = hasher.result();
- let hex_digest = HEXLOWER.encode(digest.as_ref());
+ let hex_digest = HEXLOWER.encode(digest.as_ref())[0..10].to_string();
Ok(hex_digest)
}
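The `get_digest` change keeps only the first ten hex characters of the SHA-256 digest, which is what shrinks the asset manifest and the generated keys (the test regex below changes from `{64}` to `{10}` to match). A standalone sketch of the shortened digest, assuming the sha2 0.8-era `input`/`result` API shown above (newer sha2 releases rename these to `update`/`finalize`):

```rust
use data_encoding::HEXLOWER;
use sha2::{Digest, Sha256};

fn short_digest(value: &str) -> String {
    let mut hasher = Sha256::new();
    hasher.input(value);
    let digest = hasher.result();
    // The full SHA-256 hex digest is 64 characters; keep only the first 10.
    HEXLOWER.encode(digest.as_ref())[0..10].to_string()
}

fn main() {
    // Prints "2cf24dba5f", the first 10 hex chars of SHA-256("hello").
    println!("{}", short_digest("hello"));
    // A key built from it would look like "path/to/asset.2cf24dba5f.ext".
}
```

Ten hex characters still leave 16^10 (about 10^12) possible prefixes, so accidental collisions within a single site bucket remain unlikely while the manifest and key names get much shorter.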

@@ -552,7 +528,7 @@ mod tests {
let (path, key) = generate_path_and_key(path, directory, value).unwrap();

let expected_path = "path/to/asset.ext".to_string();
- let expected_key_regex = Regex::new(r"^path/to/asset\.[0-9a-f]{64}\.ext").unwrap();
+ let expected_key_regex = Regex::new(r"^path/to/asset\.[0-9a-f]{10}\.ext").unwrap();

assert_eq!(path, expected_path);
assert!(expected_key_regex.is_match(&key));
src/commands/kv/bucket/sync.rs: 18 changes (6 additions & 12 deletions)
@@ -1,12 +1,9 @@
use std::collections::HashSet;
- use std::fs::metadata;
- use std::iter::FromIterator;
use std::path::Path;

use cloudflare::endpoints::workerskv::write_bulk::KeyValuePair;

use crate::commands::kv;
- use crate::commands::kv::bucket::directory_keys_only;
use crate::commands::kv::bucket::directory_keys_values;
use crate::commands::kv::key::KeyList;
use crate::settings::global_user::GlobalUser;
@@ -20,7 +17,6 @@ pub fn sync(
user: &GlobalUser,
namespace_id: &str,
path: &Path,
- verbose: bool,
) -> Result<(Vec<KeyValuePair>, Vec<String>, AssetManifest), failure::Error> {
kv::validate_target(target)?;
// First, find all changed files in given local directory (aka files that are now stale
@@ -42,18 +38,16 @@ }
}

let (pairs, asset_manifest): (Vec<KeyValuePair>, AssetManifest) =
- directory_keys_values(target, path, verbose)?;
+ directory_keys_values(target, path)?;

- let to_upload = filter_files(pairs, &remote_keys);
+ let to_upload = filter_files(pairs.clone(), &remote_keys);

// Now delete files from Workers KV that exist in remote but no longer exist locally.
// Get local keys
- let local_keys_vec: Vec<String> = match &metadata(path) {
-     Ok(file_type) if file_type.is_dir() => directory_keys_only(target, path),
-     Ok(_) => failure::bail!("{} should be a directory", path.display()),
-     Err(e) => failure::bail!("{}", e),
- }?;
- let local_keys: HashSet<_> = HashSet::from_iter(local_keys_vec.into_iter());
+ let mut local_keys: HashSet<_> = HashSet::new();
+ for pair in pairs.iter() {
+     local_keys.insert(pair.key.clone());
+ }

// Find keys that are present in remote but not present in local, and
// stage them for deletion.
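With this hunk, `sync` derives the local key set from the `pairs` it already built (hence the added `pairs.clone()` before `filter_files`, so the vector can still be iterated afterwards) instead of re-reading and re-hashing every file through the removed `directory_keys_only`. The remaining work is plain set arithmetic; a minimal sketch with hypothetical key names:

```rust
use std::collections::HashSet;

fn main() {
    // Hypothetical hashed keys: what the KV namespace currently holds
    // versus what was just read from the local bucket directory.
    let remote_keys: HashSet<String> = ["old.1a2b3c4d5e.html", "index.2cf24dba5f.html"]
        .iter()
        .map(|s| s.to_string())
        .collect();
    let local_keys: HashSet<String> = ["index.2cf24dba5f.html", "app.9f8e7d6c5b.js"]
        .iter()
        .map(|s| s.to_string())
        .collect();

    // Upload anything that exists locally but not remotely.
    let to_upload: Vec<&String> = local_keys.difference(&remote_keys).collect();
    // Delete anything that exists remotely but no longer exists locally.
    let to_delete: Vec<&String> = remote_keys.difference(&local_keys).collect();

    println!("to_upload: {:?}", to_upload); // ["app.9f8e7d6c5b.js"]
    println!("to_delete: {:?}", to_delete); // ["old.1a2b3c4d5e.html"]
}
```

In the real code the upload side goes through `filter_files` against the remote key list, while the delete side is the remote-minus-local difference described in the comment above.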
src/commands/preview/upload.rs: 2 changes (1 addition & 1 deletion)
@@ -65,7 +65,7 @@ pub fn upload(

let path = Path::new(&site_config.bucket);
let (to_upload, to_delete, asset_manifest) =
- sync(target, user, &site_namespace.id, path, verbose)?;
+ sync(target, user, &site_namespace.id, path)?;

// First, upload all existing files in given directory
if verbose {
src/commands/publish.rs: 6 changes (2 additions & 4 deletions)
@@ -32,8 +32,7 @@ pub fn publish(

let site_namespace = add_site_namespace(user, target, false)?;

- let (to_upload, to_delete, asset_manifest) =
-     sync(target, user, &site_namespace.id, &path, verbose)?;
+ let (to_upload, to_delete, asset_manifest) = sync(target, user, &site_namespace.id, &path)?;

// First, upload all existing files in bucket directory
if verbose {
@@ -179,8 +178,7 @@ pub fn sync_non_site_buckets(
if let Some(path) = &namespace.bucket {
is_using_non_site_bucket = true;
validate_bucket_location(path)?;
- let (to_upload, to_delete, _) =
-     kv::bucket::sync(target, user, &namespace.id, path, verbose)?;
+ let (to_upload, to_delete, _) = kv::bucket::sync(target, user, &namespace.id, path)?;
// First, upload all existing files in bucket directory
if verbose {
message::info("Preparing to upload updated files...");
