Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
64 changes: 64 additions & 0 deletions src-tauri/Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

3 changes: 3 additions & 0 deletions src-tauri/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,9 @@ rayon = "1.7.0"
dirs = "5.0.1"
notify = "6.0.1"
tokio = { version = "1.28.2", features = ["full"] }
serde_bencode = "0.2.3"
zstd = "0.12.3"
lazy_static = "1.4.0"

[features]
# this feature is used for production builds or when `devPath` points to the filesystem
Expand Down
2 changes: 1 addition & 1 deletion src-tauri/build.rs
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
fn main() {
tauri_build::build()
tauri_build::build()
}
80 changes: 55 additions & 25 deletions src-tauri/src/filesystem/cache.rs
Original file line number Diff line number Diff line change
@@ -1,15 +1,22 @@
use crate::filesystem::{DIRECTORY, FILE};
use crate::{AppState, CachedPath, StateSafe, VolumeCache};
use std::{fs};
use std::io::Write;
use lazy_static::lazy_static;
use notify::event::{CreateKind, ModifyKind, RenameMode};
use notify::Event;
use std::fs::{self, File};
use std::io::{BufReader, Write};
use std::path::{Path, PathBuf};
use std::sync::{Arc, MutexGuard};
use std::time::Duration;
use notify::{Event};
use notify::event::{CreateKind, ModifyKind, RenameMode};
use tokio::time;
use crate::filesystem::{DIRECTORY, FILE};

pub const CACHE_FILE_PATH: &str = "./system_cache.json";
lazy_static! {
    /// Absolute path of the on-disk cache file,
    /// i.e. `<platform cache dir>/<package name>.cache.bin`.
    /// NOTE(review): `lazy_static` could be swapped for `std::sync::LazyLock`
    /// once the project's minimum Rust version permits — confirm toolchain floor.
    pub static ref CACHE_FILE_PATH: String = {
        let base = dirs::cache_dir().expect("Failed to get base cache path");
        base.join(format!("{}.cache.bin", env!("CARGO_PKG_NAME")))
            .to_string_lossy()
            .into_owned()
    };
}

/// Handles filesystem events, currently intended for cache invalidation.
pub struct FsEventHandler {
Expand All @@ -19,15 +26,22 @@ pub struct FsEventHandler {

impl FsEventHandler {
pub fn new(state_mux: StateSafe, mountpoint: PathBuf) -> Self {
Self { state_mux, mountpoint }
Self {
state_mux,
mountpoint,
}
}

/// Gets the current volume from the cache.
///
/// # Panics
/// Panics if this handler's mountpoint has no entry in the system cache —
/// the volume is expected to have been cached before events are handled.
fn get_from_cache<'a>(&self, state: &'a mut AppState) -> &'a mut VolumeCache {
    let mountpoint = self.mountpoint.to_string_lossy().to_string();

    match state.system_cache.get_mut(&mountpoint) {
        Some(volume_cache) => volume_cache,
        None => panic!(
            "Failed to find mountpoint '{:?}' in cache.",
            self.mountpoint
        ),
    }
}

pub fn handle_create(&self, kind: CreateKind, path: &Path) {
Expand All @@ -39,10 +53,14 @@ impl FsEventHandler {
CreateKind::File => FILE,
CreateKind::Folder => DIRECTORY,
_ => return, // Other options are weird lol
}.to_string();
}
.to_string();

let file_path = path.to_string_lossy().to_string();
current_volume.entry(filename).or_insert(vec![CachedPath{file_path, file_type}]);
current_volume.entry(filename).or_insert(vec![CachedPath {
file_path,
file_type,
}]);
}

pub fn handle_delete(&self, path: &Path) {
Expand All @@ -58,7 +76,7 @@ impl FsEventHandler {
let state = &mut self.state_mux.lock().unwrap();
let current_volume = self.get_from_cache(state);

let old_path_string= old_path.to_string_lossy().to_string();
let old_path_string = old_path.to_string_lossy().to_string();
let old_filename = old_path.file_name().unwrap().to_string_lossy().to_string();

let empty_vec = &mut Vec::new();
Expand All @@ -82,7 +100,10 @@ impl FsEventHandler {
let file_type = if new_path.is_dir() { DIRECTORY } else { FILE };

let path_string = new_path.to_string_lossy().to_string();
current_volume.entry(filename).or_insert(vec![CachedPath{file_path: path_string, file_type: String::from(file_type)}]);
current_volume.entry(filename).or_insert(vec![CachedPath {
file_path: path_string,
file_type: String::from(file_type),
}]);
}

pub fn handle_event(&mut self, event: Event) {
Expand All @@ -95,7 +116,7 @@ impl FsEventHandler {
} else if modify_kind == ModifyKind::Name(RenameMode::To) {
self.handle_rename_to(&paths[0]);
}
},
}
notify::EventKind::Create(kind) => self.handle_create(kind, &paths[0]),
notify::EventKind::Remove(_) => self.handle_delete(&paths[0]),
_ => (),
Expand All @@ -107,7 +128,8 @@ impl FsEventHandler {
pub fn run_cache_interval(state_mux: &StateSafe) {
let state_clone = Arc::clone(state_mux);

tokio::spawn(async move { // We use tokio spawn because async closures with std spawn is unstable
tokio::spawn(async move {
// We use tokio spawn because async closures with std spawn is unstable
let mut interval = time::interval(Duration::from_secs(30));
interval.tick().await; // Wait 30 seconds before doing first re-cache

Expand All @@ -129,29 +151,37 @@ pub fn save_system_cache(state_mux: &StateSafe) {
/// Gets the cache from the state (in memory), encodes it with bencode,
/// compresses it with zstd, and writes it to the cache file path.
/// This needs optimising.
///
/// # Panics
/// Panics if serialization, file creation, or the write itself fails.
fn save_to_cache(state: &mut MutexGuard<AppState>) {
    let serialized_cache = serde_bencode::to_string(&state.system_cache).unwrap();

    // `create(true)` so a periodic save still succeeds if the cache file was
    // deleted after startup; previously `write(true)` alone panicked in that case.
    // `truncate(true)` discards the prior snapshot before writing the new one.
    let mut file = fs::OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true)
        .open(&CACHE_FILE_PATH[..])
        .unwrap();

    // Level 0 tells zstd to use its default compression level.
    file.write_all(
        &zstd::encode_all(serialized_cache.as_bytes(), 0)
            .expect("Failed to compress cache contents.")[..],
    )
    .unwrap();
}

/// Reads and decodes the cache file and stores it in memory for quick access.
/// Returns false if the cache was unable to deserialize.
pub fn load_system_cache(state_mux: &StateSafe) -> bool {
let state = &mut state_mux.lock().unwrap();
let file_contents = fs::read_to_string(CACHE_FILE_PATH).unwrap();
let state = &mut state_mux.lock().expect("Failed to lock mutex");

let cache_file = File::open(&CACHE_FILE_PATH[..]).expect("Failed to open cache file");
let reader = BufReader::new(cache_file);

let deserialize_result = serde_json::from_str(&file_contents);
if let Ok(system_cache) = deserialize_result {
state.system_cache = system_cache;
return true;
if let Ok(decompressed) = zstd::decode_all(reader) {
let deserialize_result = serde_bencode::from_bytes(&decompressed[..]);
if let Ok(system_cache) = deserialize_result {
state.system_cache = system_cache;
return true;
}
}

println!("Failed to deserialize the cache from disk, recaching...");
false
}
}
6 changes: 4 additions & 2 deletions src-tauri/src/filesystem/mod.rs
Original file line number Diff line number Diff line change
@@ -1,13 +1,15 @@
pub mod cache;
pub mod volume;

use std::fs::{read_dir};
use crate::filesystem::volume::DirectoryChild;
use std::fs::read_dir;

pub const DIRECTORY: &str = "directory";
pub const FILE: &str = "file";

/// Converts a raw byte count into whole decimal gigabytes (10^9 bytes),
/// truncating any fractional remainder.
pub const fn bytes_to_gb(bytes: u64) -> u16 {
    // Decimal GB (1e9), matching how disk vendors report capacity.
    const BYTES_PER_GB: u64 = 1_000_000_000;
    (bytes / BYTES_PER_GB) as u16
}

/// Searches and returns the files in a given directory. This is not recursive.
#[tauri::command]
Expand Down
43 changes: 24 additions & 19 deletions src-tauri/src/filesystem/volume.rs
Original file line number Diff line number Diff line change
@@ -1,18 +1,20 @@
use crate::filesystem::cache::{
load_system_cache, run_cache_interval, save_system_cache, FsEventHandler, CACHE_FILE_PATH,
};
use crate::filesystem::{bytes_to_gb, DIRECTORY, FILE};
use crate::{CachedPath, StateSafe};
use notify::{RecursiveMode, Watcher};
use rayon::prelude::*;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::{fs, thread};
use std::fs::{File};
use std::path::{PathBuf};
use std::fs::File;
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
use std::{fs, thread};
use sysinfo::{Disk, DiskExt, System, SystemExt};
use tauri::{State};
use walkdir::WalkDir;
use notify::{Watcher, RecursiveMode};
use tauri::State;
use tokio::task::block_in_place;
use crate::filesystem::{bytes_to_gb, DIRECTORY, FILE};
use crate::filesystem::cache::{CACHE_FILE_PATH, FsEventHandler, load_system_cache, run_cache_interval, save_system_cache};
use walkdir::WalkDir;

#[derive(Serialize)]
pub struct Volume {
Expand Down Expand Up @@ -91,7 +93,7 @@ impl Volume {
true => "Local Volume",
false => volume_name,
}
.to_string()
.to_string()
};

let mountpoint = disk.mount_point().to_path_buf();
Expand Down Expand Up @@ -128,8 +130,12 @@ impl Volume {
let file_path = entry.path().to_string_lossy().to_string();

let walkdir_filetype = entry.file_type();
let file_type = if walkdir_filetype.is_dir() { DIRECTORY } else { FILE }
.to_string();
let file_type = if walkdir_filetype.is_dir() {
DIRECTORY
} else {
FILE
}
.to_string();

let cache_guard = &mut system_cache.lock().unwrap();
cache_guard
Expand All @@ -145,12 +151,11 @@ impl Volume {
fn watch_changes(&self, state_mux: &StateSafe) {
let mut fs_event_manager = FsEventHandler::new(state_mux.clone(), self.mountpoint.clone());

let mut watcher = notify::recommended_watcher(move |res| {
match res {
Ok(event) => fs_event_manager.handle_event(event),
Err(e) => panic!("Failed to handle event: {:?}", e),
}
}).unwrap();
let mut watcher = notify::recommended_watcher(move |res| match res {
Ok(event) => fs_event_manager.handle_event(event),
Err(e) => panic!("Failed to handle event: {:?}", e),
})
.unwrap();

let path = self.mountpoint.clone();

Expand Down Expand Up @@ -180,11 +185,11 @@ pub fn get_volumes(state_mux: State<StateSafe>) -> Vec<Volume> {
let mut sys = System::new_all();
sys.refresh_all();

let mut cache_exists = fs::metadata(CACHE_FILE_PATH).is_ok();
let mut cache_exists = fs::metadata(&CACHE_FILE_PATH[..]).is_ok();
if cache_exists {
cache_exists = load_system_cache(&state_mux);
} else {
File::create(CACHE_FILE_PATH).unwrap();
File::create(&CACHE_FILE_PATH[..]).unwrap();
}

for disk in sys.disks() {
Expand Down
Loading