diff --git a/CHANGELOG.md b/CHANGELOG.md
index b4ee0d4915..6182af96b9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -22,6 +22,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
   description, even if there already was a description set. It now also only
   works on the working-copy commit (there's no `-r` argument).
 
+* The storage format for the operation log has changed. It will be
+  automatically upgraded the first time you run a command in an existing repo.
+  The operation IDs will change in that process.
+
 ### New features
 
 * The new `jj git remote rename` command allows git remotes to be renamed
diff --git a/Cargo.lock b/Cargo.lock
index 56dbf7ba89..9465b4f6a9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -672,6 +672,12 @@ dependencies = [
  "cfg-if",
 ]
 
+[[package]]
+name = "integer-encoding"
+version = "3.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02"
+
 [[package]]
 name = "itertools"
 version = "0.10.5"
@@ -773,6 +779,7 @@ dependencies = [
  "test-case",
  "testutils",
  "thiserror",
+ "thrift",
  "uuid",
  "version_check",
  "whoami",
@@ -987,6 +994,15 @@ dependencies = [
  "vcpkg",
 ]
 
+[[package]]
+name = "ordered-float"
+version = "2.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7940cf2ca942593318d07fcf2596cdca60a85c9e7fab408a5e21a4f9dcd40d87"
+dependencies = [
+ "num-traits",
+]
+
 [[package]]
 name = "os_str_bytes"
 version = "6.3.0"
@@ -1611,6 +1627,16 @@ dependencies = [
  "syn",
 ]
 
+[[package]]
+name = "thrift"
+version = "0.17.0"
+source = "git+https://github.com/apache/thrift?rev=4d493e867b349f3475203ef9848353b315203c51#4d493e867b349f3475203ef9848353b315203c51"
+dependencies = [
+ "byteorder",
+ "integer-encoding",
+ "ordered-float",
+]
+
 [[package]]
 name = "tinytemplate"
 version = "1.2.1"
diff --git a/flake.nix b/flake.nix
index 1c91838497..fc9b4a2c7f 100644
--- a/flake.nix
+++ b/flake.nix
@@ -43,6 +43,9 @@
         cargoLock = {
           lockFile = "${self}/Cargo.lock";
+          outputHashes = {
+            "thrift-0.17.0" = "sha256-Zczwq6zRKPXXj7JR0X/0Osl1Lafo5r+2wK5tuWJbvI8=";
+          };
         };
         nativeBuildInputs = [ pkg-config gzip makeWrapper
diff --git a/lib/Cargo.toml b/lib/Cargo.toml
index 026ac561fc..fbe72c9bd9 100644
--- a/lib/Cargo.toml
+++ b/lib/Cargo.toml
@@ -38,6 +38,8 @@ regex = "1.7.0"
 serde_json = "1.0.87"
 tempfile = "3.3.0"
 thiserror = "1.0.37"
+# thrift v0.17.0 (specified by hash for security reasons)
+thrift = { git = "https://github.com/apache/thrift", rev = "4d493e867b349f3475203ef9848353b315203c51", default-features = false }
 uuid = { version = "1.2.1", features = ["v4"] }
 whoami = "1.2.3"
 zstd = "0.11.2"
@@ -50,4 +52,8 @@ test-case = "2.2.2"
 testutils = { path = "testutils" }
 
 [features]
+default = ["legacy_protobuf"]
 vendored-openssl = ["git2/vendored-openssl"]
+# Enable upgrade of repositories created with storage backends based on
+# Protobuf format (from before we switched to Thrift)
+legacy_protobuf = []
diff --git a/lib/src/lib.rs b/lib/src/lib.rs
index fe838607df..31dab0c3a9 100644
--- a/lib/src/lib.rs
+++ b/lib/src/lib.rs
@@ -37,6 +37,7 @@ pub mod nightly_shims;
 pub mod op_heads_store;
 pub mod op_store;
 pub mod operation;
+#[cfg(feature = "legacy_protobuf")]
 mod proto_op_store;
 pub mod protos;
 pub mod refs;
@@ -47,6 +48,8 @@ pub mod revset_graph_iterator;
 pub mod rewrite;
 pub mod settings;
 pub mod simple_op_store;
+#[allow(dead_code)]
+mod simple_op_store_model;
 pub mod stacked_table;
 pub mod store;
 pub mod transaction;
diff --git a/lib/src/proto_op_store.rs b/lib/src/proto_op_store.rs
index 097572604f..5eef2f0156 100644
--- a/lib/src/proto_op_store.rs
+++ b/lib/src/proto_op_store.rs
@@ -22,7 +22,7 @@ use std::path::PathBuf;
 use blake2::Blake2b512;
 use itertools::Itertools;
 use protobuf::{Message, MessageField};
-use tempfile::{NamedTempFile, PersistError};
+use tempfile::NamedTempFile;
 
 use crate::backend::{CommitId, MillisSinceEpoch, Timestamp};
 use crate::content_hash::ContentHash;
@@ -32,18 +32,6 @@ use crate::op_store::{
     RefTarget, View, ViewId, WorkspaceId,
 };
 
-impl From<std::io::Error> for OpStoreError {
-    fn from(err: std::io::Error) -> Self {
-        OpStoreError::Other(err.to_string())
-    }
-}
-
-impl From<PersistError> for OpStoreError {
-    fn from(err: PersistError) -> Self {
-        OpStoreError::Other(err.to_string())
-    }
-}
-
 impl From<protobuf::Error> for OpStoreError {
     fn from(err: protobuf::Error) -> Self {
         OpStoreError::Other(err.to_string())
@@ -56,7 +44,8 @@ pub struct ProtoOpStore {
 }
 
 impl ProtoOpStore {
-    pub fn init(store_path: PathBuf) -> Self {
+    #[allow(dead_code)]
+    fn init(store_path: PathBuf) -> Self {
         fs::create_dir(store_path.join("views")).unwrap();
         fs::create_dir(store_path.join("operations")).unwrap();
         Self::load(store_path)
diff --git a/lib/src/simple_op_store.rs b/lib/src/simple_op_store.rs
index 1e7bdfb6b0..829121b535 100644
--- a/lib/src/simple_op_store.rs
+++ b/lib/src/simple_op_store.rs
@@ -12,27 +12,193 @@ // See the License for the specific language governing permissions and
 // limitations under the License.
 
+use std::collections::{BTreeMap, HashMap, HashSet};
 use std::fmt::Debug;
+use std::fs;
+use std::fs::File;
+use std::io::{ErrorKind, Read, Write};
 use std::path::PathBuf;
 
-use crate::op_store::{OpStore, OpStoreResult, Operation, OperationId, View, ViewId};
+use blake2::Blake2b512;
+use itertools::Itertools;
+use tempfile::{NamedTempFile, PersistError};
+use thrift::protocol::{TCompactInputProtocol, TCompactOutputProtocol, TSerializable};
+
+use crate::backend::{CommitId, MillisSinceEpoch, Timestamp};
+use crate::content_hash::ContentHash;
+use crate::file_util::persist_content_addressed_temp_file;
+use crate::op_store::{
+    BranchTarget, OpStore, OpStoreError, OpStoreResult, Operation, OperationId, OperationMetadata,
+    RefTarget, View, ViewId, WorkspaceId,
+};
+#[cfg(feature = "legacy_protobuf")]
 use crate::proto_op_store::ProtoOpStore;
+use crate::simple_op_store_model;
+
+impl From<std::io::Error> for OpStoreError {
+    fn from(err: std::io::Error) -> Self {
+        OpStoreError::Other(err.to_string())
+    }
+}
+
+impl From<PersistError> for OpStoreError {
+    fn from(err: PersistError) -> Self {
+        OpStoreError::Other(err.to_string())
+    }
+}
+
+impl From<thrift::Error> for OpStoreError {
+    fn from(err: thrift::Error) -> Self {
+        OpStoreError::Other(err.to_string())
+    }
+}
 
+// TODO: In version 0.7.0 or so, inline ThriftOpStore into this type and drop
+// support for upgrading from the proto format
 #[derive(Debug)]
 pub struct SimpleOpStore {
-    delegate: ProtoOpStore,
+    delegate: ThriftOpStore,
+}
+
+#[cfg(feature = "legacy_protobuf")]
+fn upgrade_to_thrift(store_path: PathBuf) -> std::io::Result<()> {
+    println!("Upgrading operation log to Thrift format...");
+    let proto_store = ProtoOpStore::load(store_path.clone());
+    let tmp_store_dir = tempfile::Builder::new()
+        .prefix("jj-op-store-upgrade-")
+        .tempdir()
+        .unwrap();
+    let tmp_store_path = tmp_store_dir.path().to_path_buf();
+
+    // Find the current operation head(s) of the operation log. Because the hash is
+    // based on the serialized format, it will be different after conversion, so
+    // we need to rewrite these later.
+    let op_heads_store_path = store_path.parent().unwrap().join("op_heads");
+    let mut old_op_heads = HashSet::new();
+    for entry in fs::read_dir(&op_heads_store_path)? {
+        let basename = entry?.file_name();
+        let op_id_str = basename.to_str().unwrap();
+        if let Ok(op_id_bytes) = hex::decode(op_id_str) {
+            old_op_heads.insert(OperationId::new(op_id_bytes));
+        }
+    }
+
+    // Do a DFS to rewrite the operations
+    let thrift_store = ThriftOpStore::init(tmp_store_path.clone());
+    let mut converted: HashMap<OperationId, OperationId> = HashMap::new();
+    // The DFS stack
+    let mut to_convert = old_op_heads
+        .iter()
+        .map(|op_id| (op_id.clone(), proto_store.read_operation(op_id).unwrap()))
+        .collect_vec();
+    while !to_convert.is_empty() {
+        let (_, op) = to_convert.last().unwrap();
+        let mut new_parent_ids: Vec<OperationId> = vec![];
+        let mut new_to_convert = vec![];
+        // Check which parents are already converted and which ones we need to rewrite
+        // first
+        for parent_id in &op.parents {
+            if let Some(new_parent_id) = converted.get(parent_id) {
+                new_parent_ids.push(new_parent_id.clone());
+            } else {
+                let parent_op = proto_store.read_operation(parent_id).unwrap();
+                new_to_convert.push((parent_id.clone(), parent_op));
+            }
+        }
+        if new_to_convert.is_empty() {
+            // If all parents have already been converted, remove this operation from the
+            // stack and convert it
+            let (op_id, mut op) = to_convert.pop().unwrap();
+            op.parents = new_parent_ids;
+            let view = proto_store.read_view(&op.view_id).unwrap();
+            let thrift_view_id = thrift_store.write_view(&view).unwrap();
+            op.view_id = thrift_view_id;
+            let thrift_op_id = thrift_store.write_operation(&op).unwrap();
+            converted.insert(op_id, thrift_op_id);
+        } else {
+            to_convert.extend(new_to_convert);
+        }
+    }
+
+    fs::write(tmp_store_path.join("thrift_store"), "")?;
+    let backup_store_path = store_path.parent().unwrap().join("op_store_old");
+    fs::rename(&store_path, &backup_store_path)?;
+    fs::rename(&tmp_store_path, &store_path)?;
+
+    // Update the pointers to the head(s) of the operation log
+    for old_op_head in old_op_heads {
+        let new_op_head = converted.get(&old_op_head).unwrap().clone();
+        fs::write(op_heads_store_path.join(new_op_head.hex()), "")?;
+        fs::remove_file(op_heads_store_path.join(old_op_head.hex()))?;
+    }
+
+    // Update the pointers from operations to index files
+    let index_operations_path = store_path
+        .parent()
+        .unwrap()
+        .join("index")
+        .join("operations");
+    for entry in fs::read_dir(&index_operations_path)? {
+        let basename = entry?.file_name();
+        let op_id_str = basename.to_str().unwrap();
+        if let Ok(op_id_bytes) = hex::decode(op_id_str) {
+            let old_op_id = OperationId::new(op_id_bytes);
+            // This should always succeed, but just skip it if it doesn't. We'll index
+            // the commits on demand if we don't have a pointer to an index file.
+            if let Some(new_op_id) = converted.get(&old_op_id) {
+                fs::rename(
+                    index_operations_path.join(basename),
+                    index_operations_path.join(new_op_id.hex()),
+                )?;
+            }
+        }
+    }
+
+    // Update the pointer to the last operation exported to Git
+    let git_export_path = store_path.parent().unwrap().join("git_export_operation_id");
+    if let Ok(op_id_string) = fs::read_to_string(&git_export_path) {
+        if let Ok(op_id_bytes) = hex::decode(&op_id_string) {
+            let old_op_id = OperationId::new(op_id_bytes);
+            let new_op_id = converted.get(&old_op_id).unwrap();
+            fs::write(&git_export_path, new_op_id.hex())?;
+        }
+    }
+
+    println!("Upgrade complete");
+    Ok(())
 }
 
 impl SimpleOpStore {
     pub fn init(store_path: PathBuf) -> Self {
-        let delegate = ProtoOpStore::init(store_path);
+        #[cfg(feature = "legacy_protobuf")]
+        fs::write(store_path.join("thrift_store"), "").unwrap();
+        let delegate = ThriftOpStore::init(store_path);
         SimpleOpStore { delegate }
     }
 
+    #[cfg(feature = "legacy_protobuf")]
     pub fn load(store_path: PathBuf) -> Self {
-        let delegate = ProtoOpStore::load(store_path);
+        if !store_path.join("thrift_store").exists() {
+            upgrade_to_thrift(store_path.clone())
+                .expect("Failed to upgrade operation log to Thrift format");
+        }
+        let delegate = ThriftOpStore::load(store_path);
         SimpleOpStore { delegate }
     }
+
+    #[cfg(not(feature = "legacy_protobuf"))]
+    pub fn load(store_path: PathBuf) -> Self {
+        let delegate = ThriftOpStore::load(store_path);
+        SimpleOpStore { delegate }
+    }
+}
+
+fn not_found_to_store_error(err: std::io::Error) -> OpStoreError {
+    if err.kind() == ErrorKind::NotFound {
+        OpStoreError::NotFound
+    } else {
+        OpStoreError::from(err)
+    }
 }
 
 impl OpStore for SimpleOpStore {
@@ -52,3 +218,454 @@
         self.delegate.write_operation(operation)
     }
 }
+
+#[derive(Debug)]
+struct ThriftOpStore {
+    path: PathBuf,
+}
+
+impl ThriftOpStore {
+    fn init(store_path: PathBuf) -> Self {
+        fs::create_dir(store_path.join("views")).unwrap();
+        fs::create_dir(store_path.join("operations")).unwrap();
+        Self::load(store_path)
+    }
+
+    fn load(store_path: PathBuf) -> Self {
+        ThriftOpStore { path: store_path }
+    }
+
+    fn view_path(&self, id: &ViewId) -> PathBuf {
+        self.path.join("views").join(id.hex())
+    }
+
+    fn operation_path(&self, id: &OperationId) -> PathBuf {
+        self.path.join("operations").join(id.hex())
+    }
+}
+
+impl OpStore for ThriftOpStore {
+    fn read_view(&self, id: &ViewId) -> OpStoreResult<View> {
+        let path = self.view_path(id);
+        let mut file = File::open(path).map_err(not_found_to_store_error)?;
+        let thrift_view = read_thrift(&mut file)?;
+        Ok(View::from(&thrift_view))
+    }
+
+    fn write_view(&self, view: &View) -> OpStoreResult<ViewId> {
+        let id = ViewId::new(hash(view).to_vec());
+        let temp_file = NamedTempFile::new_in(&self.path)?;
+        let thrift_view = simple_op_store_model::View::from(view);
+        write_thrift(&thrift_view, &mut temp_file.as_file())?;
+        persist_content_addressed_temp_file(temp_file, self.view_path(&id))?;
+        Ok(id)
+    }
+
+    fn read_operation(&self, id: &OperationId) -> OpStoreResult<Operation> {
+        let path = self.operation_path(id);
+        let mut file = File::open(path).map_err(not_found_to_store_error)?;
+        let thrift_operation = read_thrift(&mut file)?;
+        Ok(Operation::from(&thrift_operation))
+    }
+
+    fn write_operation(&self, operation: &Operation) -> OpStoreResult<OperationId> {
+        let id = OperationId::new(hash(operation).to_vec());
+        let temp_file = NamedTempFile::new_in(&self.path)?;
+        let thrift_operation = simple_op_store_model::Operation::from(operation);
+        write_thrift(&thrift_operation, &mut temp_file.as_file())?;
+        persist_content_addressed_temp_file(temp_file, self.operation_path(&id))?;
+        Ok(id)
+    }
+}
+
+fn hash(x: &impl ContentHash) -> digest::Output<Blake2b512> {
+    use digest::Digest;
+    let mut hasher = Blake2b512::default();
+    x.hash(&mut hasher);
+    hasher.finalize()
+}
+
+fn read_thrift<T: TSerializable>(input: &mut impl Read) -> OpStoreResult<T> {
+    let mut protocol = TCompactInputProtocol::new(input);
+    Ok(TSerializable::read_from_in_protocol(&mut protocol).unwrap())
+}
+
+fn write_thrift<T: TSerializable>(thrift_object: &T, output: &mut impl Write) -> OpStoreResult<()> {
+    let mut protocol = TCompactOutputProtocol::new(output);
+    thrift_object.write_to_out_protocol(&mut protocol)?;
+    Ok(())
+}
+
+impl From<&Timestamp> for simple_op_store_model::Timestamp {
+    fn from(timestamp: &Timestamp) -> Self {
+        simple_op_store_model::Timestamp::new(timestamp.timestamp.0, timestamp.tz_offset)
+    }
+}
+
+impl From<&simple_op_store_model::Timestamp> for Timestamp {
+    fn from(timestamp: &simple_op_store_model::Timestamp) -> Self {
+        Timestamp {
+            timestamp: MillisSinceEpoch(timestamp.millis_since_epoch),
+            tz_offset: timestamp.tz_offset,
+        }
+    }
+}
+
+impl From<&OperationMetadata> for simple_op_store_model::OperationMetadata {
+    fn from(metadata: &OperationMetadata) -> Self {
+        let start_time = simple_op_store_model::Timestamp::from(&metadata.start_time);
+        let end_time = simple_op_store_model::Timestamp::from(&metadata.end_time);
+        let description = metadata.description.clone();
+        let hostname = metadata.hostname.clone();
+        let username = metadata.username.clone();
+        let tags: BTreeMap<String, String> = metadata
+            .tags
+            .iter()
+            .map(|(x, y)| (x.clone(), y.clone()))
+            .collect();
+        simple_op_store_model::OperationMetadata::new(
+            start_time,
+            end_time,
+            description,
+            hostname,
+            username,
+            tags,
+        )
+    }
+}
+
+impl From<&simple_op_store_model::OperationMetadata> for OperationMetadata {
+    fn from(metadata: &simple_op_store_model::OperationMetadata) -> Self {
+        let start_time = Timestamp::from(&metadata.start_time);
+        let end_time = Timestamp::from(&metadata.end_time);
+        let description = metadata.description.to_owned();
+        let hostname = metadata.hostname.to_owned();
+        let username = metadata.username.to_owned();
+        let tags = metadata
+            .tags
+            .iter()
+            .map(|(key, value)| (key.clone(), value.clone()))
+            .collect();
+        OperationMetadata {
+            start_time,
+            end_time,
+            description,
+            hostname,
+            username,
+            tags,
+        }
+    }
+}
+
+impl From<&Operation> for simple_op_store_model::Operation {
+    fn from(operation: &Operation) -> Self {
+        let view_id = operation.view_id.as_bytes().to_vec();
+        let mut parents = vec![];
+        for parent in &operation.parents {
+            parents.push(parent.to_bytes());
+        }
+        let metadata = Box::new(simple_op_store_model::OperationMetadata::from(
+            &operation.metadata,
+        ));
+        simple_op_store_model::Operation::new(view_id, parents, metadata)
+    }
+}
+
+impl From<&View> for simple_op_store_model::View {
+    fn from(view: &View) -> Self {
+        let mut wc_commit_ids = BTreeMap::new();
+        for (workspace_id, commit_id) in &view.wc_commit_ids {
+            wc_commit_ids.insert(workspace_id.as_str().to_string(), commit_id.to_bytes());
+        }
+
+        let mut head_ids = vec![];
+        for head_id in &view.head_ids {
+            head_ids.push(head_id.to_bytes());
+        }
+
+        let mut public_head_ids = vec![];
+        for head_id in &view.public_head_ids {
+            public_head_ids.push(head_id.to_bytes());
+        }
+
+        let mut branches = vec![];
+        for (name, target) in &view.branches {
+            let local_target = target
+                .local_target
+                .as_ref()
+                .map(simple_op_store_model::RefTarget::from);
+            let mut remote_branches = vec![];
+            for (remote_name, target) in &target.remote_targets {
+                remote_branches.push(simple_op_store_model::RemoteBranch::new(
+                    remote_name.clone(),
+                    simple_op_store_model::RefTarget::from(target),
+                ));
+            }
+            branches.push(simple_op_store_model::Branch::new(
+                name.clone(),
+                local_target,
+                remote_branches,
+            ));
+        }
+
+        let mut tags = vec![];
+        for (name, target) in &view.tags {
+            tags.push(simple_op_store_model::Tag::new(
+                name.clone(),
+                simple_op_store_model::RefTarget::from(target),
+            ));
+        }
+
+        let mut git_refs = vec![];
+        for (git_ref_name, target) in &view.git_refs {
+            git_refs.push(simple_op_store_model::GitRef::new(
+                git_ref_name.clone(),
+                simple_op_store_model::RefTarget::from(target),
+            ));
+        }
+
+        let git_head = view.git_head.as_ref().map(|git_head| git_head.to_bytes());
+
+        simple_op_store_model::View::new(
+            head_ids,
+            public_head_ids,
+            wc_commit_ids,
+            branches,
+            tags,
+            git_refs,
+            git_head,
+        )
+    }
+}
+
+impl From<&simple_op_store_model::Operation> for Operation {
+    fn from(operation: &simple_op_store_model::Operation) -> Self {
+        let operation_id_from_thrift = |parent: &Vec<u8>| OperationId::new(parent.clone());
+        let parents = operation
+            .parents
+            .iter()
+            .map(operation_id_from_thrift)
+            .collect();
+        let view_id = ViewId::new(operation.view_id.clone());
+        let metadata = OperationMetadata::from(operation.metadata.as_ref());
+        Operation {
+            view_id,
+            parents,
+            metadata,
+        }
+    }
+}
+
+impl From<&simple_op_store_model::View> for View {
+    fn from(thrift_view: &simple_op_store_model::View) -> Self {
+        let mut view = View::default();
+        for (workspace_id, commit_id) in &thrift_view.wc_commit_ids {
+            view.wc_commit_ids.insert(
+                WorkspaceId::new(workspace_id.clone()),
+                CommitId::new(commit_id.clone()),
+            );
+        }
+        for head_id_bytes in &thrift_view.head_ids {
+            view.head_ids.insert(CommitId::from_bytes(head_id_bytes));
+        }
+        for head_id_bytes in &thrift_view.public_head_ids {
+            view.public_head_ids
+                .insert(CommitId::from_bytes(head_id_bytes));
+        }
+
+        for thrift_branch in &thrift_view.branches {
+            let local_target = thrift_branch.local_target.as_ref().map(RefTarget::from);
+
+            let mut remote_targets = BTreeMap::new();
+            for remote_branch in &thrift_branch.remote_branches {
+                remote_targets.insert(
+                    remote_branch.remote_name.clone(),
+                    RefTarget::from(&remote_branch.target),
+                );
+            }
+
+            view.branches.insert(
+                thrift_branch.name.clone(),
+                BranchTarget {
+                    local_target,
+                    remote_targets,
+                },
+            );
+        }
+
+        for thrift_tag in &thrift_view.tags {
+            view.tags
+                .insert(thrift_tag.name.clone(), RefTarget::from(&thrift_tag.target));
+        }
+
+        for git_ref in &thrift_view.git_refs {
+            view.git_refs
+                .insert(git_ref.name.clone(), RefTarget::from(&git_ref.target));
+        }
+
+        view.git_head = thrift_view
+            .git_head
+            .as_ref()
+            .map(|head| CommitId::new(head.clone()));
+
+        view
+    }
+}
+
+impl From<&RefTarget> for simple_op_store_model::RefTarget {
+    fn from(ref_target: &RefTarget) -> Self {
+        match ref_target {
+            RefTarget::Normal(id) => simple_op_store_model::RefTarget::CommitId(id.to_bytes()),
+            RefTarget::Conflict { removes, adds } => {
+                let adds = adds.iter().map(|id| id.to_bytes()).collect_vec();
+                let removes = removes.iter().map(|id| id.to_bytes()).collect_vec();
+                let ref_conflict_thrift = simple_op_store_model::RefConflict::new(removes, adds);
+                simple_op_store_model::RefTarget::Conflict(ref_conflict_thrift)
+            }
+        }
+    }
+}
+
+impl From<&simple_op_store_model::RefTarget> for RefTarget {
+    fn from(thrift_ref_target: &simple_op_store_model::RefTarget) -> Self {
+        match thrift_ref_target {
+            simple_op_store_model::RefTarget::CommitId(commit_id) => {
+                RefTarget::Normal(CommitId::from_bytes(commit_id))
+            }
+            simple_op_store_model::RefTarget::Conflict(conflict) => {
+                let removes = conflict
+                    .removes
+                    .iter()
+                    .map(|id_bytes| CommitId::from_bytes(id_bytes))
+                    .collect_vec();
+                let adds = conflict
+                    .adds
+                    .iter()
+                    .map(|id_bytes| CommitId::from_bytes(id_bytes))
+                    .collect_vec();
+                RefTarget::Conflict { removes, adds }
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use insta::assert_snapshot;
+    use maplit::{btreemap, hashmap, hashset};
+
+    use super::*;
+
+    fn create_view() -> View {
+        let head_id1 = CommitId::from_hex("aaa111");
+        let head_id2 = CommitId::from_hex("aaa222");
+        let public_head_id1 = CommitId::from_hex("bbb444");
+        let public_head_id2 = CommitId::from_hex("bbb555");
+        let branch_main_local_target = RefTarget::Normal(CommitId::from_hex("ccc111"));
+        let branch_main_origin_target = RefTarget::Normal(CommitId::from_hex("ccc222"));
+        let branch_deleted_origin_target = RefTarget::Normal(CommitId::from_hex("ccc333"));
+        let tag_v1_target = RefTarget::Normal(CommitId::from_hex("ddd111"));
+        let git_refs_main_target = RefTarget::Normal(CommitId::from_hex("fff111"));
+        let git_refs_feature_target = RefTarget::Conflict {
+            removes: vec![CommitId::from_hex("fff111")],
+            adds: vec![CommitId::from_hex("fff222"), CommitId::from_hex("fff333")],
+        };
+        let default_wc_commit_id = CommitId::from_hex("abc111");
+        let test_wc_commit_id = CommitId::from_hex("abc222");
+        View {
+            head_ids: hashset! {head_id1, head_id2},
+            public_head_ids: hashset! {public_head_id1, public_head_id2},
+            branches: btreemap! {
+                "main".to_string() => BranchTarget {
+                    local_target: Some(branch_main_local_target),
+                    remote_targets: btreemap! {
+                        "origin".to_string() => branch_main_origin_target,
+                    }
+                },
+                "deleted".to_string() => BranchTarget {
+                    local_target: None,
+                    remote_targets: btreemap! {
+                        "origin".to_string() => branch_deleted_origin_target,
+                    }
+                },
+            },
+            tags: btreemap! {
+                "v1.0".to_string() => tag_v1_target,
+            },
+            git_refs: btreemap! {
+                "refs/heads/main".to_string() => git_refs_main_target,
+                "refs/heads/feature".to_string() => git_refs_feature_target
+            },
+            git_head: Some(CommitId::from_hex("fff111")),
+            wc_commit_ids: hashmap! {
+                WorkspaceId::default() => default_wc_commit_id,
+                WorkspaceId::new("test".to_string()) => test_wc_commit_id,
+            },
+        }
+    }
+
+    fn create_operation() -> Operation {
+        Operation {
+            view_id: ViewId::from_hex("aaa111"),
+            parents: vec![
+                OperationId::from_hex("bbb111"),
+                OperationId::from_hex("bbb222"),
+            ],
+            metadata: OperationMetadata {
+                start_time: Timestamp {
+                    timestamp: MillisSinceEpoch(123456789),
+                    tz_offset: 3600,
+                },
+                end_time: Timestamp {
+                    timestamp: MillisSinceEpoch(123456800),
+                    tz_offset: 3600,
+                },
+                description: "check out foo".to_string(),
+                hostname: "some.host.example.com".to_string(),
+                username: "someone".to_string(),
+                tags: hashmap! {
+                    "key1".to_string() => "value1".to_string(),
+                    "key2".to_string() => "value2".to_string(),
+                },
+            },
+        }
+    }
+
+    #[test]
+    fn test_hash_view() {
+        // Test exact output so we detect regressions in compatibility
+        assert_snapshot!(
+            ViewId::new(hash(&create_view()).to_vec()).hex(),
+            @"2a026b6a091219a3d8ca43d822984cf9be0c53438225d76a5ba5e6d3724fab15104579fb08fa949977c4357b1806d240bef28d958cbcd7d786962ac88c15df31"
+        );
+    }
+
+    #[test]
+    fn test_hash_operation() {
+        // Test exact output so we detect regressions in compatibility
+        assert_snapshot!(
+            OperationId::new(hash(&create_operation()).to_vec()).hex(),
+            @"3ec986c29ff8eb808ea8f6325d6307cea75ef02987536c8e4645406aba51afc8e229957a6e855170d77a66098c58912309323f5e0b32760caa2b59dc84d45fcf"
+        );
+    }
+
+    #[test]
+    fn test_read_write_view() {
+        let temp_dir = testutils::new_temp_dir();
+        let store = SimpleOpStore::init(temp_dir.path().to_owned());
+        let view = create_view();
+        let view_id = store.write_view(&view).unwrap();
+        let read_view = store.read_view(&view_id).unwrap();
+        assert_eq!(read_view, view);
+    }
+
+    #[test]
+    fn test_read_write_operation() {
+        let temp_dir = testutils::new_temp_dir();
+        let store = SimpleOpStore::init(temp_dir.path().to_owned());
+        let operation = create_operation();
+        let op_id = store.write_operation(&operation).unwrap();
+        let read_operation = store.read_operation(&op_id).unwrap();
+        assert_eq!(read_operation, operation);
+    }
+}
diff --git a/lib/src/simple_op_store_model.rs b/lib/src/simple_op_store_model.rs
new file mode 100644
index 0000000000..49bd98f149
--- /dev/null
+++ b/lib/src/simple_op_store_model.rs
@@ -0,0 +1,972 @@
+// Autogenerated by Thrift Compiler (0.17.0)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+#![allow(unused_imports)]
+#![allow(unused_extern_crates)]
+#![allow(clippy::too_many_arguments, clippy::type_complexity, clippy::vec_box)]
+#![cfg_attr(rustfmt, rustfmt_skip)]
+
+use std::cell::RefCell;
+use std::collections::{BTreeMap, BTreeSet};
+use std::convert::{From, TryFrom};
+use std::default::Default;
+use std::error::Error;
+use std::fmt;
+use std::fmt::{Display, Formatter};
+use std::rc::Rc;
+
+use thrift::OrderedFloat;
+use thrift::{ApplicationError, ApplicationErrorKind, ProtocolError, ProtocolErrorKind, TThriftClient};
+use thrift::protocol::{TFieldIdentifier, TListIdentifier, TMapIdentifier, TMessageIdentifier, TMessageType, TInputProtocol, TOutputProtocol, TSerializable, TSetIdentifier, TStructIdentifier, TType};
+use thrift::protocol::field_id;
+use thrift::protocol::verify_expected_message_type;
+use thrift::protocol::verify_expected_sequence_number;
+use thrift::protocol::verify_expected_service_call;
+use thrift::protocol::verify_required_field_exists;
+
+//
+// RefConflict
+//
+
+#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct RefConflict {
+  pub removes: Vec<Vec<u8>>,
+  pub adds: Vec<Vec<u8>>,
+}
+
+impl RefConflict {
+  pub fn new(removes: Vec<Vec<u8>>, adds: Vec<Vec<u8>>) -> RefConflict {
+    RefConflict {
+      removes,
+      adds,
+    }
+  }
+}
+
+impl TSerializable for RefConflict {
+  fn read_from_in_protocol(i_prot: &mut dyn TInputProtocol) -> thrift::Result<RefConflict> {
+    i_prot.read_struct_begin()?;
+    let mut f_1: Option<Vec<Vec<u8>>> = None;
+    let mut f_2: Option<Vec<Vec<u8>>> = None;
+    loop {
+      let field_ident = i_prot.read_field_begin()?;
+      if field_ident.field_type == TType::Stop {
+        break;
+      }
+      let field_id = field_id(&field_ident)?;
+      match field_id {
+        1 => {
+          let list_ident = i_prot.read_list_begin()?;
+          let mut val: Vec<Vec<u8>> = Vec::with_capacity(list_ident.size as usize);
+          for _ in 0..list_ident.size {
+            let list_elem_0 = i_prot.read_bytes()?;
+            val.push(list_elem_0);
+          }
+          i_prot.read_list_end()?;
+          f_1 = Some(val);
+        },
+        2 => {
+          let list_ident = i_prot.read_list_begin()?;
+          let mut val: Vec<Vec<u8>> = Vec::with_capacity(list_ident.size as usize);
+          for _ in 0..list_ident.size {
+            let list_elem_1 = i_prot.read_bytes()?;
+            val.push(list_elem_1);
+          }
+          i_prot.read_list_end()?;
+          f_2 = Some(val);
+        },
+        _ => {
+          i_prot.skip(field_ident.field_type)?;
+        },
+      };
+      i_prot.read_field_end()?;
+    }
+    i_prot.read_struct_end()?;
+    verify_required_field_exists("RefConflict.removes", &f_1)?;
+    verify_required_field_exists("RefConflict.adds", &f_2)?;
+    let ret = RefConflict {
+      removes: f_1.expect("auto-generated code should have checked for presence of required fields"),
+      adds: f_2.expect("auto-generated code should have checked for presence of required fields"),
+    };
+    Ok(ret)
+  }
+  fn write_to_out_protocol(&self, o_prot: &mut dyn TOutputProtocol) -> thrift::Result<()> {
+    let struct_ident = TStructIdentifier::new("RefConflict");
+    o_prot.write_struct_begin(&struct_ident)?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("removes", TType::List, 1))?;
+    o_prot.write_list_begin(&TListIdentifier::new(TType::String, self.removes.len() as i32))?;
+    for e in &self.removes {
+      o_prot.write_bytes(e)?;
+    }
+    o_prot.write_list_end()?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("adds", TType::List, 2))?;
+    o_prot.write_list_begin(&TListIdentifier::new(TType::String, self.adds.len() as i32))?;
+    for e in &self.adds {
+      o_prot.write_bytes(e)?;
+    }
+    o_prot.write_list_end()?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_stop()?;
+    o_prot.write_struct_end()
+  }
+}
+
+//
+// RefTarget
+//
+
+#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub enum RefTarget {
+  CommitId(Vec<u8>),
+  Conflict(RefConflict),
+}
+
+impl TSerializable for RefTarget {
+  fn read_from_in_protocol(i_prot: &mut dyn TInputProtocol) -> thrift::Result<RefTarget> {
+    let mut ret: Option<RefTarget> = None;
+    let mut received_field_count = 0;
+    i_prot.read_struct_begin()?;
+    loop {
+      let field_ident = i_prot.read_field_begin()?;
+      if field_ident.field_type == TType::Stop {
+        break;
+      }
+      let field_id = field_id(&field_ident)?;
+      match field_id {
+        1 => {
+          let val = i_prot.read_bytes()?;
+          if ret.is_none() {
+            ret = Some(RefTarget::CommitId(val));
+          }
+          received_field_count += 1;
+        },
+        2 => {
+          let val = RefConflict::read_from_in_protocol(i_prot)?;
+          if ret.is_none() {
+            ret = Some(RefTarget::Conflict(val));
+          }
+          received_field_count += 1;
+        },
+        _ => {
+          i_prot.skip(field_ident.field_type)?;
+          received_field_count += 1;
+        },
+      };
+      i_prot.read_field_end()?;
+    }
+    i_prot.read_struct_end()?;
+    if received_field_count == 0 {
+      Err(
+        thrift::Error::Protocol(
+          ProtocolError::new(
+            ProtocolErrorKind::InvalidData,
+            "received empty union from remote RefTarget"
+          )
+        )
+      )
+    } else if received_field_count > 1 {
+      Err(
+        thrift::Error::Protocol(
+          ProtocolError::new(
+            ProtocolErrorKind::InvalidData,
+            "received multiple fields for union from remote RefTarget"
+          )
+        )
+      )
+    } else {
+      Ok(ret.expect("return value should have been constructed"))
+    }
+  }
+  fn write_to_out_protocol(&self, o_prot: &mut dyn TOutputProtocol) -> thrift::Result<()> {
+    let struct_ident = TStructIdentifier::new("RefTarget");
+    o_prot.write_struct_begin(&struct_ident)?;
+    match *self {
+      RefTarget::CommitId(ref f) => {
+        o_prot.write_field_begin(&TFieldIdentifier::new("commit_id", TType::String, 1))?;
+        o_prot.write_bytes(f)?;
+        o_prot.write_field_end()?;
+      },
+      RefTarget::Conflict(ref f) => {
+        o_prot.write_field_begin(&TFieldIdentifier::new("conflict", TType::Struct, 2))?;
+        f.write_to_out_protocol(o_prot)?;
+        o_prot.write_field_end()?;
+      },
+    }
+    o_prot.write_field_stop()?;
+    o_prot.write_struct_end()
+  }
+}
+
+//
+// RemoteBranch
+//
+
+#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct RemoteBranch {
+  pub remote_name: String,
+  pub target: RefTarget,
+}
+
+impl RemoteBranch {
+  pub fn new(remote_name: String, target: RefTarget) -> RemoteBranch {
+    RemoteBranch {
+      remote_name,
+      target,
+    }
+  }
+}
+
+impl TSerializable for RemoteBranch {
+  fn read_from_in_protocol(i_prot: &mut dyn TInputProtocol) -> thrift::Result<RemoteBranch> {
+    i_prot.read_struct_begin()?;
+    let mut f_1: Option<String> = None;
+    let mut f_2: Option<RefTarget> = None;
+    loop {
+      let field_ident = i_prot.read_field_begin()?;
+      if field_ident.field_type == TType::Stop {
+        break;
+      }
+      let field_id = field_id(&field_ident)?;
+      match field_id {
+        1 => {
+          let val = i_prot.read_string()?;
+          f_1 = Some(val);
+        },
+        2 => {
+          let val = RefTarget::read_from_in_protocol(i_prot)?;
+          f_2 = Some(val);
+        },
+        _ => {
+          i_prot.skip(field_ident.field_type)?;
+        },
+      };
+      i_prot.read_field_end()?;
+    }
+    i_prot.read_struct_end()?;
+    verify_required_field_exists("RemoteBranch.remote_name", &f_1)?;
+    verify_required_field_exists("RemoteBranch.target", &f_2)?;
+    let ret = RemoteBranch {
+      remote_name: f_1.expect("auto-generated code should have checked for presence of required fields"),
+      target: f_2.expect("auto-generated code should have checked for presence of required fields"),
+    };
+    Ok(ret)
+  }
+  fn write_to_out_protocol(&self, o_prot: &mut dyn TOutputProtocol) -> thrift::Result<()> {
+    let struct_ident = TStructIdentifier::new("RemoteBranch");
+    o_prot.write_struct_begin(&struct_ident)?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("remote_name", TType::String, 1))?;
+    o_prot.write_string(&self.remote_name)?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("target", TType::Struct, 2))?;
+    self.target.write_to_out_protocol(o_prot)?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_stop()?;
+    o_prot.write_struct_end()
+  }
+}
+
+//
+// Branch
+//
+
+#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct Branch {
+  pub name: String,
+  pub local_target: Option<RefTarget>,
+  pub remote_branches: Vec<RemoteBranch>,
+}
+
+impl Branch {
+  pub fn new<F2>(name: String, local_target: F2, remote_branches: Vec<RemoteBranch>) -> Branch where F2: Into<Option<RefTarget>> {
+    Branch {
+      name,
+      local_target: local_target.into(),
+      remote_branches,
+    }
+  }
+}
+
+impl TSerializable for Branch {
+  fn read_from_in_protocol(i_prot: &mut dyn TInputProtocol) -> thrift::Result<Branch> {
+    i_prot.read_struct_begin()?;
+    let mut f_1: Option<String> = None;
+    let mut f_2: Option<RefTarget> = None;
+    let mut f_3: Option<Vec<RemoteBranch>> = None;
+    loop {
+      let field_ident = i_prot.read_field_begin()?;
+      if field_ident.field_type == TType::Stop {
+        break;
+      }
+      let field_id = field_id(&field_ident)?;
+      match field_id {
+        1 => {
+          let val = i_prot.read_string()?;
+          f_1 = Some(val);
+        },
+        2 => {
+          let val = RefTarget::read_from_in_protocol(i_prot)?;
+          f_2 = Some(val);
+        },
+        3 => {
+          let list_ident = i_prot.read_list_begin()?;
+          let mut val: Vec<RemoteBranch> = Vec::with_capacity(list_ident.size as usize);
+          for _ in 0..list_ident.size {
+            let list_elem_2 = RemoteBranch::read_from_in_protocol(i_prot)?;
+            val.push(list_elem_2);
+          }
+          i_prot.read_list_end()?;
+          f_3 = Some(val);
+        },
+        _ => {
+          i_prot.skip(field_ident.field_type)?;
+        },
+      };
+      i_prot.read_field_end()?;
+    }
+    i_prot.read_struct_end()?;
+    verify_required_field_exists("Branch.name", &f_1)?;
+    verify_required_field_exists("Branch.remote_branches", &f_3)?;
+    let ret = Branch {
+      name: f_1.expect("auto-generated code should have checked for presence of required fields"),
+      local_target: f_2,
+      remote_branches: f_3.expect("auto-generated code should have checked for presence of required fields"),
+    };
+    Ok(ret)
+  }
+  fn write_to_out_protocol(&self, o_prot: &mut dyn TOutputProtocol) -> thrift::Result<()> {
+    let struct_ident = TStructIdentifier::new("Branch");
+    o_prot.write_struct_begin(&struct_ident)?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("name", TType::String, 1))?;
+    o_prot.write_string(&self.name)?;
+    o_prot.write_field_end()?;
+    if let Some(ref fld_var) = self.local_target {
+      o_prot.write_field_begin(&TFieldIdentifier::new("local_target", TType::Struct, 2))?;
+      fld_var.write_to_out_protocol(o_prot)?;
+      o_prot.write_field_end()?
+    }
+    o_prot.write_field_begin(&TFieldIdentifier::new("remote_branches", TType::List, 3))?;
+    o_prot.write_list_begin(&TListIdentifier::new(TType::Struct, self.remote_branches.len() as i32))?;
+    for e in &self.remote_branches {
+      e.write_to_out_protocol(o_prot)?;
+    }
+    o_prot.write_list_end()?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_stop()?;
+    o_prot.write_struct_end()
+  }
+}
+
+//
+// GitRef
+//
+
+#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct GitRef {
+  pub name: String,
+  pub target: RefTarget,
+}
+
+impl GitRef {
+  pub fn new(name: String, target: RefTarget) -> GitRef {
+    GitRef {
+      name,
+      target,
+    }
+  }
+}
+
+impl TSerializable for GitRef {
+  fn read_from_in_protocol(i_prot: &mut dyn TInputProtocol) -> thrift::Result<GitRef> {
+    i_prot.read_struct_begin()?;
+    let mut f_1: Option<String> = None;
+    let mut f_2: Option<RefTarget> = None;
+    loop {
+      let field_ident = i_prot.read_field_begin()?;
+      if field_ident.field_type == TType::Stop {
+        break;
+      }
+      let field_id = field_id(&field_ident)?;
+      match field_id {
+        1 => {
+          let val = i_prot.read_string()?;
+          f_1 = Some(val);
+        },
+        2 => {
+          let val = RefTarget::read_from_in_protocol(i_prot)?;
+          f_2 = Some(val);
+        },
+        _ => {
+          i_prot.skip(field_ident.field_type)?;
+        },
+      };
+      i_prot.read_field_end()?;
+    }
+    i_prot.read_struct_end()?;
+    verify_required_field_exists("GitRef.name", &f_1)?;
+    verify_required_field_exists("GitRef.target", &f_2)?;
+    let ret = GitRef {
+      name: f_1.expect("auto-generated code should have checked for presence of required fields"),
+      target: f_2.expect("auto-generated code should have checked for presence of required fields"),
+    };
+    Ok(ret)
+  }
+  fn write_to_out_protocol(&self, o_prot: &mut dyn TOutputProtocol) -> thrift::Result<()> {
+    let struct_ident = TStructIdentifier::new("GitRef");
+    o_prot.write_struct_begin(&struct_ident)?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("name", TType::String, 1))?;
+    o_prot.write_string(&self.name)?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("target", TType::Struct, 2))?;
+    self.target.write_to_out_protocol(o_prot)?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_stop()?;
+    o_prot.write_struct_end()
+  }
+}
+
+//
+// Tag
+//
+
+#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct Tag {
+  pub name: String,
+  pub target: RefTarget,
+}
+
+impl Tag {
+  pub fn new(name: String, target: RefTarget) -> Tag {
+    Tag {
+      name,
+      target,
+    }
+  }
+}
+
+impl TSerializable for Tag {
+  fn read_from_in_protocol(i_prot: &mut dyn TInputProtocol) -> thrift::Result<Tag> {
+    i_prot.read_struct_begin()?;
+    let mut f_1: Option<String> = None;
+    let mut f_2: Option<RefTarget> = None;
+    loop {
+      let field_ident = i_prot.read_field_begin()?;
+      if field_ident.field_type == TType::Stop {
+        break;
+      }
+      let field_id = field_id(&field_ident)?;
+      match field_id {
+        1 => {
+          let val = i_prot.read_string()?;
+          f_1 = Some(val);
+        },
+        2 => {
+          let val = RefTarget::read_from_in_protocol(i_prot)?;
+          f_2 = Some(val);
+        },
+        _ => {
+          i_prot.skip(field_ident.field_type)?;
+        },
+      };
+      i_prot.read_field_end()?;
+    }
+    i_prot.read_struct_end()?;
+    verify_required_field_exists("Tag.name", &f_1)?;
+    verify_required_field_exists("Tag.target", &f_2)?;
+    let ret = Tag {
+      name: f_1.expect("auto-generated code should have checked for presence of required fields"),
+      target: f_2.expect("auto-generated code should have checked for presence of required fields"),
+    };
+    Ok(ret)
+  }
+  fn write_to_out_protocol(&self, o_prot: &mut dyn TOutputProtocol) -> thrift::Result<()> {
+    let struct_ident = TStructIdentifier::new("Tag");
+    o_prot.write_struct_begin(&struct_ident)?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("name", TType::String, 1))?;
+    o_prot.write_string(&self.name)?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("target", TType::Struct, 2))?;
+    self.target.write_to_out_protocol(o_prot)?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_stop()?;
+    o_prot.write_struct_end()
+  }
+}
+
+//
+// View
+//
+
+#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct View {
+  pub head_ids: Vec<Vec<u8>>,
+  pub public_head_ids: Vec<Vec<u8>>,
+  pub wc_commit_ids: BTreeMap<String, Vec<u8>>,
+  pub branches: Vec<Branch>,
+  pub tags: Vec<Tag>,
+  pub git_refs: Vec<GitRef>,
+  pub git_head: Option<Vec<u8>>,
+}
+
+impl View {
+  pub fn new<F7>(head_ids: Vec<Vec<u8>>, public_head_ids: Vec<Vec<u8>>, wc_commit_ids: BTreeMap<String, Vec<u8>>, branches: Vec<Branch>, tags: Vec<Tag>, git_refs: Vec<GitRef>, git_head: F7) -> View where F7: Into<Option<Vec<u8>>> {
+    View {
+      head_ids,
+      public_head_ids,
+      wc_commit_ids,
+      branches,
+      tags,
+      git_refs,
+      git_head: git_head.into(),
+    }
+  }
+}
+
+impl TSerializable for View {
+  fn read_from_in_protocol(i_prot: &mut dyn TInputProtocol) -> thrift::Result<View> {
+    i_prot.read_struct_begin()?;
+    let mut f_1: Option<Vec<Vec<u8>>> = None;
+    let mut f_2: Option<Vec<Vec<u8>>> = None;
+    let mut f_3: Option<BTreeMap<String, Vec<u8>>> = None;
+    let mut f_4: Option<Vec<Branch>> = None;
+    let mut f_5: Option<Vec<Tag>> = None;
+    let mut f_6: Option<Vec<GitRef>> = None;
+    let mut f_7: Option<Vec<u8>> = None;
+    loop {
+      let field_ident = i_prot.read_field_begin()?;
+      if field_ident.field_type == TType::Stop {
+        break;
+      }
+      let field_id = field_id(&field_ident)?;
+      match field_id {
+        1 => {
+          let list_ident = i_prot.read_list_begin()?;
+          let mut val: Vec<Vec<u8>> = Vec::with_capacity(list_ident.size as usize);
+          for _ in 0..list_ident.size {
+            let list_elem_3 = i_prot.read_bytes()?;
+            val.push(list_elem_3);
+          }
+          i_prot.read_list_end()?;
+          f_1 = Some(val);
+        },
+        2 => {
+          let list_ident = i_prot.read_list_begin()?;
+          let mut val: Vec<Vec<u8>> = Vec::with_capacity(list_ident.size as usize);
+          for _ in 0..list_ident.size {
+            let list_elem_4 = i_prot.read_bytes()?;
+            val.push(list_elem_4);
+          }
+          i_prot.read_list_end()?;
+          f_2 = Some(val);
+        },
+        3 => {
+          let map_ident = i_prot.read_map_begin()?;
+          let mut val: BTreeMap<String, Vec<u8>> = BTreeMap::new();
+          for _ in 0..map_ident.size {
+            let map_key_5 = i_prot.read_string()?;
+            let map_val_6 = i_prot.read_bytes()?;
+            val.insert(map_key_5, map_val_6);
+          }
+          i_prot.read_map_end()?;
+          f_3 = Some(val);
+        },
+        4 => {
+          let list_ident = i_prot.read_list_begin()?;
+          let mut val: Vec<Branch> = Vec::with_capacity(list_ident.size as usize);
+          for _ in 0..list_ident.size {
+            let list_elem_7 = Branch::read_from_in_protocol(i_prot)?;
+            val.push(list_elem_7);
+          }
+          i_prot.read_list_end()?;
+          f_4 = Some(val);
+        },
+        5 => {
+          let list_ident = i_prot.read_list_begin()?;
+          let mut val: Vec<Tag> = Vec::with_capacity(list_ident.size as usize);
+          for _ in 0..list_ident.size {
+            let list_elem_8 = Tag::read_from_in_protocol(i_prot)?;
+            val.push(list_elem_8);
+          }
+          i_prot.read_list_end()?;
+          f_5 = Some(val);
+        },
+        6 => {
+          let list_ident = i_prot.read_list_begin()?;
+          let mut val: Vec<GitRef> = Vec::with_capacity(list_ident.size as usize);
+          for _ in 0..list_ident.size {
+            let list_elem_9 = GitRef::read_from_in_protocol(i_prot)?;
+            val.push(list_elem_9);
+          }
+          i_prot.read_list_end()?;
+          f_6 = Some(val);
+        },
+        7 => {
+          let val = i_prot.read_bytes()?;
+          f_7 = Some(val);
+        },
+        _ => {
+          i_prot.skip(field_ident.field_type)?;
+        },
+      };
+      i_prot.read_field_end()?;
+    }
+    i_prot.read_struct_end()?;
+    verify_required_field_exists("View.head_ids", &f_1)?;
+    verify_required_field_exists("View.public_head_ids", &f_2)?;
+    verify_required_field_exists("View.wc_commit_ids", &f_3)?;
+    verify_required_field_exists("View.branches", &f_4)?;
+    verify_required_field_exists("View.tags", &f_5)?;
+    verify_required_field_exists("View.git_refs", &f_6)?;
+    let ret = View {
+      head_ids: f_1.expect("auto-generated code should have checked for presence of required fields"),
+      public_head_ids: f_2.expect("auto-generated code should have checked for presence of required fields"),
+      wc_commit_ids: f_3.expect("auto-generated code should have checked for presence of required fields"),
+      branches: f_4.expect("auto-generated code should have checked for presence of required fields"),
+      tags: f_5.expect("auto-generated code should have checked for presence of required fields"),
+      git_refs: f_6.expect("auto-generated code should have checked for presence of required fields"),
+      git_head: f_7,
+    };
+    Ok(ret)
+  }
+  fn write_to_out_protocol(&self, o_prot: &mut dyn TOutputProtocol) -> thrift::Result<()> {
+    let struct_ident = TStructIdentifier::new("View");
+    o_prot.write_struct_begin(&struct_ident)?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("head_ids", TType::List, 1))?;
+    o_prot.write_list_begin(&TListIdentifier::new(TType::String, self.head_ids.len() as i32))?;
+    for e in &self.head_ids {
+      o_prot.write_bytes(e)?;
+    }
+    o_prot.write_list_end()?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("public_head_ids", TType::List, 2))?;
+    o_prot.write_list_begin(&TListIdentifier::new(TType::String, self.public_head_ids.len() as i32))?;
+    for e in &self.public_head_ids {
+      o_prot.write_bytes(e)?;
+    }
+    o_prot.write_list_end()?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("wc_commit_ids", TType::Map, 3))?;
+    o_prot.write_map_begin(&TMapIdentifier::new(TType::String, TType::String, self.wc_commit_ids.len() as i32))?;
+    for (k, v) in &self.wc_commit_ids {
+      o_prot.write_string(k)?;
+      o_prot.write_bytes(v)?;
+    }
+    o_prot.write_map_end()?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("branches", TType::List, 4))?;
+    o_prot.write_list_begin(&TListIdentifier::new(TType::Struct, self.branches.len() as i32))?;
+    for e in &self.branches {
+      e.write_to_out_protocol(o_prot)?;
+    }
+    o_prot.write_list_end()?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("tags", TType::List, 5))?;
+    o_prot.write_list_begin(&TListIdentifier::new(TType::Struct, self.tags.len() as i32))?;
+    for e in &self.tags {
+      e.write_to_out_protocol(o_prot)?;
+    }
+    o_prot.write_list_end()?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("git_refs", TType::List, 6))?;
+    o_prot.write_list_begin(&TListIdentifier::new(TType::Struct, self.git_refs.len() as i32))?;
+    for e in &self.git_refs {
+      e.write_to_out_protocol(o_prot)?;
+    }
+    o_prot.write_list_end()?;
+    o_prot.write_field_end()?;
+    if let Some(ref fld_var) = self.git_head {
+      o_prot.write_field_begin(&TFieldIdentifier::new("git_head", TType::String, 7))?;
+      o_prot.write_bytes(fld_var)?;
+      o_prot.write_field_end()?
+    }
+    o_prot.write_field_stop()?;
+    o_prot.write_struct_end()
+  }
+}
+
+//
+// Operation
+//
+
+#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct Operation {
+  pub view_id: Vec<u8>,
+  pub parents: Vec<Vec<u8>>,
+  pub metadata: Box<OperationMetadata>,
+}
+
+impl Operation {
+  pub fn new(view_id: Vec<u8>, parents: Vec<Vec<u8>>, metadata: Box<OperationMetadata>) -> Operation {
+    Operation {
+      view_id,
+      parents,
+      metadata,
+    }
+  }
+}
+
+impl TSerializable for Operation {
+  fn read_from_in_protocol(i_prot: &mut dyn TInputProtocol) -> thrift::Result<Operation> {
+    i_prot.read_struct_begin()?;
+    let mut f_1: Option<Vec<u8>> = None;
+    let mut f_2: Option<Vec<Vec<u8>>> = None;
+    let mut f_3: Option<Box<OperationMetadata>> = None;
+    loop {
+      let field_ident = i_prot.read_field_begin()?;
+      if field_ident.field_type == TType::Stop {
+        break;
+      }
+      let field_id = field_id(&field_ident)?;
+      match field_id {
+        1 => {
+          let val = i_prot.read_bytes()?;
+          f_1 = Some(val);
+        },
+        2 => {
+          let list_ident = i_prot.read_list_begin()?;
+          let mut val: Vec<Vec<u8>> = Vec::with_capacity(list_ident.size as usize);
+          for _ in 0..list_ident.size {
+            let list_elem_10 = i_prot.read_bytes()?;
+            val.push(list_elem_10);
+          }
+          i_prot.read_list_end()?;
+          f_2 = Some(val);
+        },
+        3 => {
+          let val = Box::new(OperationMetadata::read_from_in_protocol(i_prot)?);
+          f_3 = Some(val);
+        },
+        _ => {
+          i_prot.skip(field_ident.field_type)?;
+        },
+      };
+      i_prot.read_field_end()?;
+    }
+    i_prot.read_struct_end()?;
+    verify_required_field_exists("Operation.view_id", &f_1)?;
+    verify_required_field_exists("Operation.parents", &f_2)?;
+    verify_required_field_exists("Operation.metadata", &f_3)?;
+    let ret = Operation {
+      view_id: f_1.expect("auto-generated code should have checked for presence of required fields"),
+      parents: f_2.expect("auto-generated code should have checked for presence of required fields"),
+      metadata: f_3.expect("auto-generated code should have checked for presence of required fields"),
+    };
+    Ok(ret)
+  }
+  fn write_to_out_protocol(&self, o_prot: &mut dyn TOutputProtocol) -> thrift::Result<()> {
+    let struct_ident = TStructIdentifier::new("Operation");
+    o_prot.write_struct_begin(&struct_ident)?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("view_id", TType::String, 1))?;
+    o_prot.write_bytes(&self.view_id)?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("parents", TType::List, 2))?;
+    o_prot.write_list_begin(&TListIdentifier::new(TType::String, self.parents.len() as i32))?;
+    for e in &self.parents {
+      o_prot.write_bytes(e)?;
+    }
+    o_prot.write_list_end()?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("metadata", TType::Struct, 3))?;
+    self.metadata.write_to_out_protocol(o_prot)?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_stop()?;
+    o_prot.write_struct_end()
+  }
+}
+
+//
+// Timestamp
+//
+
+#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct Timestamp {
+  pub millis_since_epoch: i64,
+  pub tz_offset: i32,
+}
+
+impl Timestamp {
+  pub fn new(millis_since_epoch: i64, tz_offset: i32) -> Timestamp {
+    Timestamp {
+      millis_since_epoch,
+      tz_offset,
+    }
+  }
+}
+
+impl TSerializable for Timestamp {
+  fn read_from_in_protocol(i_prot: &mut dyn TInputProtocol) -> thrift::Result<Timestamp> {
+    i_prot.read_struct_begin()?;
+    let mut f_1: Option<i64> = None;
+    let mut f_2: Option<i32> = None;
+    loop {
+      let field_ident = i_prot.read_field_begin()?;
+      if field_ident.field_type == TType::Stop {
+        break;
+      }
+      let field_id = field_id(&field_ident)?;
+      match field_id {
+        1 => {
+          let val = i_prot.read_i64()?;
+          f_1 = Some(val);
+        },
+        2 => {
+          let val = i_prot.read_i32()?;
+          f_2 = Some(val);
+        },
+        _ => {
+          i_prot.skip(field_ident.field_type)?;
+        },
+      };
+      i_prot.read_field_end()?;
+    }
+    i_prot.read_struct_end()?;
+    verify_required_field_exists("Timestamp.millis_since_epoch", &f_1)?;
+    verify_required_field_exists("Timestamp.tz_offset", &f_2)?;
+    let ret = Timestamp {
+      millis_since_epoch: f_1.expect("auto-generated code should have checked for presence of required fields"),
+      tz_offset: f_2.expect("auto-generated code should have checked for presence of required fields"),
+    };
+    Ok(ret)
+  }
+  fn write_to_out_protocol(&self, o_prot: &mut dyn TOutputProtocol) -> thrift::Result<()> {
+    let struct_ident = TStructIdentifier::new("Timestamp");
+    o_prot.write_struct_begin(&struct_ident)?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("millis_since_epoch", TType::I64, 1))?;
+    o_prot.write_i64(self.millis_since_epoch)?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("tz_offset", TType::I32, 2))?;
+    o_prot.write_i32(self.tz_offset)?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_stop()?;
+    o_prot.write_struct_end()
+  }
+}
+
+//
+// OperationMetadata
+//
+
+#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct OperationMetadata {
+  pub start_time: Timestamp,
+  pub end_time: Timestamp,
+  pub description: String,
+  pub hostname: String,
+  pub username: String,
+  pub tags: BTreeMap<String, String>,
+}
+
+impl OperationMetadata {
+  pub fn new(start_time: Timestamp, end_time: Timestamp, description: String, hostname: String, username: String, tags: BTreeMap<String, String>) -> OperationMetadata {
+    OperationMetadata {
+      start_time,
+      end_time,
+      description,
+      hostname,
+      username,
+      tags,
+    }
+  }
+}
+
+impl TSerializable for OperationMetadata {
+  fn read_from_in_protocol(i_prot: &mut dyn TInputProtocol) -> thrift::Result<OperationMetadata> {
+    i_prot.read_struct_begin()?;
+    let mut f_1: Option<Timestamp> = None;
+    let mut f_2: Option<Timestamp> = None;
+    let mut f_3: Option<String> = None;
+    let mut f_4: Option<String> = None;
+    let mut f_5: Option<String> = None;
+    let mut f_6: Option<BTreeMap<String, String>> = None;
+    loop {
+      let field_ident = i_prot.read_field_begin()?;
+      if field_ident.field_type == TType::Stop {
+        break;
+      }
+      let field_id = field_id(&field_ident)?;
+      match field_id {
+        1 => {
+          let val = Timestamp::read_from_in_protocol(i_prot)?;
+          f_1 = Some(val);
+        },
+        2 => {
+          let val = Timestamp::read_from_in_protocol(i_prot)?;
+          f_2 = Some(val);
+        },
+        3 => {
+          let val = i_prot.read_string()?;
+          f_3 = Some(val);
+        },
+        4 => {
+          let val = i_prot.read_string()?;
+          f_4 = Some(val);
+        },
+        5 => {
+          let val = i_prot.read_string()?;
+          f_5 = Some(val);
+        },
+        6 => {
+          let map_ident = i_prot.read_map_begin()?;
+          let mut val: BTreeMap<String, String> = BTreeMap::new();
+          for _ in 0..map_ident.size {
+            let map_key_11 = i_prot.read_string()?;
+            let map_val_12 = i_prot.read_string()?;
+            val.insert(map_key_11, map_val_12);
+          }
+          i_prot.read_map_end()?;
+          f_6 = Some(val);
+        },
+        _ => {
+          i_prot.skip(field_ident.field_type)?;
+        },
+      };
+      i_prot.read_field_end()?;
+    }
+    i_prot.read_struct_end()?;
+    verify_required_field_exists("OperationMetadata.start_time", &f_1)?;
+    verify_required_field_exists("OperationMetadata.end_time", &f_2)?;
+    verify_required_field_exists("OperationMetadata.description", &f_3)?;
+    verify_required_field_exists("OperationMetadata.hostname", &f_4)?;
+    verify_required_field_exists("OperationMetadata.username", &f_5)?;
+    verify_required_field_exists("OperationMetadata.tags", &f_6)?;
+    let ret = OperationMetadata {
+      start_time: f_1.expect("auto-generated code should have checked for presence of required fields"),
+      end_time: f_2.expect("auto-generated code should have checked for presence of required fields"),
+      description: f_3.expect("auto-generated code should have checked for presence of required fields"),
+      hostname: f_4.expect("auto-generated code should have checked for presence of required fields"),
+      username: f_5.expect("auto-generated code should have checked for presence of required fields"),
+      tags: f_6.expect("auto-generated code should have checked for presence of required fields"),
+    };
+    Ok(ret)
+  }
+  fn write_to_out_protocol(&self, o_prot: &mut dyn TOutputProtocol) -> thrift::Result<()> {
+    let struct_ident = TStructIdentifier::new("OperationMetadata");
+    o_prot.write_struct_begin(&struct_ident)?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("start_time", TType::Struct, 1))?;
+    self.start_time.write_to_out_protocol(o_prot)?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("end_time", TType::Struct, 2))?;
+    self.end_time.write_to_out_protocol(o_prot)?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("description", TType::String, 3))?;
+    o_prot.write_string(&self.description)?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("hostname", TType::String, 4))?;
+    o_prot.write_string(&self.hostname)?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("username", TType::String, 5))?;
+    o_prot.write_string(&self.username)?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_begin(&TFieldIdentifier::new("tags", TType::Map, 6))?;
+    o_prot.write_map_begin(&TMapIdentifier::new(TType::String, TType::String, self.tags.len() as i32))?;
+    for (k, v) in &self.tags {
+      o_prot.write_string(k)?;
+      o_prot.write_string(v)?;
+    }
+    o_prot.write_map_end()?;
+    o_prot.write_field_end()?;
+    o_prot.write_field_stop()?;
+    o_prot.write_struct_end()
+  }
+}
+
diff --git a/lib/src/simple_op_store_model.thrift b/lib/src/simple_op_store_model.thrift
new file mode 100644
index 0000000000..fc5a6d99d1
--- /dev/null
+++ b/lib/src/simple_op_store_model.thrift
@@ -0,0 +1,81 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+struct RefConflict {
+  1: required list<binary> removes,
+  2: required list<binary> adds,
+}
+
+union RefTarget {
+  1: binary commit_id,
+  2: RefConflict conflict,
+}
+
+struct RemoteBranch {
+  1: required string remote_name,
+  2: required RefTarget target,
+}
+
+struct Branch {
+  1: required string name,
+  // Unset if the branch has been deleted locally.
+  2: optional RefTarget local_target,
+  // TODO: How would we support renaming remotes while having undo work? If
+  // the remote name is stored in config, it's going to become a mess if the
+  // remote is renamed but the configs are left unchanged. Should each remote
+  // be identified (here and in configs) by a UUID?
+  3: required list<RemoteBranch> remote_branches,
+}
+
+struct GitRef {
+  1: required string name,
+  2: required RefTarget target,
+}
+
+struct Tag {
+  1: required string name,
+  2: required RefTarget target,
+}
+
+struct View {
+  1: required list<binary> head_ids,
+  2: required list<binary> public_head_ids,
+  3: required map<string, binary> wc_commit_ids,
+  4: required list<Branch> branches,
+  5: required list<Tag> tags,
+  // Only a subset of the refs. For example, does not include refs/notes/.
+  6: required list<GitRef> git_refs,
+  7: optional binary git_head,
+}
+
+struct Operation {
+  1: required binary view_id,
+  2: required list<binary> parents,
+  3: required OperationMetadata metadata,
+}
+
+// TODO: Share with store.proto? Do we even need the timezone here?
+struct Timestamp {
+  1: required i64 millis_since_epoch,
+  2: required i32 tz_offset,
+}
+
+struct OperationMetadata {
+  1: required Timestamp start_time,
+  2: required Timestamp end_time,
+  3: required string description,
+  4: required string hostname,
+  5: required string username,
+  6: required map<string, string> tags,
+}
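
---

Note (not part of the patch): the core of the new ThriftOpStore is the compact-protocol
round-trip that read_thrift()/write_thrift() wrap. Below is a minimal sketch of that
round-trip, using an in-memory buffer instead of the store's temp files. The helper name
round_trip_example is hypothetical, and since simple_op_store_model is a crate-private
module, this would only compile inside the jujutsu-lib crate itself:

    use std::io::Cursor;

    use thrift::protocol::{TCompactInputProtocol, TCompactOutputProtocol, TSerializable};

    use crate::simple_op_store_model::Timestamp;

    fn round_trip_example() -> thrift::Result<()> {
        let original = Timestamp::new(1_000_000, 3600);

        // Serialize into an in-memory buffer, the way write_thrift() serializes
        // into a NamedTempFile before the file is renamed to its content hash.
        let mut buf: Vec<u8> = vec![];
        {
            let mut o_prot = TCompactOutputProtocol::new(&mut buf);
            original.write_to_out_protocol(&mut o_prot)?;
        }

        // Read it back, the way read_thrift() deserializes from the stored file.
        let mut i_prot = TCompactInputProtocol::new(Cursor::new(buf));
        let restored = Timestamp::read_from_in_protocol(&mut i_prot)?;
        assert_eq!(original, restored);
        Ok(())
    }

Because the operation and view IDs are hashes of the in-memory ContentHash representation
(see hash() above) rather than of these serialized bytes, the IDs still change across the
protobuf-to-Thrift upgrade, which is why upgrade_to_thrift() rewrites the op heads, the
index pointers, and the git-export pointer.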