feat: rsync log files to local machine #39

Merged · 6 commits · Nov 21, 2023
86 changes: 54 additions & 32 deletions Cargo.lock


2 changes: 1 addition & 1 deletion Cargo.toml
@@ -20,11 +20,11 @@ dotenv = "0.15.0"
 env_logger = "0.10.0"
 flate2 = "1.0"
 fs_extra = "1.2.0"
-futures = "~0.3.13"
 log = "0.4"
 indicatif = "0.17.3"
 inquire = "0.6.2"
 rand = "0.8.5"
+rayon = "1.8.0"
 regex = "1.9.5"
 reqwest = { version = "0.11", default-features = false, features = ["json", "rustls-tls"] }
 sha2 = "0.10.7"
3 changes: 3 additions & 0 deletions resources/ansible/logs.yml
@@ -2,6 +2,7 @@
 - name: fetch logs from remote machines
   hosts: all
   ignore_unreachable: yes
+  max_fail_percentage: 10
   tasks:
     # Due to the logs being continually written to, they must first be copied to a temp directory.
     # Otherwise the `fetch` task will produce checksum errors.
@@ -43,6 +44,8 @@

 - name: reorganise local log files
   hosts: localhost
+  max_fail_percentage: 10
+  ignore_unreachable: yes
   tasks:
     - name: reorganise local log files
       command: python3 ../scripts/reorganise_logs.py "{{ env_name }}"
13 changes: 13 additions & 0 deletions resources/scripts/copy_node_logs.sh
@@ -0,0 +1,13 @@
#!/usr/bin/env bash

# Not currently used; pair it with the logs rsync if we need some resiliency there.

# Clear tmpdir if it exists
if [ -d "tmpdir" ]; then
  rm -rf tmpdir/*
else
  mkdir tmpdir
fi

# Find all .log files and copy them to tmpdir preserving the same directory structure
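# Filter note: rsync applies rules in order, so --include='*.log*' matches log
# files first, the 'hide,! */' rule then hides every other non-directory, and
# -m (--prune-empty-dirs) drops directories that would arrive empty.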
rsync -avm --include='*.log*' -f 'hide,! */' ~/.local/share/safe/node/ tmpdir/
84 changes: 47 additions & 37 deletions src/lib.rs
@@ -20,25 +20,30 @@ pub mod terraform;
 #[cfg(test)]
 mod tests;

-use crate::ansible::{AnsibleRunner, AnsibleRunnerInterface};
-use crate::error::{Error, Result};
-use crate::rpc_client::{RpcClient, RpcClientInterface};
-use crate::s3::{S3Repository, S3RepositoryInterface};
-use crate::ssh::{SshClient, SshClientInterface};
-use crate::terraform::{TerraformRunner, TerraformRunnerInterface};
+use crate::{
+    ansible::{AnsibleRunner, AnsibleRunnerInterface},
+    error::{Error, Result},
+    rpc_client::{RpcClient, RpcClientInterface},
+    s3::{S3Repository, S3RepositoryInterface},
+    ssh::{SshClient, SshClientInterface},
+    terraform::{TerraformRunner, TerraformRunnerInterface},
+};
 use flate2::read::GzDecoder;
-use futures::future::join_all;
+use indicatif::{ProgressBar, ProgressStyle};
 use log::debug;
 use rand::Rng;
+use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
 use serde::{Deserialize, Serialize};
 use serde_json::json;
-use std::fs::File;
-use std::io::{BufRead, BufReader, BufWriter, Write};
-use std::net::{IpAddr, SocketAddr};
-use std::os::unix::fs::PermissionsExt;
-use std::path::{Path, PathBuf};
-use std::process::{Command, Stdio};
-use std::time::{Duration, Instant};
+use std::{
+    fs::File,
+    io::{BufRead, BufReader, BufWriter, Write},
+    net::{IpAddr, SocketAddr},
+    os::unix::fs::PermissionsExt,
+    path::{Path, PathBuf},
+    process::{Command, Stdio},
+    time::{Duration, Instant},
+};
 use tar::Archive;

 #[derive(Debug, Clone)]
@@ -712,34 +717,28 @@ impl TestnetDeploy {
         // The scripts are relative to the `resources` directory, so we need to change the current
         // working directory back to that location first.
         std::env::set_current_dir(self.working_directory_path.clone())?;
-        let mut peers = Vec::new();
-        println!("Retrieving sample peers");
-        for batch in remaining_nodes_inventory.chunks(20) {
-            let mut handles = Vec::new();
-            for (_, ip_address) in batch {
+        println!("Retrieving sample peers. This can take a minute.");
+        // Todo: RPC into nodes to fetch the multiaddr.
+        let peers = remaining_nodes_inventory
+            .par_iter()
+            .filter_map(|(vm_name, ip_address)| {
                 let ip_address = *ip_address;
                 let ssh_client_clone = self.ssh_client.clone_box();
-                let handle = tokio::spawn(async move {
-                    ssh_client_clone.run_script(
-                        ip_address,
-                        "safe",
-                        PathBuf::from("scripts").join("get_peer_multiaddr.sh"),
-                        true,
-                    )
-                });
-                handles.push(handle);
-            }
-
-            for result in join_all(handles).await {
-                match result? {
-                    Ok(output) => {
-                        peers.extend(output);
-                    }
-                    Err(err) => println!("Failed to SSH with err: {err:?}"),
-                }
-            }
-        }
+                match ssh_client_clone.run_script(
+                    ip_address,
+                    "safe",
+                    PathBuf::from("scripts").join("get_peer_multiaddr.sh"),
+                    true,
+                ) {
+                    Ok(output) => Some(output),
+                    Err(err) => {
+                        println!("Failed to SSH into {vm_name:?}: {ip_address} with err: {err:?}");
+                        None
+                    }
+                }
+            })
+            .flatten()
+            .collect::<Vec<_>>();
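For context on the hunk above: swapping tokio::spawn plus join_all for rayon's par_iter fits these blocking SSH calls, since rayon runs the closures on a thread pool with no async runtime involved, and filter_map plus flatten drops failed hosts while concatenating each host's output lines. A minimal, self-contained sketch of the same shape, assuming a hypothetical fetch_lines function in place of the SSH call:

use rayon::iter::{IntoParallelRefIterator, ParallelIterator};

// Hypothetical stand-in for the blocking SSH call: one Vec of lines per host.
fn fetch_lines(host: &str) -> Result<Vec<String>, String> {
    if host.is_empty() {
        return Err("no host".to_string());
    }
    Ok(vec![format!("peer-from-{host}")])
}

fn main() {
    let hosts = vec!["10.0.0.1".to_string(), String::new(), "10.0.0.2".to_string()];
    // Fan the calls out across the rayon thread pool; log and drop failures,
    // then flatten the per-host Vecs into one list -- the same shape as the PR.
    let peers: Vec<String> = hosts
        .par_iter()
        .filter_map(|host| match fetch_lines(host) {
            Ok(lines) => Some(lines),
            Err(err) => {
                println!("Failed to fetch from {host:?}: {err}");
                None
            }
        })
        .flatten()
        .collect();
    println!("{peers:?}");
}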

         // The VM list includes the genesis node and the build machine, hence the subtraction of 2
         // from the total VM count. After that, add one node for genesis, since this machine only

@@ -1147,3 +1146,14 @@ fn print_duration(duration: Duration) {
     let seconds = total_seconds % 60;
     debug!("Time taken: {} minutes and {} seconds", minutes, seconds);
 }
+
+pub fn get_progress_bar(length: u64) -> Result<ProgressBar> {
+    let progress_bar = ProgressBar::new(length);
+    progress_bar.set_style(
+        ProgressStyle::default_bar()
+            .template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len}")?
+            .progress_chars("#>-"),
+    );
+    progress_bar.enable_steady_tick(Duration::from_millis(100));
+    Ok(progress_bar)
+}
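A hedged usage sketch for the new helper; the loop and the inventory variable below are illustrative, not from this PR:

// Illustrative only: one tick per work item.
let progress_bar = get_progress_bar(inventory.len() as u64)?;
for _item in &inventory {
    // ... per-node work would go here ...
    progress_bar.inc(1);
}
progress_bar.finish_with_message("done");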