diff --git a/components/test_util/src/lib.rs b/components/test_util/src/lib.rs index 2c61ca579dc..bf132eba257 100644 --- a/components/test_util/src/lib.rs +++ b/components/test_util/src/lib.rs @@ -16,7 +16,6 @@ extern crate test; extern crate rand; -#[macro_use] extern crate slog; extern crate slog_scope; extern crate time; @@ -37,7 +36,7 @@ pub use security::*; pub fn setup_for_ci() { let guard = if env::var("CI").is_ok() && env::var("LOG_FILE").is_ok() { - Some(logging::init_log()) + Some(logging::init_log_for_test()) } else { None }; diff --git a/components/test_util/src/logging.rs b/components/test_util/src/logging.rs index b29891dbc02..c628bb854bf 100644 --- a/components/test_util/src/logging.rs +++ b/components/test_util/src/logging.rs @@ -67,7 +67,7 @@ impl Drop for CaseTraceLogger { } // A help function to initial logger. -pub fn init_log() -> GlobalLoggerGuard { +pub fn init_log_for_test() -> GlobalLoggerGuard { let output = env::var("LOG_FILE").ok(); let level = tikv::util::logger::get_level_by_string( &env::var("LOG_LEVEL").unwrap_or_else(|_| "debug".to_owned()), @@ -75,14 +75,32 @@ pub fn init_log() -> GlobalLoggerGuard { let writer = output.map(|f| Mutex::new(File::create(f).unwrap())); // we don't mind set it multiple times. let drain = CaseTraceLogger { f: writer }; + + // Collects the following targets. + const ENABLED_TARGETS: &[&str] = &[ + "tikv::", + "tests::", + "benches::", + "integrations::", + "failpoints::", + "raft::", + // Collects logs for test components. + "test_", + ]; + let filtered = drain.filter(|record| { + ENABLED_TARGETS + .iter() + .any(|target| record.module().starts_with(target)) + }); + + // CaseTraceLogger relies on test's thread name, however slog_async has + // its own thread, and the name is "". + // TODO: Enable the slog_async when the [Custom test frameworks][1] is mature, + // and hook the slog_async logger to every test cases. 
// // [1]: https://github.com/rust-lang/rfcs/blob/master/text/2318-custom-test-frameworks.md - // - // let drain = slog_async::Async::new(drain).build().fuse(); - let logger = slog::Logger::root_typed(drain, slog_o!()); - tikv::util::logger::init_log_for_tikv_only(logger, level).unwrap() + tikv::util::logger::init_log( + filtered, level, false, // disable async drainer + true, // init std log + ).unwrap() } diff --git a/src/bin/tikv-ctl.rs b/src/bin/tikv-ctl.rs index 034ac1bc89b..f413eff0b52 100644 --- a/src/bin/tikv-ctl.rs +++ b/src/bin/tikv-ctl.rs @@ -22,24 +22,22 @@ extern crate kvproto; extern crate libc; #[macro_use] extern crate log; -extern crate protobuf; -extern crate raft; -extern crate rocksdb; -#[macro_use] -extern crate tikv; -extern crate toml; -#[macro_use(slog_o, slog_kv)] -extern crate slog; extern crate hex; #[cfg(unix)] extern crate nix; +extern crate protobuf; +extern crate raft; extern crate rand; +extern crate rocksdb; #[cfg(unix)] extern crate signal; +extern crate slog; extern crate slog_async; extern crate slog_scope; extern crate slog_stdlog; extern crate slog_term; +extern crate tikv; +extern crate toml; mod util; diff --git a/src/bin/tikv-importer.rs b/src/bin/tikv-importer.rs index aafc7b5fd9d..c3c4c76bab8 100644 --- a/src/bin/tikv-importer.rs +++ b/src/bin/tikv-importer.rs @@ -22,8 +22,6 @@ extern crate jemallocator; extern crate libc; #[macro_use] extern crate log; -#[macro_use(slog_o, slog_kv)] -extern crate slog; #[cfg(unix)] extern crate nix; extern crate prometheus; @@ -31,11 +29,11 @@ extern crate rocksdb; extern crate serde_json; #[cfg(unix)] extern crate signal; +extern crate slog; extern crate slog_async; extern crate slog_scope; extern crate slog_stdlog; extern crate slog_term; -#[macro_use] extern crate tikv; extern crate toml; @@ -100,7 +98,7 @@ fn main() { .get_matches(); let config = setup_config(&matches); - let guard = init_log(&config); + let guard = initial_logger(&config); tikv_util::set_exit_hook(false, Some(guard), 
&config.storage.data_dir); initial_metric(&config.metric, None); diff --git a/src/bin/tikv-server.rs b/src/bin/tikv-server.rs index 94c65559b76..c8b6d68ad72 100644 --- a/src/bin/tikv-server.rs +++ b/src/bin/tikv-server.rs @@ -22,21 +22,19 @@ extern crate jemallocator; extern crate libc; #[macro_use] extern crate log; -#[macro_use(slog_o, slog_kv)] -extern crate slog; +extern crate hyper; #[cfg(unix)] extern crate nix; extern crate rocksdb; extern crate serde_json; #[cfg(unix)] extern crate signal; +extern crate slog; extern crate slog_async; extern crate slog_scope; extern crate slog_stdlog; extern crate slog_term; -#[macro_use] extern crate tikv; -extern crate hyper; extern crate toml; #[cfg(unix)] @@ -409,8 +407,8 @@ fn main() { // Sets the global logger ASAP. // It is okay to use the config w/o `validata()`, - // because `init_log()` handles various conditions. - let guard = init_log(&config); + // because `initial_logger()` handles various conditions. + let guard = initial_logger(&config); tikv_util::set_exit_hook(false, Some(guard), &config.storage.data_dir); // Print version information. diff --git a/src/bin/util/setup.rs b/src/bin/util/setup.rs index 2128ed7f245..3fc141aad7f 100644 --- a/src/bin/util/setup.rs +++ b/src/bin/util/setup.rs @@ -12,31 +12,19 @@ // limitations under the License. use std::env; -use std::io::BufWriter; use std::process; use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT}; use chrono; use clap::ArgMatches; -use slog::{Drain, Logger}; -use slog_async::{Async, OverflowStrategy}; use slog_scope::GlobalLoggerGuard; -use slog_term::{PlainDecorator, TermDecorator}; use tikv::config::{MetricConfig, TiKvConfig}; -use tikv::util; use tikv::util::collections::HashMap; -use tikv::util::file_log::RotatingFileLogger; -use tikv::util::logger; +use tikv::util::{self, logger}; // A workaround for checking if log is initialized. pub static LOG_INITIALIZED: AtomicBool = ATOMIC_BOOL_INIT; -// Default is 128. 
-// Extended since blocking is set, and we don't want to block very often. -const SLOG_CHANNEL_SIZE: usize = 10240; -// Default is DropAndReport. -// It is not desirable to have dropped logs in our use case. -const SLOG_CHANNEL_OVERFLOW_STRATEGY: OverflowStrategy = OverflowStrategy::Block; macro_rules! fatal { ($lvl:expr, $($arg:tt)+) => ({ @@ -50,43 +38,27 @@ macro_rules! fatal { } #[allow(dead_code)] -pub fn init_log(config: &TiKvConfig) -> GlobalLoggerGuard { +pub fn initial_logger(config: &TiKvConfig) -> GlobalLoggerGuard { let log_rotation_timespan = chrono::Duration::from_std( config.log_rotation_timespan.clone().into(), ).expect("config.log_rotation_timespan is an invalid duration."); let guard = if config.log_file.is_empty() { - let decorator = TermDecorator::new().build(); - let drain = logger::TikvFormat::new(decorator).fuse(); - let drain = Async::new(drain) - .chan_size(SLOG_CHANNEL_SIZE) - .overflow_strategy(SLOG_CHANNEL_OVERFLOW_STRATEGY) - .thread_name(thd_name!("term-slogger")) - .build() - .fuse(); - let logger = Logger::root_typed(drain, slog_o!()); - logger::init_log(logger, config.log_level).unwrap_or_else(|e| { + let drainer = logger::term_drainer(); + // use async drainer and init std log. 
+ logger::init_log(drainer, config.log_level, true, true).unwrap_or_else(|e| { fatal!("failed to initialize log: {:?}", e); }) } else { - let logger = BufWriter::new( - RotatingFileLogger::new(&config.log_file, log_rotation_timespan).unwrap_or_else(|e| { + let drainer = + logger::file_drainer(&config.log_file, log_rotation_timespan).unwrap_or_else(|e| { fatal!( "failed to initialize log with file {:?}: {:?}", config.log_file, e ); - }), - ); - let decorator = PlainDecorator::new(logger); - let drain = logger::TikvFormat::new(decorator).fuse(); - let drain = Async::new(drain) - .chan_size(SLOG_CHANNEL_SIZE) - .overflow_strategy(SLOG_CHANNEL_OVERFLOW_STRATEGY) - .thread_name(thd_name!("file-slogger")) - .build() - .fuse(); - let logger = Logger::root_typed(drain, slog_o!()); - logger::init_log(logger, config.log_level).unwrap_or_else(|e| { + }); + // use async drainer and init std log. + logger::init_log(drainer, config.log_level, true, true).unwrap_or_else(|e| { fatal!("failed to initialize log: {:?}", e); }) }; diff --git a/src/util/file_log.rs b/src/util/logger/file_log.rs similarity index 100% rename from src/util/file_log.rs rename to src/util/logger/file_log.rs diff --git a/src/util/logger.rs b/src/util/logger/mod.rs similarity index 81% rename from src/util/logger.rs rename to src/util/logger/mod.rs index c5a4eeb45a2..e3152a81ade 100644 --- a/src/util/logger.rs +++ b/src/util/logger/mod.rs @@ -11,64 +11,87 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+mod file_log; + use std::fmt; -use std::io::{self, Write}; -use std::panic::{RefUnwindSafe, UnwindSafe}; +use std::io::{self, BufWriter, Write}; use std::path::Path; +use std::sync::Mutex; -use chrono; +use chrono::{self, Duration}; use grpc; -use log; -use log::SetLoggerError; +use log::{self, SetLoggerError}; use slog::{self, Drain, Key, OwnedKVList, Record, KV}; +use slog_async::{Async, OverflowStrategy}; use slog_scope::{self, GlobalLoggerGuard}; use slog_stdlog; -use slog_term::{Decorator, RecordDecorator}; +use slog_term::{Decorator, PlainDecorator, RecordDecorator, TermDecorator}; + +use self::file_log::RotatingFileLogger; pub use slog::Level; +// Default is 128. +// Extended since blocking is set, and we don't want to block very often. +const SLOG_CHANNEL_SIZE: usize = 10240; +// Default is DropAndReport. +// It is not desirable to have dropped logs in our use case. +const SLOG_CHANNEL_OVERFLOW_STRATEGY: OverflowStrategy = OverflowStrategy::Block; const TIMESTAMP_FORMAT: &str = "%Y/%m/%d %H:%M:%S%.3f"; -const ENABLED_TARGETS: &[&str] = &[ - "tikv::", - "tests::", - "benches::", - "integrations::", - "failpoints::", - "raft::", - // Collects logs of components. 
- "test_", -]; - -pub fn init_log(drain: D, level: Level) -> Result + +pub fn init_log( + drain: D, + level: Level, + use_async: bool, + init_stdlog: bool, +) -> Result where - D: Drain + Send + Sync + 'static + RefUnwindSafe + UnwindSafe, - ::Err: ::std::fmt::Debug, + D: Drain + Send + 'static, + ::Err: ::std::fmt::Debug, { grpc::redirect_log(); - let drain = drain.filter_level(level).fuse(); - - let logger = slog::Logger::root(drain, slog_o!()); + let logger = if use_async { + let drain = Async::new(drain.fuse()) + .chan_size(SLOG_CHANNEL_SIZE) + .overflow_strategy(SLOG_CHANNEL_OVERFLOW_STRATEGY) + .thread_name(thd_name!("slogger")) + .build() + .fuse(); + slog::Logger::root(drain, slog_o!()) + } else { + let drain = Mutex::new(drain).fuse(); + slog::Logger::root(drain, slog_o!()) + }; let guard = slog_scope::set_global_logger(logger); - slog_stdlog::init_with_level(convert_slog_level_to_log_level(level))?; + if init_stdlog { + slog_stdlog::init_with_level(convert_slog_level_to_log_level(level))?; + } + Ok(guard) } -pub fn init_log_for_tikv_only( - drain: D, - level: Level, -) -> Result -where - D: Drain + Send + Sync + 'static + RefUnwindSafe + UnwindSafe, - ::Err: ::std::fmt::Debug, -{ - let filtered = drain.filter(|record| { - ENABLED_TARGETS - .iter() - .any(|target| record.module().starts_with(target)) - }); - init_log(filtered, level) +/// A simple alias to `PlainDecorator>`. +// Avoid clippy type_complexity lint. +pub type RotatingFileDecorator = PlainDecorator>; + +/// Constructs a new file drainer which outputs log to a file at the specified +/// path. The file drainer rotates for the specified timespan. +pub fn file_drainer( + path: impl AsRef, + rotation_timespan: Duration, +) -> io::Result> { + let logger = BufWriter::new(RotatingFileLogger::new(path, rotation_timespan)?); + let decorator = PlainDecorator::new(logger); + let drain = TikvFormat::new(decorator); + Ok(drain) +} + +/// Constructs a new terminal drainer which outputs logs to stderr. 
+pub fn term_drainer() -> TikvFormat { + let decorator = TermDecorator::new().build(); + TikvFormat::new(decorator) } pub fn get_level_by_string(lv: &str) -> Option { @@ -367,7 +390,6 @@ impl<'a> slog::ser::Serializer for Serializer<'a> { #[test] fn test_log_format() { use chrono::{TimeZone, Utc}; - use slog::Logger; use slog_term::PlainSyncDecorator; use std::cell::RefCell; use std::io::Write; @@ -392,7 +414,7 @@ fn test_log_format() { // Make the log let decorator = PlainSyncDecorator::new(TestWriter); let drain = TikvFormat::new(decorator).fuse(); - let logger = Logger::root_typed(drain, slog_o!()); + let logger = slog::Logger::root_typed(drain, slog_o!()); slog_crit!(logger, "test"); // Check the logged value. @@ -401,10 +423,15 @@ fn test_log_format() { let output = from_utf8(&*buffer).unwrap(); // This functions roughly as an assert to make sure that the log level and file name is logged. - let mut split_iter = output.split(" CRIT logger.rs:"); + let mut split_iter = output.split(" CRIT mod.rs:"); // The pre-split portion will contain a timestamp which we can check by parsing and ensuring it is valid. let datetime = split_iter.next().unwrap(); - assert!(Utc.datetime_from_str(datetime, TIMESTAMP_FORMAT).is_ok()); + assert!( + Utc.datetime_from_str(datetime, TIMESTAMP_FORMAT).is_ok(), + "{:?} | {:?}", + output, + datetime + ); // The post-split portion will contain the line number of the file (which we validate is a number), and then the log message. 
let line_and_message = split_iter.next().unwrap(); let mut split_iter = line_and_message.split(": "); diff --git a/src/util/mod.rs b/src/util/mod.rs index de13a931278..42f3290352c 100644 --- a/src/util/mod.rs +++ b/src/util/mod.rs @@ -32,7 +32,6 @@ pub mod codec; pub mod collections; pub mod config; pub mod file; -pub mod file_log; pub mod future; pub mod futurepool; pub mod io_limiter; diff --git a/tests/failpoints/cases/test_merge.rs b/tests/failpoints/cases/test_merge.rs index 9574d9900a2..4e085153610 100644 --- a/tests/failpoints/cases/test_merge.rs +++ b/tests/failpoints/cases/test_merge.rs @@ -121,7 +121,6 @@ fn test_node_merge_rollback() { #[test] fn test_node_merge_restart() { let _guard = ::setup(); - // ::util::init_log(); let mut cluster = new_node_cluster(0, 3); configure_for_merge(&mut cluster); cluster.run(); diff --git a/tests/integrations/raftstore/test_merge.rs b/tests/integrations/raftstore/test_merge.rs index d36f8cff9d3..d8b4d1dd606 100644 --- a/tests/integrations/raftstore/test_merge.rs +++ b/tests/integrations/raftstore/test_merge.rs @@ -116,7 +116,6 @@ fn test_node_base_merge() { /// Test whether merge will be aborted if prerequisites is not met. #[test] fn test_node_merge_prerequisites_check() { - // ::init_log(); let mut cluster = new_node_cluster(0, 3); configure_for_merge(&mut cluster); let pd_client = Arc::clone(&cluster.pd_client); @@ -186,7 +185,6 @@ fn test_node_merge_prerequisites_check() { /// Test if stale peer will be handled properly after merge. #[test] fn test_node_check_merged_message() { - // ::init_log(); let mut cluster = new_node_cluster(0, 4); configure_for_merge(&mut cluster); let pd_client = Arc::clone(&cluster.pd_client); @@ -345,7 +343,6 @@ fn test_node_merge_slow_split(is_right_derive: bool) { /// Test various cases that a store is isolated during merge. 
#[test] fn test_node_merge_dist_isolation() { - // ::init_log(); let mut cluster = new_node_cluster(0, 3); configure_for_merge(&mut cluster); let pd_client = Arc::clone(&cluster.pd_client); @@ -421,7 +418,6 @@ fn test_node_merge_dist_isolation() { /// way behind others so others have to send it a snapshot. #[test] fn test_node_merge_brain_split() { - // ::init_log(); let mut cluster = new_node_cluster(0, 3); configure_for_merge(&mut cluster); cluster.cfg.raft_store.raft_log_gc_threshold = 12;