Keep the analysis queue in a context, rather than being global
Closes #765
nrc committed Mar 17, 2018
1 parent 2324103 commit 567fb6d
Showing 2 changed files with 44 additions and 15 deletions.
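In short: the `AnalysisQueue` was previously a process-wide `lazy_static!` global (`ANALYSIS_QUEUE` in `post_build.rs`), and `PostBuildHandler` enqueued onto that global directly. After this commit, `InitActionContext` constructs the queue once per initialized context and hands an `Arc<AnalysisQueue>` clone to each `PostBuildHandler`. A simplified sketch of the resulting ownership (field sets reduced to the queue wiring only, and `build_handler` is a hypothetical stand-in for the real construction site):

```rust
use std::sync::Arc;

// Stand-ins for the real RLS types; only the queue wiring is shown.
pub struct AnalysisQueue;

impl AnalysisQueue {
    pub fn init() -> AnalysisQueue {
        // The real implementation also spawns the worker thread here.
        AnalysisQueue
    }
}

pub struct PostBuildHandler {
    // Each build's handler shares the context's queue.
    pub analysis_queue: Arc<AnalysisQueue>,
}

pub struct InitActionContext {
    // One queue per initialized context, instead of one per process.
    analysis_queue: Arc<AnalysisQueue>,
}

impl InitActionContext {
    pub fn new() -> InitActionContext {
        InitActionContext {
            analysis_queue: Arc::new(AnalysisQueue::init()),
        }
    }

    pub fn build_handler(&self) -> PostBuildHandler {
        PostBuildHandler {
            // Cloning the Arc shares the existing queue; it does not create a new one.
            analysis_queue: self.analysis_queue.clone(),
        }
    }
}
```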
7 changes: 6 additions & 1 deletion src/actions/mod.rs
@@ -21,7 +21,7 @@ use url::Url;
use span;
use Span;

use actions::post_build::{BuildResults, PostBuildHandler};
use actions::post_build::{BuildResults, PostBuildHandler, AnalysisQueue};
use actions::progress::{BuildProgressNotifier, BuildDiagnosticsNotifier};
use build::*;
use lsp_data;
@@ -117,6 +117,8 @@ impl ActionContext
pub struct InitActionContext {
analysis: Arc<AnalysisHost>,
vfs: Arc<Vfs>,
// Queues analysis jobs so that we don't over-use the CPU.
analysis_queue: Arc<AnalysisQueue>,

current_project: PathBuf,

@@ -173,8 +175,10 @@ impl InitActionContext
current_project: PathBuf,
) -> InitActionContext {
let build_queue = BuildQueue::new(vfs.clone(), config.clone());
let analysis_queue = Arc::new(AnalysisQueue::init());
InitActionContext {
analysis,
analysis_queue,
vfs,
config,
current_project,
@@ -220,6 +224,7 @@ impl InitActionContext
let config = self.config.lock().unwrap();
PostBuildHandler {
analysis: self.analysis.clone(),
analysis_queue: self.analysis_queue.clone(),
previous_build_results: self.previous_build_results.clone(),
project_path: project_path.to_owned(),
show_warnings: config.show_warnings,
52 changes: 38 additions & 14 deletions src/actions/post_build.rs
@@ -15,6 +15,7 @@
use std::collections::HashMap;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::panic::RefUnwindSafe;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
@@ -35,6 +36,7 @@ pub type BuildResults = HashMap<PathBuf, Vec<(Diagnostic, Vec<Suggestion>)>>;

pub struct PostBuildHandler {
pub analysis: Arc<AnalysisHost>,
pub analysis_queue: Arc<AnalysisQueue>,
pub previous_build_results: Arc<Mutex<BuildResults>>,
pub project_path: PathBuf,
pub show_warnings: bool,
@@ -54,9 +56,10 @@ impl PostBuildHandler

// Emit appropriate diagnostics using the ones from build.
self.handle_messages(&cwd, &messages);
let analysis_queue = self.analysis_queue.clone();

let job = Job::new(self, new_analysis, cwd);
ANALYSIS_QUEUE.enqueue(job);
analysis_queue.enqueue(job);
}
BuildResult::Squashed => {
trace!("build - Squashed");
@@ -172,22 +175,18 @@ impl PostBuildHandler

// Queue up analysis tasks and execute them on the same thread (this is slower
// than executing in parallel, but allows us to skip indexing tasks).
struct AnalysisQueue {
pub struct AnalysisQueue {
// The cwd of the previous build.
// !!! Do not take this lock without holding the lock on `queue` !!!
cur_cwd: Mutex<Option<PathBuf>>,
queue: Arc<Mutex<Vec<Job>>>,
queue: Arc<Mutex<Vec<QueuedJob>>>,
// Handle to the worker thread where we handle analysis tasks.
worker_thread: Thread,
}

lazy_static! {
static ref ANALYSIS_QUEUE: AnalysisQueue = AnalysisQueue::init();
}

impl AnalysisQueue {
// Create a new queue and start the worker thread.
fn init() -> AnalysisQueue {
pub fn init() -> AnalysisQueue {
let queue = Arc::new(Mutex::new(Vec::new()));
let queue_clone = queue.clone();
let worker_thread = thread::spawn(move || AnalysisQueue::run_worker_thread(queue_clone))
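The `cur_cwd` comment in the hunk above states a lock-ordering rule: never lock `cur_cwd` unless the `queue` lock is already held. Whatever the original motivation, keeping one global acquisition order for the two mutexes also rules out the classic deadlock where two threads wait on each other's lock. A minimal illustration of honouring that order (field types simplified, and `record_cwd` is a hypothetical helper, not a real RLS method):

```rust
use std::path::PathBuf;
use std::sync::Mutex;

struct AnalysisQueue {
    cur_cwd: Mutex<Option<PathBuf>>,
    queue: Mutex<Vec<String>>, // element type simplified for the sketch
}

impl AnalysisQueue {
    fn record_cwd(&self, cwd: PathBuf) {
        // Correct order: take `queue` first, then `cur_cwd`.
        let _queue = self.queue.lock().unwrap();
        let mut cur_cwd = self.cur_cwd.lock().unwrap();
        *cur_cwd = Some(cwd);
    }
}
```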
@@ -212,18 +211,20 @@ impl AnalysisQueue
// Remove any analysis jobs which this job obsoletes.
trace!("Pre-prune queue len: {}", queue.len());
if let Some(hash) = job.hash {
let mut squashed = queue.drain_filter(|j| j.hash == Some(hash));
squashed.for_each(|j| j.handler.finalise());
queue.drain_filter(|j| match *j {
QueuedJob::Job(ref j) if j.hash == Some(hash) => true,
_ => false,
}).for_each(|j| j.unwrap_job().handler.finalise())
}
trace!("Post-prune queue len: {}", queue.len());

queue.push(job);
queue.push(QueuedJob::Job(job));
}

self.worker_thread.unpark();
}

fn run_worker_thread(queue: Arc<Mutex<Vec<Job>>>) {
fn run_worker_thread(queue: Arc<Mutex<Vec<QueuedJob>>>) {
loop {
let job = {
let mut queue = queue.lock().unwrap();
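In the `enqueue` body shown above, a newly arrived job makes any queued job with the same hash obsolete: those stale entries are drained out of the queue and `finalise`d before the new job is pushed and the worker is unparked. The original uses the then-nightly `Vec::drain_filter` (later renamed `extract_if`); the sketch below expresses the same pruning on stable Rust with simplified types:

```rust
// Pruning sketch with simplified types; the real code finalises the job's
// `PostBuildHandler` rather than the job itself.
struct Job {
    hash: Option<u64>,
}

impl Job {
    fn finalise(self) {
        // Wind down the superseded job.
    }
}

enum QueuedJob {
    Job(Job),
    Terminate,
}

fn enqueue_pruned(queue: &mut Vec<QueuedJob>, job: Job) {
    if let Some(hash) = job.hash {
        // The new job supersedes any queued job with the same hash: finalise
        // and drop those, keep everything else.
        for queued in std::mem::take(queue) {
            match queued {
                QueuedJob::Job(old) if old.hash == Some(hash) => old.finalise(),
                other => queue.push(other),
            }
        }
    }
    queue.push(QueuedJob::Job(job));
}
```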
@@ -234,13 +235,36 @@
}
};
match job {
Some(job) => job.process(),
Some(QueuedJob::Terminate) => return,
Some(QueuedJob::Job(job)) => job.process(),
None => thread::park(),
}
}
}
}

impl RefUnwindSafe for AnalysisQueue {}

impl Drop for AnalysisQueue {
fn drop(&mut self) {
let _ = self.queue.lock().map(|mut q| q.push(QueuedJob::Terminate));
}
}

enum QueuedJob {
Job(Job),
Terminate,
}

impl QueuedJob {
fn unwrap_job(self) -> Job {
match self {
QueuedJob::Job(job) => job,
QueuedJob::Terminate => panic!("Expected Job"),
}
}
}

// An analysis task to be queued and executed by `AnalysisQueue`.
struct Job {
handler: PostBuildHandler,
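The hunk above also shows the queue's lifecycle: the worker thread pops one `QueuedJob` at a time, parks when the queue is empty, is unparked by `enqueue`, and returns when it sees `QueuedJob::Terminate`, which the `Drop` impl pushes when a context's queue goes away. A condensed, self-contained sketch of that threading model (trivial `Job`, FIFO pop via `remove(0)`, and an extra `unpark` in `drop` are all simplifications of this sketch, not claims about the real code):

```rust
use std::sync::{Arc, Mutex};
use std::thread::{self, Thread};

struct Job;

impl Job {
    fn process(self) {
        // Replay analysis data, publish diagnostics, etc.
    }
}

enum QueuedJob {
    Job(Job),
    Terminate,
}

struct AnalysisQueue {
    queue: Arc<Mutex<Vec<QueuedJob>>>,
    // Handle used to wake the worker when new work arrives.
    worker_thread: Thread,
}

impl AnalysisQueue {
    fn init() -> AnalysisQueue {
        let queue = Arc::new(Mutex::new(Vec::new()));
        let queue_clone = Arc::clone(&queue);
        let worker_thread = thread::spawn(move || Self::run_worker_thread(queue_clone))
            .thread()
            .clone();
        AnalysisQueue { queue, worker_thread }
    }

    fn enqueue(&self, job: Job) {
        self.queue.lock().unwrap().push(QueuedJob::Job(job));
        // Wake the worker if it is parked on an empty queue.
        self.worker_thread.unpark();
    }

    fn run_worker_thread(queue: Arc<Mutex<Vec<QueuedJob>>>) {
        loop {
            // Take at most one job, releasing the lock before processing so
            // `enqueue` never waits behind a long-running analysis.
            let next = {
                let mut queue = queue.lock().unwrap();
                if queue.is_empty() {
                    None
                } else {
                    Some(queue.remove(0))
                }
            };
            match next {
                Some(QueuedJob::Terminate) => return,
                Some(QueuedJob::Job(job)) => job.process(),
                None => thread::park(),
            }
        }
    }
}

impl Drop for AnalysisQueue {
    fn drop(&mut self) {
        // Ask the worker to exit, then wake it in case it is parked.
        if let Ok(mut q) = self.queue.lock() {
            q.push(QueuedJob::Terminate);
        }
        self.worker_thread.unpark();
    }
}
```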
@@ -449,7 +473,7 @@ fn format_notes(children: &[CompilerMessage], primary: &DiagnosticSpan) -> Optio
notes.push_str(&format!("\n{}: {}", level, lines.next().unwrap()));
for line in lines {
notes.push_str(&format!(
"\n{:indent$line}",
"\n{:indent$}{line}",
"",
indent = level.len() + 2,
line = line,
