Merge branch 'main' into sokra/trace-server-perf
sokra committed May 17, 2024
2 parents 447986b + a562144 commit 93bdec0
Showing 131 changed files with 1,014 additions and 950 deletions.
404 changes: 194 additions & 210 deletions Cargo.lock

Large diffs are not rendered by default.

25 changes: 15 additions & 10 deletions Cargo.toml
@@ -77,8 +77,13 @@ turbopack-wasi = [
[workspace.lints.clippy]
too_many_arguments = "allow"

[profile.dev.package.turbo-tasks-macros]
opt-level = 3
# This crate is particularly sensitive to compiler optimizations
[profile.dev.package.turbo-tasks-memory]
opt-level = 1

# Set the options for dependencies (not crates in the workspace); this mostly impacts cold builds
[profile.dev.package."*"]
opt-level = 1

# Set the settings for build scripts and proc-macros.
[profile.dev.build-override]
@@ -103,17 +108,17 @@ async-recursion = "1.0.2"
# Keep consistent with preset_env_base through swc_core
browserslist-rs = { version = "0.15.0" }
miette = { version = "5.10.0", features = ["fancy"] }
mdxjs = "0.1.23"
modularize_imports = { version = "0.68.9" }
styled_components = { version = "0.96.8" }
styled_jsx = { version = "0.73.13" }
swc_core = { version = "0.90.33", features = [
mdxjs = "0.2.2"
modularize_imports = { version = "0.68.14" }
styled_components = { version = "0.96.15" }
styled_jsx = { version = "0.73.20" }
swc_core = { version = "0.92.5", features = [
"ecma_loader_lru",
"ecma_loader_parking_lot",
] }
swc_emotion = { version = "0.72.8" }
swc_relay = { version = "0.44.8" }
testing = { version = "0.35.23" }
swc_emotion = { version = "0.72.13" }
swc_relay = { version = "0.44.13" }
testing = { version = "0.35.24" }
# Temporary: Reference the latest git minor version of pathfinder_simd until it's published.
pathfinder_simd = "0.5.3"

34 changes: 10 additions & 24 deletions crates/turbo-tasks-malloc/src/counter.rs
@@ -4,6 +4,8 @@ use std::{
sync::atomic::{AtomicUsize, Ordering},
};

use crate::AllocationCounters;

static ALLOCATED: AtomicUsize = AtomicUsize::new(0);
const KB: usize = 1024;
/// When the global counter is updated we will keep a thread-local buffer of this
@@ -13,37 +15,20 @@ const TARGET_BUFFER: usize = 100 * KB;
/// global counter.
const MAX_BUFFER: usize = 200 * KB;

#[derive(Default)]
pub struct AllocationInfo {
pub allocations: usize,
pub deallocations: usize,
pub allocation_count: usize,
pub deallocation_count: usize,
}

impl AllocationInfo {
pub fn is_empty(&self) -> bool {
self.allocations == 0
&& self.deallocations == 0
&& self.allocation_count == 0
&& self.deallocation_count == 0
}
}

#[derive(Default)]
struct ThreadLocalCounter {
/// Thread-local buffer of allocated bytes that have been added to the
/// global counter despite not being allocated yet. It is unsigned, which
/// means the global counter is always equal to or greater than the real
/// value.
buffer: usize,
allocation_info: AllocationInfo,
allocation_counters: AllocationCounters,
}

impl ThreadLocalCounter {
fn add(&mut self, size: usize) {
self.allocation_info.allocations += size;
self.allocation_info.allocation_count += 1;
self.allocation_counters.allocations += size;
self.allocation_counters.allocation_count += 1;
if self.buffer >= size {
self.buffer -= size;
} else {
@@ -54,8 +39,8 @@ impl ThreadLocalCounter {
}

fn remove(&mut self, size: usize) {
self.allocation_info.deallocations += size;
self.allocation_info.deallocation_count += 1;
self.allocation_counters.deallocations += size;
self.allocation_counters.deallocation_count += 1;
self.buffer += size;
if self.buffer > MAX_BUFFER {
let offset = self.buffer - TARGET_BUFFER;
@@ -69,6 +54,7 @@
ALLOCATED.fetch_sub(self.buffer, Ordering::Relaxed);
self.buffer = 0;
}
self.allocation_counters = AllocationCounters::default();
}
}

@@ -80,8 +66,8 @@ pub fn get() -> usize {
ALLOCATED.load(Ordering::Relaxed)
}

pub fn pop_allocations() -> AllocationInfo {
with_local_counter(|local| std::mem::take(&mut local.allocation_info))
pub fn allocation_counters() -> AllocationCounters {
with_local_counter(|local| local.allocation_counters.clone())
}

fn with_local_counter<T>(f: impl FnOnce(&mut ThreadLocalCounter) -> T) -> T {
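The counter keeps amortizing updates to the global atomic through a per-thread byte buffer: add consumes buffered headroom before touching ALLOCATED, and remove returns bytes to the buffer and only flushes the surplus once it grows past MAX_BUFFER. A minimal standalone sketch of that buffering idea, with illustrative names and the same 100 KB / 200 KB thresholds (not the crate's actual API):

use std::cell::Cell;
use std::sync::atomic::{AtomicUsize, Ordering};

static GLOBAL: AtomicUsize = AtomicUsize::new(0);
// Refill the thread-local buffer to this size when it runs dry.
const TARGET: usize = 100 * 1024;
// Flush the surplus back to GLOBAL once the buffer exceeds this size.
const MAX: usize = 200 * 1024;

thread_local! {
    // Bytes already charged to GLOBAL but not yet consumed by this thread.
    static BUFFER: Cell<usize> = Cell::new(0);
}

fn on_alloc(size: usize) {
    BUFFER.with(|b| {
        let buffered = b.get();
        if buffered >= size {
            b.set(buffered - size);
        } else {
            // Charge the allocation plus a fresh buffer in one atomic update.
            GLOBAL.fetch_add(size - buffered + TARGET, Ordering::Relaxed);
            b.set(TARGET);
        }
    });
}

fn on_dealloc(size: usize) {
    BUFFER.with(|b| {
        let buffered = b.get() + size;
        if buffered > MAX {
            // Return the surplus so GLOBAL never drifts too far above reality.
            GLOBAL.fetch_sub(buffered - TARGET, Ordering::Relaxed);
            b.set(TARGET);
        } else {
            b.set(buffered);
        }
    });
}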
47 changes: 44 additions & 3 deletions crates/turbo-tasks-malloc/src/lib.rs
@@ -1,9 +1,50 @@
mod counter;

use std::alloc::{GlobalAlloc, Layout};
use std::{
alloc::{GlobalAlloc, Layout},
marker::PhantomData,
};

use self::counter::{add, flush, get, remove};

#[derive(Default, Clone, Debug)]
pub struct AllocationInfo {
pub allocations: usize,
pub deallocations: usize,
pub allocation_count: usize,
pub deallocation_count: usize,
}

impl AllocationInfo {
pub fn is_empty(&self) -> bool {
self.allocations == 0
&& self.deallocations == 0
&& self.allocation_count == 0
&& self.deallocation_count == 0
}
}

#[derive(Default, Clone, Debug)]
pub struct AllocationCounters {
pub allocations: usize,
pub deallocations: usize,
pub allocation_count: usize,
pub deallocation_count: usize,
_not_send: PhantomData<*mut ()>,
}

impl AllocationCounters {
pub fn until_now(&self) -> AllocationInfo {
let new = TurboMalloc::allocation_counters();
AllocationInfo {
allocations: new.allocations - self.allocations,
deallocations: new.deallocations - self.deallocations,
allocation_count: new.allocation_count - self.allocation_count,
deallocation_count: new.deallocation_count - self.deallocation_count,
}
}
}

/// Turbo's preferred global allocator. This is a new type instead of a type
/// alias because you can't use type aliases to instantiate unit types (E0423).
pub struct TurboMalloc;
@@ -17,8 +58,8 @@ impl TurboMalloc {
flush();
}

pub fn pop_allocations() -> self::counter::AllocationInfo {
self::counter::pop_allocations()
pub fn allocation_counters() -> AllocationCounters {
self::counter::allocation_counters()
}
}

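With pop_allocations gone, callers now snapshot the thread-local counters and diff them later: take TurboMalloc::allocation_counters() before a piece of work and call until_now() on that snapshot afterwards to get the delta as an AllocationInfo. A hedged usage sketch; the helper function and the turbo_tasks_malloc crate path are assumptions, not part of this diff:

use turbo_tasks_malloc::TurboMalloc;

// Hypothetical helper built on the new API: snapshot the thread-local
// counters, run some work, and report the delta. Assumes TurboMalloc is
// installed as the #[global_allocator] so the counters are actually fed.
fn measure_allocations<T>(work: impl FnOnce() -> T) -> T {
    let before = TurboMalloc::allocation_counters();
    let result = work();
    let delta = before.until_now();
    if !delta.is_empty() {
        println!(
            "allocated {} bytes in {} allocations, freed {} bytes in {} deallocations",
            delta.allocations,
            delta.allocation_count,
            delta.deallocations,
            delta.deallocation_count,
        );
    }
    result
}

Note that AllocationCounters carries a PhantomData<*mut ()> marker, so a snapshot is not Send and the delta is only meaningful on the thread that took it.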
19 changes: 10 additions & 9 deletions crates/turbo-tasks-memory/src/memory_backend.rs
@@ -84,7 +84,7 @@ impl MemoryBackend {
let id = self.backend_job_id_factory.get();
// SAFETY: This is a fresh id
unsafe {
self.backend_jobs.insert(*id, job);
self.backend_jobs.insert(*id as usize, job);
}
id
}
@@ -110,12 +110,12 @@

#[inline(always)]
pub fn with_task<T>(&self, id: TaskId, func: impl FnOnce(&Task) -> T) -> T {
func(self.memory_tasks.get(*id).unwrap())
func(self.memory_tasks.get(*id as usize).unwrap())
}

#[inline(always)]
pub fn task(&self, id: TaskId) -> &Task {
self.memory_tasks.get(*id).unwrap()
self.memory_tasks.get(*id as usize).unwrap()
}

pub fn on_task_might_become_inactive(&self, task: TaskId) {
@@ -179,7 +179,7 @@
) -> TaskId {
let new_id = new_id.into();
// Safety: We have a fresh task id that nobody knows about yet
unsafe { self.memory_tasks.insert(*new_id, task) };
unsafe { self.memory_tasks.insert(*new_id as usize, task) };
let result_task = match task_cache.entry(key) {
Entry::Vacant(entry) => {
// This is the most likely case
@@ -191,7 +191,7 @@
let task_id = *entry.get();
drop(entry);
unsafe {
self.memory_tasks.remove(*new_id);
self.memory_tasks.remove(*new_id as usize);
let new_id = Unused::new_unchecked(new_id);
turbo_tasks.reuse_task_id(new_id);
}
@@ -321,11 +321,12 @@ impl Backend for MemoryBackend {
task_id: TaskId,
duration: Duration,
instant: Instant,
memory_usage: usize,
stateful: bool,
turbo_tasks: &dyn TurboTasksBackendApi<MemoryBackend>,
) -> bool {
let reexecute = self.with_task(task_id, |task| {
task.execution_completed(duration, instant, stateful, self, turbo_tasks)
task.execution_completed(duration, instant, memory_usage, stateful, self, turbo_tasks)
});
if !reexecute {
self.run_gc(false, turbo_tasks);
@@ -495,7 +496,7 @@
turbo_tasks: &'a dyn TurboTasksBackendApi<MemoryBackend>,
) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
// SAFETY: id will not be reused until the job is done
if let Some(job) = unsafe { self.backend_jobs.take(*id) } {
if let Some(job) = unsafe { self.backend_jobs.take(*id as usize) } {
Box::pin(async move {
job.run(self, turbo_tasks).await;
// SAFETY: This id will no longer be used
@@ -573,13 +574,13 @@
TransientTaskType::Root(f) => {
let task = Task::new_root(id, move || f() as _, stats_type);
// SAFETY: We have a fresh task id that nobody knows about yet
unsafe { self.memory_tasks.insert(*id, task) };
unsafe { self.memory_tasks.insert(*id as usize, task) };
Task::set_root(id, self, turbo_tasks);
}
TransientTaskType::Once(f) => {
let task = Task::new_once(id, f, stats_type);
// SAFETY: We have a fresh task id that nobody knows about yet
unsafe { self.memory_tasks.insert(*id, task) };
unsafe { self.memory_tasks.insert(*id as usize, task) };
Task::set_once(id, self, turbo_tasks);
}
};
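The Backend::task_execution_completed hook gains a memory_usage: usize argument that MemoryBackend now forwards to Task::execution_completed. This diff does not show where that number comes from; one plausible source, given the allocator changes above, is the per-thread allocation counters around a task's execution. A speculative caller-side sketch; the function name and the allocations-minus-deallocations formula are assumptions, not the executor's actual wiring:

use turbo_tasks_malloc::TurboMalloc;

// Speculative sketch: derive a memory_usage figure for
// task_execution_completed from the allocation counters around an execution.
fn execute_and_measure(execute: impl FnOnce()) -> usize {
    let counters = TurboMalloc::allocation_counters();
    execute();
    let info = counters.until_now();
    // Net bytes this execution left allocated; clamp at zero if it freed more
    // than it allocated.
    info.allocations.saturating_sub(info.deallocations)
}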
34 changes: 18 additions & 16 deletions crates/turbo-tasks-memory/src/memory_backend_with_pg.rs
@@ -184,7 +184,7 @@ impl<P: PersistedGraph> MemoryBackendWithPersistedGraph<P> {
task: TaskId,
turbo_tasks: &dyn TurboTasksBackendApi<MemoryBackendWithPersistedGraph<P>>,
) -> (MutexGuard<'_, TaskState>, &Task) {
let task_info = self.tasks.get(*task).unwrap();
let task_info = self.tasks.get(*task as usize).unwrap();
let mut state = task_info.task_state.lock().unwrap();
self.ensure_task_initialized(task, task_info, &mut state, turbo_tasks);
(state, task_info)
@@ -195,7 +195,7 @@
task: TaskId,
turbo_tasks: &dyn TurboTasksBackendApi<MemoryBackendWithPersistedGraph<P>>,
) -> (MutexGuard<'_, TaskState>, &Task) {
let task_info = self.tasks.get(*task).unwrap();
let task_info = self.tasks.get(*task as usize).unwrap();
loop {
let mut delayed_activate = Vec::new();
let mut state = task_info.task_state.lock().unwrap();
@@ -391,7 +391,7 @@
let id = self.background_job_id_factory.get();
// SAFETY: It's a fresh id
unsafe {
self.background_jobs.insert(*id, job);
self.background_jobs.insert(*id as usize, job);
}
turbo_tasks.schedule_backend_background_job(id);
}
@@ -481,7 +481,7 @@
delayed_activate: &mut Vec<TaskId>,
turbo_tasks: &dyn TurboTasksBackendApi<MemoryBackendWithPersistedGraph<P>>,
) {
let task_info = self.tasks.get(*task).unwrap();
let task_info = self.tasks.get(*task as usize).unwrap();
let prev = task_info.active_parents.fetch_add(by, Ordering::Relaxed);
if prev == 0 {
// only the connect() call that increases from 0 is responsible for activating
@@ -633,7 +633,7 @@
delayed_deactivate: &mut Vec<TaskId>,
turbo_tasks: &dyn TurboTasksBackendApi<MemoryBackendWithPersistedGraph<P>>,
) {
let task_info = self.tasks.get(*task).unwrap();
let task_info = self.tasks.get(*task as usize).unwrap();
let prev = task_info.active_parents.fetch_sub(by, Ordering::Relaxed);
if prev == by {
// count reached zero
@@ -1048,7 +1048,7 @@ impl<P: PersistedGraph> Backend for MemoryBackendWithPersistedGraph<P> {
}

fn get_task_description(&self, task: TaskId) -> String {
let task_info = self.tasks.get(*task).unwrap();
let task_info = self.tasks.get(*task as usize).unwrap();
format!("{:?}", task_info.task_type)
}

@@ -1154,6 +1154,7 @@ impl<P: PersistedGraph> Backend for MemoryBackendWithPersistedGraph<P> {
task: TaskId,
duration: Duration,
_instant: Instant,
_memory_usage: usize,
_stateful: bool,
turbo_tasks: &dyn TurboTasksBackendApi<MemoryBackendWithPersistedGraph<P>>,
) -> bool {
@@ -1202,10 +1203,11 @@
}
if has_changes || is_dirty_persisted {
self.need_persisting.insert(task);
self.persist_queue_by_duration[*task % self.persist_queue_by_duration.len()]
.lock()
.unwrap()
.push((duration, task));
self.persist_queue_by_duration
[*task as usize % self.persist_queue_by_duration.len()]
.lock()
.unwrap()
.push((duration, task));
self.increase_persist_workers(1, turbo_tasks);
}
}
@@ -1234,7 +1236,7 @@
});
}
// SAFETY: We are the only owner of this id
let job = unsafe { self.background_jobs.take(*id) };
let job = unsafe { self.background_jobs.take(*id as usize) };
unsafe {
self.background_job_id_factory.reuse(id);
}
@@ -1543,14 +1545,14 @@
};
// SAFETY: It's a fresh task id
unsafe {
self.tasks.insert(*task, new_task);
self.tasks.insert(*task as usize, new_task);
}
match self.cache.entry(task_type) {
Entry::Occupied(e) => {
let existing_task = *e.into_ref();
// SAFETY: We are still the only owner of this task and id
unsafe {
self.tasks.remove(*task);
self.tasks.remove(*task as usize);
let task = Unused::new_unchecked(task);
turbo_tasks.reuse_task_id(task);
}
@@ -1601,7 +1603,7 @@
};
// SAFETY: It's a fresh task id
unsafe {
self.tasks.insert(*task, new_task);
self.tasks.insert(*task as usize, new_task);
}
self.only_known_to_memory_tasks.insert(task);
task
@@ -1631,7 +1633,7 @@ impl<'a, P: PersistedGraph> PersistedGraphApi for MemoryBackendPersistedGraphApi
let task = task.into();
// SAFETY: It's a fresh task id
unsafe {
self.backend.tasks.insert(*task, new_task);
self.backend.tasks.insert(*task as usize, new_task);
}
match cache.entry(task_type) {
Entry::Occupied(e) => {
@@ -1651,7 +1653,7 @@
}

fn lookup_task_type(&self, id: TaskId) -> &PersistentTaskType {
let task = self.backend.tasks.get(*id).unwrap();
let task = self.backend.tasks.get(*id as usize).unwrap();
match &task.task_type {
TaskType::Persistent(ty) => ty,
_ => panic!("lookup_task_type should only be used for PersistentTaskType"),
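The repeated *id to *id as usize edits across both backends suggest the id newtypes (TaskId and the backend job ids) now deref to a narrower integer such as u32 rather than usize, so each use as a map or slab index needs an explicit widening cast. A minimal sketch of that pattern, using illustrative stand-ins for TaskId and the usize-indexed task storage (not the crate's real types):

use std::collections::HashMap;
use std::ops::Deref;

// Illustrative stand-in for TaskId: a u32 newtype that derefs to u32.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct TaskId(u32);

impl Deref for TaskId {
    type Target = u32;
    fn deref(&self) -> &u32 {
        &self.0
    }
}

// Illustrative stand-in for the usize-indexed concurrent task storage.
struct TaskStore<T> {
    slots: HashMap<usize, T>,
}

impl<T> TaskStore<T> {
    fn insert(&mut self, index: usize, value: T) {
        self.slots.insert(index, value);
    }

    fn get(&self, index: usize) -> Option<&T> {
        self.slots.get(&index)
    }
}

fn demo(store: &mut TaskStore<String>, id: TaskId) {
    // `*id` is a u32 now, so every use as an index needs the widening cast.
    store.insert(*id as usize, "task".to_string());
    let _task = store.get(*id as usize);
}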