183 changes: 92 additions & 91 deletions turbopack/crates/turbo-tasks-backend/src/backend/mod.rs
@@ -48,8 +48,8 @@ use crate::backend::operation::TaskDirtyCause;
use crate::{
backend::{
operation::{
AggregatedDataUpdate, AggregationUpdateJob, AggregationUpdateQueue,
CleanupOldEdgesOperation, ConnectChildOperation, ExecuteContext, ExecuteContextImpl,
AggregationUpdateJob, AggregationUpdateQueue, CleanupOldEdgesOperation,
ComputeDirtyAndCleanUpdate, ConnectChildOperation, ExecuteContext, ExecuteContextImpl,
Operation, OutdatedEdge, TaskGuard, connect_children, get_aggregation_number,
get_uppers, is_root_node, make_task_dirty_internal, prepare_new_children,
},
@@ -560,11 +560,8 @@ impl<B: BackingStorage> TurboTasksBackendInner<B> {
let is_dirty = task.is_dirty(self.session_id);

// Check the dirty count of the root node
let dirty_tasks = get!(task, AggregatedDirtyContainerCount)
.cloned()
.unwrap_or_default()
.get(self.session_id);
if dirty_tasks > 0 || is_dirty {
let has_dirty_containers = task.has_dirty_containers(self.session_id);
if has_dirty_containers || is_dirty {
let activeness = get_mut!(task, Activeness);
let mut task_ids_to_schedule: Vec<_> = Vec::new();
// When there are dirty task, subscribe to the all_clean_event
@@ -582,14 +579,7 @@ impl<B: BackingStorage> TurboTasksBackendInner<B> {
.set_active_until_clean();
if ctx.should_track_activeness() {
// A newly added Activeness need to make sure to schedule the tasks
task_ids_to_schedule = get_many!(
task,
AggregatedDirtyContainer {
task
} count if count.get(self.session_id) > 0 => {
task
}
);
task_ids_to_schedule = task.dirty_containers(self.session_id).collect();
task_ids_to_schedule.push(task_id);
}
get!(task, Activeness).unwrap()
@@ -634,10 +624,7 @@ impl<B: BackingStorage> TurboTasksBackendInner<B> {
};

// Check the dirty count of the root node
let dirty_tasks = get!(task, AggregatedDirtyContainerCount)
.cloned()
.unwrap_or_default()
.get(ctx.session_id());
let has_dirty_containers = task.has_dirty_containers(ctx.session_id());

let task_description = ctx.get_task_description(task_id);
let is_dirty = if is_dirty { ", dirty" } else { "" };
@@ -650,24 +637,16 @@ impl<B: BackingStorage> TurboTasksBackendInner<B> {
"{task_id} {task_description}{count} (aggr={aggregation_number}, \
{in_progress}, {activeness}{is_dirty})",
);
let children: Vec<_> = iter_many!(
task,
AggregatedDirtyContainer {
task
} count => {
(task, count.get(ctx.session_id()))
}
)
.filter(|(_, count)| *count > 0)
.collect();
let children: Vec<_> =
task.dirty_containers_with_count(ctx.session_id()).collect();
drop(task);

if missing_upper {
info.push_str("\n ERROR: missing upper connection");
}

if dirty_tasks > 0 || !children.is_empty() {
writeln!(info, "\n {dirty_tasks} dirty tasks:").unwrap();
if has_dirty_containers || !children.is_empty() {
writeln!(info, "\n dirty tasks:").unwrap();

for (child_task_id, count) in children {
let task_description = ctx.get_task_description(child_task_id);
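
Note: the hunks above all follow one pattern: hand-rolled lookups of AggregatedDirtyContainerCount / AggregatedDirtyContainer are replaced by helper methods on the task guard (has_dirty_containers, dirty_containers, dirty_containers_with_count). Those helpers are defined elsewhere in this PR and are not visible in this diff; the following is only a rough, self-contained sketch of what they plausibly compute. The types and the count-minus-session-clean bookkeeping are assumptions, not the actual implementation.

use std::collections::HashMap;

type TaskId = u64;
type SessionId = u32;

/// Stand-in for the task guard's aggregated dirty-state storage (assumed).
struct TaskSketch {
    /// Roughly `AggregatedDirtyContainerCount`: how many aggregated
    /// containers are dirty at all.
    aggregated_dirty_container_count: i32,
    /// Assumed per-session count of session-dependent containers that are
    /// already clean in that session.
    session_clean_container_count: HashMap<SessionId, i32>,
    /// Roughly the `AggregatedDirtyContainer { task }` entries.
    dirty_container_counts: HashMap<TaskId, i32>,
}

impl TaskSketch {
    /// Stand-in for `task.has_dirty_containers(session_id)`: is anything in
    /// the aggregated subtree still dirty for this session?
    fn has_dirty_containers(&self, session_id: SessionId) -> bool {
        let clean = self
            .session_clean_container_count
            .get(&session_id)
            .copied()
            .unwrap_or(0);
        self.aggregated_dirty_container_count - clean > 0
    }

    /// Stand-in for `task.dirty_containers_with_count(session_id)`.
    fn dirty_containers_with_count(
        &self,
        _session_id: SessionId,
    ) -> impl Iterator<Item = (TaskId, i32)> + '_ {
        self.dirty_container_counts
            .iter()
            .filter(|&(_, &count)| count > 0)
            .map(|(&task, &count)| (task, count))
    }

    /// Stand-in for `task.dirty_containers(session_id)`.
    fn dirty_containers(&self, session_id: SessionId) -> impl Iterator<Item = TaskId> + '_ {
        self.dirty_containers_with_count(session_id)
            .map(|(task, _)| task)
    }
}
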
@@ -2353,72 +2332,96 @@ impl<B: BackingStorage> TurboTasksBackendInner<B> {
},
));

// Update the dirty state
let old_dirtyness = task.dirtyness_and_session();

let new_dirtyness = if session_dependent {
Some((Dirtyness::SessionDependent, Some(self.session_id)))
} else {
None
// Grab the old dirty state
let old_dirtyness = get!(task, Dirty).cloned();
let (was_dirty, was_current_session_clean, old_clean_in_session) = match old_dirtyness {
None => (false, false, None),
Some(Dirtyness::Dirty) => (true, false, None),
Some(Dirtyness::SessionDependent) => {
let clean_in_session = get!(task, CleanInSession).copied();
(
true,
clean_in_session == Some(self.session_id),
clean_in_session,
)
}
};
let old_dirty_value = if was_dirty { 1 } else { 0 };
let old_current_session_clean_value = if was_current_session_clean { 1 } else { 0 };

// Compute the new dirty state
let (new_dirtyness, new_clean_in_session, new_dirty_value, new_current_session_clean_value) =
if session_dependent {
(
Some(Dirtyness::SessionDependent),
Some(self.session_id),
1,
1,
)
} else {
(None, None, 0, 0)
};

let dirty_changed = old_dirtyness != new_dirtyness;
let data_update = if dirty_changed {
if let Some((value, _)) = new_dirtyness {
// Update the dirty state
if old_dirtyness != new_dirtyness {
if let Some(value) = new_dirtyness {
task.insert(CachedDataItem::Dirty { value });
} else if old_dirtyness.is_some() {
task.remove(&CachedDataItemKey::Dirty {});
}
if let Some(session_id) = new_dirtyness.and_then(|t| t.1) {
}
if old_clean_in_session != new_clean_in_session {
if let Some(session_id) = new_clean_in_session {
task.insert(CachedDataItem::CleanInSession { value: session_id });
} else if old_dirtyness.is_some_and(|t| t.1.is_some()) {
} else if old_clean_in_session.is_some() {
task.remove(&CachedDataItemKey::CleanInSession {});
}
}

let mut dirty_containers = get!(task, AggregatedDirtyContainerCount)
// Propagate dirtyness changes
let data_update = if old_dirty_value != new_dirty_value
|| old_current_session_clean_value != new_current_session_clean_value
{
let dirty_container_count = get!(task, AggregatedDirtyContainerCount)
.cloned()
.unwrap_or_default();
if let Some((old_dirtyness, old_clean_in_session)) = old_dirtyness {
dirty_containers
.update_with_dirtyness_and_session(old_dirtyness, old_clean_in_session);
}
let aggregated_update = match (old_dirtyness, new_dirtyness) {
(None, None) => unreachable!(),
(Some(old), None) => {
dirty_containers.undo_update_with_dirtyness_and_session(old.0, old.1)
}
(None, Some(new)) => {
dirty_containers.update_with_dirtyness_and_session(new.0, new.1)
}
(Some(old), Some(new)) => {
dirty_containers.replace_dirtyness_and_session(old.0, old.1, new.0, new.1)
}
};
if !aggregated_update.is_zero() {
if aggregated_update.get(self.session_id) < 0
&& let Some(activeness_state) = get_mut!(task, Activeness)
{
activeness_state.all_clean_event.notify(usize::MAX);
activeness_state.unset_active_until_clean();
if activeness_state.is_empty() {
task.remove(&CachedDataItemKey::Activeness {});
}
let current_session_clean_container_count = get!(
task,
AggregatedSessionDependentCleanContainerCount {
session_id: self.session_id
}
AggregationUpdateJob::data_update(
&mut task,
AggregatedDataUpdate::new().dirty_container_update(
task_id,
aggregated_update.count,
aggregated_update.current_session_clean(ctx.session_id()),
),
)
} else {
None
}
)
.copied()
.unwrap_or_default();
let result = ComputeDirtyAndCleanUpdate {
old_dirty_container_count: dirty_container_count,
new_dirty_container_count: dirty_container_count,
old_current_session_clean_container_count: current_session_clean_container_count,
new_current_session_clean_container_count: current_session_clean_container_count,
old_dirty_value,
new_dirty_value,
old_current_session_clean_value,
new_current_session_clean_value,
}
.compute();
result
.aggregated_update(task_id)
.and_then(|aggregated_update| {
AggregationUpdateJob::data_update(&mut task, aggregated_update)
})
} else {
None
};

if let Some(activeness_state) = get_mut!(task, Activeness) {
// The task is clean now
activeness_state.all_clean_event.notify(usize::MAX);
activeness_state.unset_active_until_clean();
if activeness_state.is_empty() {
task.remove(&CachedDataItemKey::Activeness {});
}
}

Review comment (Contributor):
The variable dirty_changed is used but never defined in the refactored code, causing a compilation error.

📝 Patch Details
diff --git a/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs b/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs
index b91647b00b..9300183a68 100644
--- a/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs
+++ b/turbopack/crates/turbo-tasks-backend/src/backend/mod.rs
@@ -2376,6 +2376,8 @@ impl<B: BackingStorage> TurboTasksBackendInner<B> {
             }
         }
 
+        #[cfg(feature = "verify_determinism")]
+        let dirty_changed = old_dirtyness != new_dirtyness;
         #[cfg(feature = "verify_determinism")]
         let reschedule = (dirty_changed || no_output_set) && !task_id.is_transient();
         #[cfg(not(feature = "verify_determinism"))]

Analysis

Missing dirty_changed variable definition causes compilation error with verify_determinism feature

What fails: The task_execution_completed_finish() method in turbopack/crates/turbo-tasks-backend/src/backend/mod.rs references an undefined variable dirty_changed on line 2380, causing a compilation error when the verify_determinism feature is enabled.

How to reproduce:

cd turbopack
cargo check --package turbo-tasks-backend --features verify_determinism

Result: Compilation fails with:

error[E0425]: cannot find value `dirty_changed` in this scope
    --> turbopack/crates/turbo-tasks-backend/src/backend/mod.rs:2380:27
     |
2380 |         let reschedule = (dirty_changed || no_output_set) && !task_id.is_transient();
     |                           ^^^^^^^^^^^^^ not found in this scope

Expected: The code compiles successfully. The variable dirty_changed should be defined as old_dirtyness != new_dirtyness to track whether the dirty state changed, which is the logical intent given that the code computes both old_dirtyness (line ~2282) and new_dirtyness (lines 2308-2318) and checks their equality on line 2320.

Fix: Added the missing variable definition guarded by the same #[cfg(feature = "verify_determinism")] attribute as its usage:

#[cfg(feature = "verify_determinism")]
let dirty_changed = old_dirtyness != new_dirtyness;

#[cfg(feature = "verify_determinism")]
let reschedule = (dirty_changed || no_output_set) && !task_id.is_transient();
#[cfg(not(feature = "verify_determinism"))]
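
Aside from the missing dirty_changed definition, the new ComputeDirtyAndCleanUpdate call site in the large hunk above is easier to follow with its intent spelled out. The operation itself lives under backend/operation and is not part of this diff, so the sketch below is only an assumption: the field names mirror the call site, but the 0/1 clamping, the return type, and aggregated_update are guesses at the intended delta computation.

/// Field names mirror the call site above; everything else is assumed.
struct ComputeDirtyAndCleanUpdate {
    old_dirty_container_count: i32,
    new_dirty_container_count: i32,
    old_current_session_clean_container_count: i32,
    new_current_session_clean_container_count: i32,
    old_dirty_value: i32,
    new_dirty_value: i32,
    old_current_session_clean_value: i32,
    new_current_session_clean_value: i32,
}

/// The deltas that would be propagated to the upper (aggregating) tasks.
struct DirtyAndCleanUpdate {
    dirty_delta: i32,
    current_session_clean_delta: i32,
}

impl ComputeDirtyAndCleanUpdate {
    fn compute(self) -> DirtyAndCleanUpdate {
        // A task contributes at most 1 to its uppers' dirty count: it counts
        // as dirty if it is dirty itself or has any dirty container.
        let was_dirty = (self.old_dirty_container_count + self.old_dirty_value).min(1);
        let is_dirty = (self.new_dirty_container_count + self.new_dirty_value).min(1);
        // Same idea for the "clean in the current session" contribution.
        let was_clean = (self.old_current_session_clean_container_count
            + self.old_current_session_clean_value)
            .min(1);
        let is_clean = (self.new_current_session_clean_container_count
            + self.new_current_session_clean_value)
            .min(1);
        DirtyAndCleanUpdate {
            dirty_delta: is_dirty - was_dirty,
            current_session_clean_delta: is_clean - was_clean,
        }
    }
}

impl DirtyAndCleanUpdate {
    /// Rough analogue of `result.aggregated_update(task_id)`: only emit an
    /// update for the uppers when something actually changed.
    fn aggregated_update(self, task_id: u64) -> Option<(u64, i32, i32)> {
        if self.dirty_delta == 0 && self.current_session_clean_delta == 0 {
            None
        } else {
            Some((task_id, self.dirty_delta, self.current_session_clean_delta))
        }
    }
}
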
@@ -2881,10 +2884,7 @@ impl<B: BackingStorage> TurboTasksBackendInner<B> {
let mut ctx = self.execute_context(turbo_tasks);
let mut task = ctx.task(task_id, TaskDataCategory::All);
let is_dirty = task.is_dirty(self.session_id);
let has_dirty_containers = get!(task, AggregatedDirtyContainerCount)
.map_or(false, |dirty_containers| {
dirty_containers.get(self.session_id) > 0
});
let has_dirty_containers = task.has_dirty_containers(self.session_id);
if is_dirty || has_dirty_containers {
if let Some(activeness_state) = get_mut!(task, Activeness) {
// We will finish the task, but it would be removed after the task is done
@@ -2973,9 +2973,8 @@ impl<B: BackingStorage> TurboTasksBackendInner<B> {
}
}

let is_dirty = task.is_dirty(self.session_id);
let has_dirty_container = get!(task, AggregatedDirtyContainerCount)
.is_some_and(|count| count.get(self.session_id) > 0);
let is_dirty = get!(task, Dirty).is_some();
let has_dirty_container = task.has_dirty_containers(self.session_id);
let should_be_in_upper = is_dirty || has_dirty_container;

let aggregation_number = get_aggregation_number(&task);
@@ -2998,17 +2997,19 @@

if should_be_in_upper {
for upper_id in uppers {
let task = ctx.task(task_id, TaskDataCategory::All);
let task = ctx.task(upper_id, TaskDataCategory::All);
let in_upper = get!(task, AggregatedDirtyContainer { task: task_id })
.is_some_and(|dirty| dirty.get(self.session_id) > 0);
.is_some_and(|&dirty| dirty > 0);
if !in_upper {
let containers: Vec<_> = get_many!(task, AggregatedDirtyContainer { task: task_id } value => (task_id, *value));
panic!(
"Task {} ({}) is dirty, but is not listed in the upper task {} \
({})",
({})\nThese dirty containers are present:\n{:#?}",
task_id,
ctx.get_task_description(task_id),
upper_id,
ctx.get_task_description(upper_id)
ctx.get_task_description(upper_id),
containers,
);
}
}