Skip to content

Commit

Permalink
Merge branch 'abk/run-802-reserve-cycles-in-upload-chunk' into 'master'
Browse files Browse the repository at this point in the history
RUN-802: Reserve cycles in `upload_chunk`

Make sure that cycles reservation is performed for the new memory used
when uploading to the chunk store. 

See merge request dfinity-lab/public/ic!15642
  • Loading branch information
adambratschikaye committed Nov 1, 2023
2 parents 8c8a973 + 10e1183 commit 1011aa4
Show file tree
Hide file tree
Showing 3 changed files with 147 additions and 11 deletions.
65 changes: 54 additions & 11 deletions rs/execution_environment/src/canister_manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ use ic_interfaces::execution_environment::{
use ic_logger::{error, fatal, info, ReplicaLogger};
use ic_registry_provisional_whitelist::ProvisionalWhitelist;
use ic_registry_subnet_type::SubnetType;
use ic_replicated_state::canister_state::system_state::ReservationError;
use ic_replicated_state::{
canister_state::system_state::{
wasm_chunk_store::{self, WasmChunkStore},
Expand Down Expand Up @@ -1449,6 +1450,7 @@ impl CanisterManager {
chunk: &[u8],
subnet_available_memory: &mut SubnetAvailableMemory,
subnet_size: usize,
resource_saturation: &ResourceSaturation,
) -> Result<UploadChunkReply, CanisterManagerError> {
if self.config.wasm_chunk_store == FlagStatus::Disabled {
return Err(CanisterManagerError::WasmChunkStoreError {
Expand All @@ -1464,19 +1466,33 @@ impl CanisterManager {
.can_insert_chunk(chunk)
.map_err(|err| CanisterManagerError::WasmChunkStoreError { message: err })?;

let new_memory_usage = canister.memory_usage() + wasm_chunk_store::chunk_size();
let chunk_bytes = wasm_chunk_store::chunk_size();
let new_memory_usage = canister.memory_usage() + chunk_bytes;

match canister.memory_allocation() {
MemoryAllocation::Reserved(bytes) => {
if bytes < new_memory_usage {
return Err(CanisterManagerError::NotEnoughMemoryAllocationGiven {
memory_allocation_given: canister.memory_allocation(),
memory_usage_needed: canister.memory_usage()
+ wasm_chunk_store::chunk_size(),
memory_usage_needed: new_memory_usage,
});
}
}
MemoryAllocation::BestEffort => {
// Run the following checks on memory usage and return an error
// if any fails:
// 1. Check new usage will not freeze canister
// 2. Check subnet has available memory
// 3. Reserve cycles on canister
// 4. Actually deduct memory from subnet (asserting it won't fail)

// Calculate if any cycles will need to be reserved.
let reservation_cycles = self.cycles_account_manager.storage_reservation_cycles(
chunk_bytes,
resource_saturation,
subnet_size,
);

// Memory usage will increase by the chunk size, so we need to
// check that it doesn't bump us over the freezing threshold.
let threshold = self.cycles_account_manager.freeze_threshold_cycles(
Expand All @@ -1486,9 +1502,11 @@ impl CanisterManager {
canister.message_memory_usage(),
canister.compute_allocation(),
subnet_size,
canister.system_state.reserved_balance(),
canister.system_state.reserved_balance() + reservation_cycles,
);
if threshold > canister.system_state.balance() {
// Note: if the subtraction here saturates, then we will get an
// error later when trying to actually reserve the cycles.
if threshold > canister.system_state.balance() - reservation_cycles {
return Err(CanisterManagerError::WasmChunkStoreError {
message: format!(
"Cannot upload chunk. At least {} additional cycles are required.",
Expand All @@ -1497,20 +1515,45 @@ impl CanisterManager {
});
}

// Verify subnet has enough memory.
subnet_available_memory
.try_decrement(
wasm_chunk_store::chunk_size(),
NumBytes::from(0),
NumBytes::from(0),
)
.check_available_memory(chunk_bytes, NumBytes::from(0), NumBytes::from(0))
.map_err(
|_| CanisterManagerError::SubnetMemoryCapacityOverSubscribed {
requested: wasm_chunk_store::chunk_size(),
requested: chunk_bytes,
available: NumBytes::from(
subnet_available_memory.get_execution_memory().max(0) as u64,
),
},
)?;

// Reserve needed cycles if the subnet is becoming saturated.
canister
.system_state
.reserve_cycles(reservation_cycles)
.map_err(|err| match err {
ReservationError::InsufficientCycles {
requested,
available,
} => CanisterManagerError::InsufficientCyclesInMemoryGrow {
bytes: chunk_bytes,
available,
threshold: requested,
},
ReservationError::ReservedLimitExceed { requested, limit } => {
CanisterManagerError::ReservedCyclesLimitExceededInMemoryGrow {
bytes: chunk_bytes,
requested,
limit,
}
}
})?;

// Actually deduct memory from the subnet. It's safe to unwrap
// here because we already checked the available memory above.
subnet_available_memory
.try_decrement(chunk_bytes, NumBytes::from(0), NumBytes::from(0))
.expect("Error: Cannot fail to decrement SubnetAvailableMemory after checking for availability");
}
}

Expand Down
88 changes: 88 additions & 0 deletions rs/execution_environment/src/canister_manager/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7007,6 +7007,7 @@ fn upload_chunk_counts_to_memory_usage() {
let canister_id = test.create_canister(CYCLES);

let initial_memory_usage = test.canister_state(canister_id).memory_usage();
let initial_subnet_available_memory = test.subnet_available_memory().get_execution_memory();

// Check memory usage after one chunk uploaded.
let chunk = vec![1, 2, 3, 4, 5];
Expand All @@ -7020,6 +7021,10 @@ fn upload_chunk_counts_to_memory_usage() {
test.canister_state(canister_id).memory_usage(),
chunk_size + initial_memory_usage
);
assert_eq!(
test.subnet_available_memory().get_execution_memory(),
initial_subnet_available_memory - chunk_size.get() as i64
);

// Check memory usage after two chunks uploaded.
let chunk = vec![4; 1000];
Expand All @@ -7033,6 +7038,10 @@ fn upload_chunk_counts_to_memory_usage() {
test.canister_state(canister_id).memory_usage(),
NumBytes::from(2 * chunk_size.get()) + initial_memory_usage
);
assert_eq!(
test.subnet_available_memory().get_execution_memory(),
initial_subnet_available_memory - 2 * chunk_size.get() as i64
);

// Check memory usage after three chunks uploaded.
let chunk = vec![6; 1024 * 1024];
Expand All @@ -7046,6 +7055,10 @@ fn upload_chunk_counts_to_memory_usage() {
test.canister_state(canister_id).memory_usage(),
NumBytes::from(3 * chunk_size.get()) + initial_memory_usage
);
assert_eq!(
test.subnet_available_memory().get_execution_memory(),
initial_subnet_available_memory - 3 * chunk_size.get() as i64
);
}

#[test]
Expand Down Expand Up @@ -7172,6 +7185,81 @@ fn upload_chunk_fails_when_it_exceeds_chunk_size() {
);
}

#[test]
fn upload_chunk_reserves_cycles() {
    const CYCLES: Cycles = Cycles::new(1_000_000_000_000_000);

    // Step 1: measure how much subnet execution memory one chunk upload
    // consumes. This probe subnet uses a zero memory threshold so the
    // reservation mechanism cannot interfere with the measurement.
    let chunk_memory_usage = {
        const CAPACITY: i64 = 1_000_000_000;

        let mut probe = ExecutionTestBuilder::new()
            .with_subnet_execution_memory(CAPACITY)
            .with_subnet_memory_reservation(0)
            .with_subnet_memory_threshold(0)
            .with_wasm_chunk_store()
            .build();
        let probe_canister = probe.create_canister(CYCLES);

        let args = UploadChunkArgs {
            canister_id: probe_canister.into(),
            chunk: vec![42; 10],
        };

        let _hash = probe
            .subnet_message("upload_chunk", args.encode())
            .unwrap();

        CAPACITY - probe.subnet_available_memory().get_execution_memory()
    };

    // Step 2: build a subnet whose memory threshold sits just above the
    // footprint of a single upload. The first chunk therefore lands below
    // the threshold (no reservation), while the second pushes the subnet
    // into the saturated region and must trigger a cycles reservation.
    let mut env = ExecutionTestBuilder::new()
        .with_wasm_chunk_store()
        .with_subnet_memory_reservation(0)
        .with_subnet_memory_threshold(chunk_memory_usage + 1)
        .build();
    let canister = env.create_canister(CYCLES);
    assert_eq!(
        env.canister_state(canister).system_state.reserved_balance(),
        Cycles::from(0_u128)
    );

    // First chunk: still under the threshold, so no cycles are reserved.
    let first = UploadChunkArgs {
        canister_id: canister.into(),
        chunk: vec![42; 10],
    };
    let _hash = env
        .subnet_message("upload_chunk", first.encode())
        .unwrap();
    assert_eq!(
        env.canister_state(canister).system_state.reserved_balance(),
        Cycles::from(0_u128)
    );

    // Second chunk: crosses the threshold, so a positive reservation is
    // expected on the canister's reserved balance.
    let second = UploadChunkArgs {
        canister_id: canister.into(),
        chunk: vec![43; 10],
    };
    let _hash = env
        .subnet_message("upload_chunk", second.encode())
        .unwrap();
    let reserved_balance = env
        .canister_state(canister)
        .system_state
        .reserved_balance()
        .get();
    assert!(
        reserved_balance > 0,
        "Reserved balance {} should be positive",
        reserved_balance
    );
}

#[test]
fn clear_chunk_store_works() {
const CYCLES: Cycles = Cycles::new(1_000_000_000_000_000);
Expand Down
5 changes: 5 additions & 0 deletions rs/execution_environment/src/execution_environment.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1031,6 +1031,8 @@ impl ExecutionEnvironment {
}

Ok(Ic00Method::UploadChunk) => {
let resource_saturation =
self.subnet_memory_saturation(&round_limits.subnet_available_memory);
let res = match UploadChunkArgs::decode(payload) {
Err(err) => Err(err),
Ok(request) => self.upload_chunk(
Expand All @@ -1039,6 +1041,7 @@ impl ExecutionEnvironment {
request,
&mut round_limits.subnet_available_memory,
registry_settings.subnet_size,
&resource_saturation,
),
};
Some((res, msg.take_cycles()))
Expand Down Expand Up @@ -1540,6 +1543,7 @@ impl ExecutionEnvironment {
args: UploadChunkArgs,
subnet_available_memory: &mut SubnetAvailableMemory,
subnet_size: usize,
resource_saturation: &ResourceSaturation,
) -> Result<Vec<u8>, UserError> {
let canister = get_canister_mut(args.get_canister_id(), state)?;
self.canister_manager
Expand All @@ -1549,6 +1553,7 @@ impl ExecutionEnvironment {
&args.chunk,
subnet_available_memory,
subnet_size,
resource_saturation,
)
.map(|reply| reply.encode())
.map_err(|err| err.into())
Expand Down

0 comments on commit 1011aa4

Please sign in to comment.