From e7f1d0639690ed3c2332684e059c9d4c9f123947 Mon Sep 17 00:00:00 2001
From: William Schor
Date: Wed, 14 Feb 2024 16:41:58 -0800
Subject: [PATCH] decouple read size from write size in ddb model

---
 service_capacity_modeling/models/org/netflix/ddb.py | 13 +++++++++----
 tests/netflix/test_ddb.py                           |  2 ++
 2 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/service_capacity_modeling/models/org/netflix/ddb.py b/service_capacity_modeling/models/org/netflix/ddb.py
index 6a7ecf1..be3bee0 100644
--- a/service_capacity_modeling/models/org/netflix/ddb.py
+++ b/service_capacity_modeling/models/org/netflix/ddb.py
@@ -200,11 +200,16 @@ def _get_write_consistency_percentages(
     }
 
 
-def _mean_item_size_bytes(desires: CapacityDesires) -> float:
+def _mean_write_item_size_bytes(desires: CapacityDesires) -> float:
     mean_item_size = desires.query_pattern.estimated_mean_write_size_bytes.mid
     return mean_item_size
 
 
+def _mean_read_item_size_bytes(desires: CapacityDesires) -> float:
+    mean_item_size = desires.query_pattern.estimated_mean_read_size_bytes.mid
+    return mean_item_size
+
+
 def _get_dynamo_standard(context: RegionContext) -> Service:
     number_of_regions = context.num_regions
     dynamo_service = (
@@ -238,7 +243,7 @@ def _plan_writes(
     desires: CapacityDesires,
     extra_model_arguments: Dict[str, Any],
 ) -> _WritePlan:
-    mean_item_size = _mean_item_size_bytes(desires)
+    mean_item_size = _mean_write_item_size_bytes(desires)
 
     # For items up to 1 KB in size,
     # one WCU can perform one standard write request per second
@@ -305,7 +310,7 @@ def _plan_reads(
     transactional_read_percent = read_percentages["transactional_read_percent"]
     eventual_read_percent = read_percentages["eventual_read_percent"]
     strong_read_percent = read_percentages["strong_read_percent"]
-    mean_item_size = _mean_item_size_bytes(desires)
+    mean_item_size = _mean_read_item_size_bytes(desires)
 
     # items up to 4 KB in size
     rounded_rcus_per_item = math.ceil(max(1.0, mean_item_size / (4 * 1024)))
@@ -377,7 +382,7 @@ def _plan_data_transfer(
         return _DataTransferPlan(
             total_data_transfer_gib=0, total_annual_data_transfer_cost=0
         )
-    mean_item_size_bytes = _mean_item_size_bytes(desires)
+    mean_item_size_bytes = _mean_write_item_size_bytes(desires)
     writes_per_second = desires.query_pattern.estimated_write_per_second.mid
     # 31,536,000 seconds in a year (365 * 24 * 60 * 60)
     # 1024 * 1024 * 1024 = 1Gib
diff --git a/tests/netflix/test_ddb.py b/tests/netflix/test_ddb.py
index a2bde78..171c102 100644
--- a/tests/netflix/test_ddb.py
+++ b/tests/netflix/test_ddb.py
@@ -53,6 +53,7 @@
         ),
         estimated_write_per_second=certain_int(0),
         estimated_mean_write_size_bytes=certain_int(5798),
+        estimated_mean_read_size_bytes=certain_int(5798),
     ),
     data_shape=DataShape(
         estimated_state_size_gib=Interval(low=10, mid=100, high=1000, confidence=0.98),
@@ -78,6 +79,7 @@
             low=100, mid=1000, high=10000, confidence=0.98
         ),
         estimated_mean_write_size_bytes=certain_int(5798),
+        estimated_mean_read_size_bytes=certain_int(5798),
     ),
     data_shape=DataShape(
         estimated_state_size_gib=Interval(low=10, mid=100, high=1000, confidence=0.98),
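
Note (illustrative, not part of the patch): a minimal sketch of why the read size needs its own estimate. The 4 KB read rounding is taken verbatim from rounded_rcus_per_item in _plan_reads; the 1 KB write unit is assumed from the comment in _plan_writes (the actual write formula is not visible in these hunks), and the helper names and 5798-byte / 512-byte figures below are made up for the example.

import math

# Assumption: one WCU per 1 KB of item size, based on the _plan_writes comment.
def wcus_per_item(mean_write_size_bytes: float) -> int:
    return math.ceil(max(1.0, mean_write_size_bytes / 1024))

# Mirrors rounded_rcus_per_item in _plan_reads: one RCU per 4 KB of item size.
def rcus_per_item(mean_read_size_bytes: float) -> int:
    return math.ceil(max(1.0, mean_read_size_bytes / (4 * 1024)))

# Before this change, reads were costed with the write size. A table that writes
# ~5798 B items but answers small ~512 B reads would be charged 2 RCUs per read;
# with a dedicated read size it correctly needs only 1.
print(wcus_per_item(5798))  # 6
print(rcus_per_item(5798))  # 2  <- old behavior: write size reused for reads
print(rcus_per_item(512))   # 1  <- new behavior: dedicated read size

_plan_data_transfer keeps using the write size, presumably because cross-region transfer is driven by replicated writes rather than reads.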