@@ -47,7 +47,24 @@ class Stream extends Base {
          * Useful to speed up the initial render (Time To First Content).
          * @member {Number|null} initialChunkSize=100
          */
-        initialChunkSize : 100
+        initialChunkSize     : 100,
+        /**
+         * True to automatically increase the chunkSize based on the total number of loaded items.
+         *
+         * **Progressive Enhancement Strategy:**
+         * When loading large datasets (e.g., 30k+ records), the cost of `store.add()` (sorting + event propagation)
+         * becomes the dominant bottleneck. However, the user needs immediate feedback.
+         *
+         * - **Phase 1 (Start):** Small chunks (100-250) for immediate "Time to First Content" and frequent UI updates.
+         * - **Phase 2 (Ramp):** Medium chunks (500-1500) as the user processes the initial data.
+         * - **Phase 3 (Bulk):** Massive chunks (2500-10000) for the tail end of the dataset. At this point,
+         *   throughput matters more than interactivity, as the user already has a screen full of data.
+         *
+         * This mode overrides `initialBurstCount` and `chunkSize`.
+         *
+         * @member {Boolean} progressiveChunkSize_=false
+         */
+        progressiveChunkSize_: false
     }
 
     /**
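For context on the new config: a minimal usage sketch, assuming the standard neo.mjs `Neo.create` pattern. Only `progressiveChunkSize` comes from this PR; the class name and creation call are illustrative assumptions, not taken from the diff.

```javascript
// Hedged sketch: enabling the new config on a stream-based connection.
// className and the Neo.create pattern are assumptions for illustration.
const stream = Neo.create({
    className           : 'Neo.data.connection.Stream',
    progressiveChunkSize: true // ramp chunks from 100 up to 10000 automatically
});
```

Since `progressiveChunkSize_` follows the trailing-underscore config convention, it gets a generated getter/setter, so it can also be toggled at runtime via `stream.progressiveChunkSize = true`.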
@@ -57,7 +74,7 @@ class Stream extends Base {
     async read(operation) {
         let me               = this,
             chunk            = [],
-            {chunkSize}      = me,
+            {chunkSize, progressiveChunkSize} = me,
             currentChunkSize = me.initialChunkSize || chunkSize,
             burstCount       = 0,
             count            = 0,
@@ -120,13 +137,15 @@ class Stream extends Base {
 
             burstCount++;
 
-            if (burstCount >= me.initialBurstCount) {
+            if (progressiveChunkSize) {
+                currentChunkSize = me.getProgressiveChunkSize(count);
+            } else if (burstCount >= me.initialBurstCount) {
                 currentChunkSize = chunkSize
             }
 
-            // Give the App Worker 5ms time to breathe, so that logic can act upon events.
-            // E.g., sending out vdom updates.
-            await me.timeout(5);
+            // Give the App Worker a minimal amount of time to breathe,
+            // so that logic can act upon events (e.g. sending out vdom updates).
+            await me.timeout(1);
 
             chunk = []
         }
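The `timeout(1)` call is what keeps the worker responsive between chunks. A sketch of the assumed semantics of `Base#timeout` (a promisified `setTimeout`; the actual implementation in the codebase may differ):

```javascript
// Assumed behavior of me.timeout(ms): resolve after a macrotask delay,
// letting queued events (e.g. vdom updates) run before the next chunk.
function timeout(ms) {
    return new Promise(resolve => setTimeout(resolve, ms))
}
```

Even with `ms = 1`, the `await` yields a full macrotask turn, so pending store events and vdom updates flush before parsing continues, which is why 1ms suffices where the previous 5ms mostly added dead time.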
@@ -137,6 +156,24 @@ class Stream extends Base {
         return {success: true, count}
     }
 
+    /**
+     * Calculates the next chunk size based on the total number of records processed so far.
+     * Implements a tiered ramping strategy to balance initial responsiveness with long-term throughput.
+     *
+     * @param {Number} total The total number of records processed
+     * @returns {Number} The recommended chunk size for the next batch
+     */
+    getProgressiveChunkSize(total) {
+        if (total <   100) return   100;
+        if (total <   250) return   150;
+        if (total <   500) return   250;
+        if (total <  1000) return   500;
+        if (total <  2500) return  1500;
+        if (total < 10000) return  2500;
+        if (total < 20000) return  5000;
+        return 10000
+    }
+
     /**
      * @param {String} line
      * @param {Array} chunk
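To see how the tiers compound, here is a self-contained sketch that replays the ramp for a 30k-record load. The function body mirrors the diff above; the driver loop around it is illustrative:

```javascript
// Mirrors getProgressiveChunkSize from the diff above.
function getProgressiveChunkSize(total) {
    if (total <   100) return   100;
    if (total <   250) return   150;
    if (total <   500) return   250;
    if (total <  1000) return   500;
    if (total <  2500) return  1500;
    if (total < 10000) return  2500;
    if (total < 20000) return  5000;
    return 10000
}

// Simulate a 30k-record load and count the await/yield cycles.
let total  = 0,
    yields = 0;

while (total < 30000) {
    total += getProgressiveChunkSize(total);
    yields++
}

console.log({total, yields}); // {total: 30000, yields: 11}
```

Eleven yields for 30k records, versus 300 at a fixed chunk size of 100: the first few chunks stay small enough for a fast first paint, while the bulk phase avoids paying the per-chunk `store.add()` and timeout overhead hundreds of times.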