@@ -42,8 +42,14 @@ struct __btrfs_workqueue {
 
 	/* Thresholding related variants */
 	atomic_t pending;
-	int max_active;
-	int current_max;
+
+	/* Up limit of concurrency workers */
+	int limit_active;
+
+	/* Current number of concurrency workers */
+	int current_active;
+
+	/* Threshold to change current_active */
 	int thresh;
 	unsigned int count;
 	spinlock_t thres_lock;
@@ -88,34 +94,39 @@ BTRFS_WORK_HELPER(scrubnc_helper);
 BTRFS_WORK_HELPER(scrubparity_helper);
 
 static struct __btrfs_workqueue *
-__btrfs_alloc_workqueue(const char *name, unsigned int flags, int max_active,
+__btrfs_alloc_workqueue(const char *name, unsigned int flags, int limit_active,
			 int thresh)
 {
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
 
	if (!ret)
		return NULL;
 
-	ret->max_active = max_active;
+	ret->limit_active = limit_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
-		ret->current_max = max_active;
+		ret->current_active = limit_active;
		ret->thresh = NO_THRESHOLD;
	} else {
-		ret->current_max = 1;
+		/*
+		 * For threshold-able wq, let its concurrency grow on demand.
+		 * Use minimal max_active at alloc time to reduce resource
+		 * usage.
+		 */
+		ret->current_active = 1;
		ret->thresh = thresh;
	}
 
	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
-						 ret->max_active,
-						 "btrfs", name);
+						 ret->current_active, "btrfs",
+						 name);
	else
		ret->normal_wq = alloc_workqueue("%s-%s", flags,
-						 ret->max_active, "btrfs",
+						 ret->current_active, "btrfs",
						 name);
	if (!ret->normal_wq) {
		kfree(ret);
@@ -134,7 +145,7 @@ __btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);
 
 struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
					      unsigned int flags,
-					      int max_active,
+					      int limit_active,
					      int thresh)
 {
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
@@ -143,14 +154,14 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
		return NULL;
 
	ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
-					      max_active, thresh);
+					      limit_active, thresh);
	if (!ret->normal) {
		kfree(ret);
		return NULL;
	}
 
	if (flags & WQ_HIGHPRI) {
-		ret->high = __btrfs_alloc_workqueue(name, flags, max_active,
+		ret->high = __btrfs_alloc_workqueue(name, flags, limit_active,
						    thresh);
		if (!ret->high) {
			__btrfs_destroy_workqueue(ret->normal);
@@ -180,7 +191,7 @@ static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
  */
 static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
 {
-	int new_max_active;
+	int new_current_active;
	long pending;
	int need_change = 0;
 
@@ -197,27 +208,27 @@ static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
-	new_max_active = wq->current_max;
+	new_current_active = wq->current_active;
 
	/*
	 * pending may be changed later, but it's OK since we really
	 * don't need it so accurate to calculate new_max_active.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
-		new_max_active++;
+		new_current_active++;
	if (pending < wq->thresh / 2)
-		new_max_active--;
-	new_max_active = clamp_val(new_max_active, 1, wq->max_active);
-	if (new_max_active != wq->current_max) {
+		new_current_active--;
+	new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
+	if (new_current_active != wq->current_active) {
		need_change = 1;
-		wq->current_max = new_max_active;
+		wq->current_active = new_current_active;
	}
 out:
	spin_unlock(&wq->thres_lock);
 
	if (need_change) {
-		workqueue_set_max_active(wq->normal_wq, wq->current_max);
+		workqueue_set_max_active(wq->normal_wq, wq->current_active);
	}
 }
 
@@ -351,13 +362,13 @@ void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
	kfree(wq);
 }
 
-void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max)
+void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
 {
	if (!wq)
		return;
-	wq->normal->max_active = max;
+	wq->normal->limit_active = limit_active;
	if (wq->high)
-		wq->high->max_active = max;
+		wq->high->limit_active = limit_active;
 }
 
 void btrfs_set_work_high_priority(struct btrfs_work *work)