7474#include <linux/dept.h>
7575#include <linux/utsname.h>
7676#include <linux/sched/task_stack.h>
77+ #include <linux/workqueue.h>
78+ #include <linux/irq_work.h>
79+ #include <linux/vmalloc.h>
7780#include "dept_internal.h"
7881
/*
 * NOTE(review): presumably set non-zero by DEPT_STOP() to permanently
 * disable the DEPT engine after a fatal internal error -- the setter is
 * outside this view, confirm against the full DEPT_STOP() definition.
 */
static int dept_stop ;
@@ -122,9 +125,11 @@ static int dept_per_cpu_ready;
122125 WARN(1, "DEPT_STOP: " s); \
123126 })
124127
/* Informational message, printed at most once per call site. */
#define DEPT_INFO_ONCE(s...)	pr_warn_once("DEPT_INFO_ONCE: " s)
/* Informational message, printed on every invocation. */
#define DEPT_INFO(s...)		pr_warn("DEPT_INFO: " s)
126130
127131static arch_spinlock_t dept_spin = (arch_spinlock_t )__ARCH_SPIN_LOCK_UNLOCKED ;
132+ static arch_spinlock_t dept_pool_spin = (arch_spinlock_t )__ARCH_SPIN_LOCK_UNLOCKED ;
128133
129134/*
130135 * DEPT internal engine should be careful in using outside functions
@@ -244,6 +249,7 @@ static inline bool dept_working(void)
244249
245250#define OBJECT (id , nr ) \
246251static struct dept_##id spool_##id[nr]; \
252+ static struct dept_##id rpool_##id[nr]; \
247253static DEFINE_PER_CPU(struct llist_head, lpool_##id);
248254 #include "dept_object.h"
249255#undef OBJECT
@@ -252,14 +258,70 @@ struct dept_pool dept_pool[OBJECT_NR] = {
252258#define OBJECT (id , nr ) { \
253259 .name = #id , \
254260 .obj_sz = sizeof(struct dept_##id), \
255- .obj_nr = ATOMIC_INIT(nr), \
261+ .obj_nr = nr, \
262+ .tot_nr = nr, \
263+ .acc_sz = ATOMIC_INIT(sizeof(spool_##id) + sizeof(rpool_##id)), \
256264 .node_off = offsetof(struct dept_##id, pool_node), \
257265 .spool = spool_##id, \
266+ .rpool = rpool_##id, \
258267 .lpool = &lpool_##id, },
259268 #include "dept_object.h"
260269#undef OBJECT
261270};
262271
272+ static void dept_wq_work_fn (struct work_struct * work )
273+ {
274+ int i ;
275+
276+ for (i = 0 ; i < OBJECT_NR ; i ++ ) {
277+ struct dept_pool * p = dept_pool + i ;
278+ int sz = p -> tot_nr * p -> obj_sz ;
279+ void * rpool ;
280+ bool need ;
281+
282+ arch_spin_lock (& dept_pool_spin );
283+ need = !p -> rpool ;
284+ arch_spin_unlock (& dept_pool_spin );
285+
286+ if (!need )
287+ continue ;
288+
289+ rpool = vmalloc (sz );
290+
291+ if (!rpool ) {
292+ DEPT_STOP ("Failed to extend internal resources.\n" );
293+ break ;
294+ }
295+
296+ arch_spin_lock (& dept_pool_spin );
297+ if (!p -> rpool ) {
298+ p -> rpool = rpool ;
299+ rpool = NULL ;
300+ atomic_add (sz , & p -> acc_sz );
301+ }
302+ arch_spin_unlock (& dept_pool_spin );
303+
304+ if (rpool )
305+ vfree (rpool );
306+ else
307+ DEPT_INFO ("Dept object(%s) just got refilled successfully.\n" , p -> name );
308+ }
309+ }
310+
311+ static DECLARE_WORK (dept_wq_work , dept_wq_work_fn ) ;
312+
313+ static void dept_irq_work_fn (struct irq_work * w )
314+ {
315+ schedule_work (& dept_wq_work );
316+ }
317+
318+ static DEFINE_IRQ_WORK (dept_irq_work , dept_irq_work_fn ) ;
319+
320+ static void request_rpool_refill (void )
321+ {
322+ irq_work_queue (& dept_irq_work );
323+ }
324+
263325/*
264326 * Can use llist no matter whether CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG is
265327 * enabled or not because NMI and other contexts in the same CPU never
@@ -295,19 +357,31 @@ static void *from_pool(enum object_t t)
295357 /*
296358 * Try static pool.
297359 */
298- if (atomic_read (& p -> obj_nr ) > 0 ) {
299- int idx = atomic_dec_return (& p -> obj_nr );
360+ arch_spin_lock (& dept_pool_spin );
361+
362+ if (!p -> obj_nr ) {
363+ p -> spool = p -> rpool ;
364+ p -> obj_nr = p -> rpool ? p -> tot_nr : 0 ;
365+ p -> rpool = NULL ;
366+ request_rpool_refill ();
367+ }
368+
369+ if (p -> obj_nr ) {
370+ void * ret ;
371+
372+ p -> obj_nr -- ;
373+ ret = p -> spool + (p -> obj_nr * p -> obj_sz );
374+ arch_spin_unlock (& dept_pool_spin );
300375
301- if (idx >= 0 )
302- return p -> spool + (idx * p -> obj_sz );
376+ return ret ;
303377 }
378+ arch_spin_unlock (& dept_pool_spin );
304379
305- DEPT_INFO_ONCE ("---------------------------------------------\n"
306- " Some of Dept internal resources are run out.\n"
307- " Dept might still work if the resources get freed.\n"
308- " However, the chances are Dept will suffer from\n"
309- " the lack from now. Needs to extend the internal\n"
310- " resource pools. Ask max.byungchul.park@gmail.com\n" );
380+ DEPT_INFO ("------------------------------------------\n"
381+ " Dept object(%s) is run out.\n"
382+ " Dept is trying to refill the object.\n"
383+ " Nevertheless, if it fails, Dept will stop.\n" ,
384+ p -> name );
311385 return NULL ;
312386}
313387
@@ -2964,15 +3038,15 @@ void __init dept_init(void)
29643038 pr_info ("... DEPT_MAX_ECXT_HELD : %d\n" , DEPT_MAX_ECXT_HELD );
29653039 pr_info ("... DEPT_MAX_SUBCLASSES : %d\n" , DEPT_MAX_SUBCLASSES );
29663040#define OBJECT (id , nr ) \
2967- pr_info("... memory used by %s: %zu KB\n", \
2968- #id, B2KB(sizeof(struct dept_ ##id) * nr ));
3041+ pr_info("... memory initially used by %s: %zu KB\n", \
3042+ #id, B2KB(sizeof(spool_ ##id) + sizeof(rpool_##id) ));
29693043 #include "dept_object.h"
29703044#undef OBJECT
29713045#define HASH (id , bits ) \
29723046 pr_info("... hash list head used by %s: %zu KB\n", \
29733047 #id, B2KB(sizeof(struct hlist_head) * (1UL << bits)));
29743048 #include "dept_hash.h"
29753049#undef HASH
2976- pr_info ("... total memory used by objects and hashs: %zu KB\n" , B2KB (mem_total ));
3050+ pr_info ("... total memory initially used by objects and hashs: %zu KB\n" , B2KB (mem_total ));
29773051 pr_info ("... per task memory footprint: %zu bytes\n" , sizeof (struct dept_task ));
29783052}
0 commit comments