diff --git a/Makefile b/Makefile index c56d365e7b2e..3e099550a8d4 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 6 SUBLEVEL = 4 -EXTRAVERSION = -gvfs +EXTRAVERSION = -gvts NAME = Charred Weasel # To prevent adding plus sign diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index 9208dd5a0922..0d93aa4dd4b8 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -386,4 +386,4 @@ 377 i386 copy_file_range sys_copy_file_range 378 i386 preadv2 sys_preadv2 compat_sys_preadv2 379 i386 pwritev2 sys_pwritev2 compat_sys_pwritev2 -380 i386 gvfs sys_gvfs +380 i386 gvts sys_gvts diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index 002bb646ee14..24b67f89c507 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -335,7 +335,7 @@ 326 common copy_file_range sys_copy_file_range 327 64 preadv2 sys_preadv2 328 64 pwritev2 sys_pwritev2 -329 common gvfs sys_gvfs +329 common gvts sys_gvts # # x32-specific system call numbers start at 512 to avoid cache impact diff --git a/include/linux/sched.h b/include/linux/sched.h index 73eb66f8721a..a955bec2f84f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -5,16 +5,16 @@ #include -#ifdef CONFIG_GVFS_AMP -#define NUM_CPU_TYPES CONFIG_GVFS_NUM_CPU_TYPES -#if CONFIG_GVFS_BASE_CPU_TYPE >= CONFIG_GVFS_NUM_CPU_TYPES -ERROR "CONFIG_GVFS_BASE_CPU_TYPE should be smaller than CONFIG_GVFS_NUM_CPU_TYPES" +#ifdef CONFIG_GVTS_AMP +#define NUM_CPU_TYPES CONFIG_GVTS_NUM_CPU_TYPES +#if CONFIG_GVTS_BASE_CPU_TYPE >= CONFIG_GVTS_NUM_CPU_TYPES +ERROR "CONFIG_GVTS_BASE_CPU_TYPE should be smaller than CONFIG_GVTS_NUM_CPU_TYPES" #endif -#else /* !CONFIG_GVFS_AMP */ +#else /* !CONFIG_GVTS_AMP */ #define NUM_CPU_TYPES 2 /* default value */ #endif -#ifdef CONFIG_GVFS_STATS +#ifdef CONFIG_GVTS_STATS #define NUM_MAX_TARGET_DIFF 10 #endif @@ -1054,11 +1054,11 @@ struct sched_domain { struct sched_domain *parent; /* top domain must be null terminated */ struct sched_domain *child; /* bottom domain must be null terminated */ struct sched_group *groups; /* the balancing groups of the domain */ -#ifdef CONFIG_GVFS - struct sd_vruntime *vruntime; /* the balancing criteria of GVFS */ +#ifdef CONFIG_GVTS + struct sd_vruntime *vruntime; /* the balancing criteria of GVTS */ u64 vruntime_interval; u64 vruntime_tolerance; -#endif /* CONFIG_GVFS */ +#endif /* CONFIG_GVTS */ unsigned long min_interval; /* Minimum balance interval ms */ unsigned long max_interval; /* Maximum balance interval ms */ unsigned int busy_factor; /* less balancing by factor if busy */ @@ -1085,7 +1085,7 @@ struct sched_domain { unsigned long next_decay_max_lb_cost; #ifdef CONFIG_SCHEDSTATS -#ifdef CONFIG_GVFS_STATS +#ifdef CONFIG_GVTS_STATS /* _target_vruntime_balance() */ unsigned int target_update_racing; /* __target_vruntime_balance() */ @@ -1128,7 +1128,7 @@ struct sched_domain { unsigned int atb_pushed; unsigned int atb_pushed_under; unsigned int target_diff[NUM_MAX_TARGET_DIFF]; -#endif /* CONFIG_GVFS_STATS */ +#endif /* CONFIG_GVTS_STATS */ /* load_balance() stats */ unsigned int lb_count[CPU_MAX_IDLE_TYPES]; @@ -1202,7 +1202,7 @@ struct sd_data { struct sched_domain **__percpu sd; struct sched_group **__percpu sg; struct sched_group_capacity **__percpu sgc; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS struct sd_vruntime **__percpu sdv; #endif }; @@ -1284,7 +1284,7 @@ struct sched_avg { unsigned long load_avg, 
util_avg; }; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS struct remember_info; #endif @@ -1334,19 +1334,19 @@ struct sched_entity { u64 sum_exec_runtime; u64 prev_sum_exec_runtime; u64 vruntime; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP u64 sum_perf_runtime; u32 vruntime_rem; u32 perf_rem; u64 sum_type_runtime[NUM_CPU_TYPES]; -#endif /* CONFIG_GVFS_AMP */ -#ifdef CONFIG_GVFS +#endif /* CONFIG_GVTS_AMP */ +#ifdef CONFIG_GVTS u64 sleep_start; /* remember the sleep start time to prevent vruntime normalization for short sleep */ u64 sleep_target; /* remember the target when start to sleep for vruntime normalization */ -#endif /* CONFIG_GVFS_AMP */ -#ifdef CONFIG_GVFS_DEBUG_NORMALIZATION /* for debug */ +#endif /* CONFIG_GVTS_AMP */ +#ifdef CONFIG_GVTS_DEBUG_NORMALIZATION /* for debug */ unsigned int num_normalization; u64 added_normalization; u64 max_added_normalization; @@ -1365,7 +1365,7 @@ struct sched_entity { struct cfs_rq *cfs_rq; /* rq "owned" by this entity/group: */ struct cfs_rq *my_q; -#ifdef CONFIG_GVFS_BANDWIDTH +#ifdef CONFIG_GVTS_BANDWIDTH struct list_head *state_q; /* refer to active_q or throt_q. NULL while running */ struct list_head state_node; /* node inserted to active_q or throt_q */ @@ -1381,7 +1381,7 @@ struct sched_entity { */ struct sched_avg avg ____cacheline_aligned_in_smp; #endif -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS struct load_weight eff_load; /* for vruntime update */ struct sched_entity *curr_child; /* for update effective_load */ unsigned long eff_weight_real; /* NOT affected by MIN_SHARES and MAX_SHARES */ @@ -1390,14 +1390,14 @@ struct sched_entity { or eff_weight_real * util_avg << ADDED_BITS / efficiency[rq->cpu_type] */ -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP unsigned long *effi; /* a pointer to task->effi related to this se */ unsigned long __lagged_weight[NUM_CPU_TYPES]; /* lagged_weight for each type */ #endif s64 lagged; /* necessary time to reach se->vruntime to lagged_target */ u64 lagged_target; /* target used when calculating @lagged. 
*/ unsigned long tg_load_sum_contrib; -#endif /* CONFIG_GVFS */ +#endif /* CONFIG_GVTS */ }; struct sched_rt_entity { @@ -1517,7 +1517,7 @@ struct task_struct { int wake_cpu; #endif int on_rq; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP int effi_mode; unsigned long __effi[NUM_CPU_TYPES]; /* set by users or estimations */ unsigned long effi[NUM_CPU_TYPES]; /* normalized value of __efficiency */ @@ -1611,7 +1611,7 @@ struct task_struct { pid_t pid; pid_t tgid; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS struct remember_info *remember; #endif diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index d07a3f71c338..f4bb28e1002d 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -898,8 +898,8 @@ asmlinkage long sys_copy_file_range(int fd_in, loff_t __user *off_in, asmlinkage long sys_mlock2(unsigned long start, size_t len, int flags); -#ifdef CONFIG_GVFS_SYSCALL -asmlinkage long sys_gvfs(int op, int id, u64 num, void __user *vars); +#ifdef CONFIG_GVTS_SYSCALL +asmlinkage long sys_gvts(int op, int id, u64 num, void __user *vars); #endif #endif diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index a4efb765bbcd..70c496b12b54 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -722,8 +722,8 @@ __SC_COMP(__NR_preadv2, sys_preadv2, compat_sys_preadv2) #define __NR_pwritev2 287 __SC_COMP(__NR_pwritev2, sys_pwritev2, compat_sys_pwritev2) -#define __NR_gvfs 288 -__SYSCALL(__NR_gvfs, sys_gvfs) +#define __NR_gvts 288 +__SYSCALL(__NR_gvts, sys_gvts) #undef __NR_syscalls #define __NR_syscalls 289 diff --git a/init/Kconfig b/init/Kconfig index 5313102b2190..4c37cd05858a 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -787,7 +787,7 @@ config BUILD_BIN2C bool default n -config GVFS +config GVTS bool "Global Virtual-time Fair Scheduler by CDKIM" default y help @@ -800,7 +800,7 @@ config GVFS * Topology-aware hierarchical balancing for scalability The 'interval' length, the amount of target increase for each interval, determines the accuracy of fairness and the frequency of thread migreation. - GVFS awares the topology and uses the long interval length for NUMA, + GVTS awares the topology and uses the long interval length for NUMA, and the short interval length for SMT. This mitigates the thread migration overhead and improves fairness accuracy. @@ -821,7 +821,7 @@ config GVFS the next target as simultaneously as possible. - When calculating lagged, consider se->avg.util_avg value. eff_weight_real = eff_load.weight(w/o normalization) * util_avg - >> (SCHED_LOAD_SHIFT - CONFIG_GVFS_LAGGED_WEIGHT_ADDED_BITS) + >> (SCHED_LOAD_SHIFT - CONFIG_GVTS_LAGGED_WEIGHT_ADDED_BITS) and use eff_weight_real instead of eff_load.weight to calculat lagged. * New vruntime normalization for slept or throttled tasks @@ -840,120 +840,120 @@ config GVFS since the local group CPUs was balanced at the lower level domain. -config GVFS_BANDWIDTH - bool "Support for bandwidth control in GVFS (EXPERIMENTAL)" +config GVTS_BANDWIDTH + bool "Support for bandwidth control in GVTS (EXPERIMENTAL)" default y - depends on GVFS && CFS_BANDWIDTH + depends on GVTS && CFS_BANDWIDTH help - Support for bandwidth control (CFS_BANDWIDTH) in GVFS + Support for bandwidth control (CFS_BANDWIDTH) in GVTS Currently, this does not work properly for some cases. 
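For readers wiring this up from user space, the following hedged sketch shows one way to reach the new entry point through the sys_gvts() prototype and the syscall numbers patched above. The number 329 applies to the x86-64 table (288 for the asm-generic table, 380 for i386), and the REMEMBER_TASK operation value 13 is taken from the op table later in kernel/sched/core.c; passing the pid in @id with @num == 0 and @vars == NULL is an assumption based on the -EINVAL checks in that handler, not something this hunk spells out.

/* user-space sketch; not part of the patch */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define __NR_gvts_x86_64   329	/* arch/x86/entry/syscalls/syscall_64.tbl above */
#define GVTS_REMEMBER_TASK 13	/* kernel/sched/core.c later in this patch */

int main(void)
{
	long ret = syscall(__NR_gvts_x86_64, GVTS_REMEMBER_TASK,
			   (int)getpid(), 0ULL, NULL);
	if (ret < 0)
		perror("gvts(REMEMBER_TASK)");
	return ret < 0;
}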
-config GVFS_DEBUG_NORMALIZATION +config GVTS_DEBUG_NORMALIZATION bool "Debug normalization of vruntime" default n - depends on GVFS + depends on GVTS help Debug information for virtual time normalization when a task is waking up -config GVFS_AMP - bool "AMP support of GVFS" +config GVTS_AMP + bool "AMP support of GVTS" default y - depends on GVFS + depends on GVTS help - GVFS for AMP. + GVTS for AMP. efficiency (and efficiency modes) for tasks. define cpu types for each rq. minimize max(lagged_src, lagged_dst) at targer vruntime balancing. Try to balance even if only 1 task at source runqueue when source is slower than destination. - faster-core-first for NOHZ of GVFS_AMP + faster-core-first for NOHZ of GVTS_AMP -config GVFS_NUM_CPU_TYPES +config GVTS_NUM_CPU_TYPES int "Number of CPU types" range 1 16 default 2 - depends on GVFS_AMP + depends on GVTS_AMP help - The number of CPU types supported by GVFS. + The number of CPU types supported by GVTS. -config GVFS_BASE_CPU_TYPE +config GVTS_BASE_CPU_TYPE int "CPU type id for the base performance" range -1 15 - depends on GVFS_AMP + depends on GVTS_AMP default -1 help The CPU type id used to define the base performance. - This value should be smaller than GVFS_NUM_CPU_TYPES + This value should be smaller than GVTS_NUM_CPU_TYPES If this says -1, FAIR_SHARE is used. FAIR_SHARE = SUM(num_type_i_cpus * efficiency[i]) / SUM(num_type_i_cpus) -config GVFS_STATS - bool "Schedstats for GVFS" +config GVTS_STATS + bool "Schedstats for GVTS" default y - depends on GVFS && SCHEDSTATS + depends on GVTS && SCHEDSTATS help - Various statistics for debugging GVFS + Various statistics for debugging GVTS -config GVFS_INTERVAL_SMT_SHARED +config GVTS_INTERVAL_SMT_SHARED int "Target interval for cores sharing pipelines (ms)" range 1 3000 - depends on GVFS + depends on GVTS default 30 -config GVFS_INTERVAL_LLC_SHARED +config GVTS_INTERVAL_LLC_SHARED int "Target interval for cores sharing a LLC (ms)" range 1 3000 - depends on GVFS + depends on GVTS default 90 -config GVFS_INTERVAL_NUMA +config GVTS_INTERVAL_NUMA int "Target interval for cores in different nodes (ms)" range 1 3000 - depends on GVFS + depends on GVTS default 150 help 1000 indicates 1s target interval for local nodes, and 2s target interval for remote nodes. interval = value / 10 * numa_distance -config GVFS_TOLERANCE_PERCENT +config GVTS_TOLERANCE_PERCENT int "Tolerance value as a percentage of target interval." range 0 100 - depends on GVFS + depends on GVTS default 30 -config GVFS_TARGET_DIFF_THRESHOLD +config GVTS_TARGET_DIFF_THRESHOLD int "Target difference threshold for source activated balancing" range 2 100 default 3 - depends on GVFS + depends on GVTS help When difference between cfs_rq->target_vruntime and rq->sd_vruntime->target - is larger than CONFIG_GVFS_TARGET_DIFF_THRESHOLD * sd_vruntime->interval, + is larger than CONFIG_GVTS_TARGET_DIFF_THRESHOLD * sd_vruntime->interval, the source cpu activated the target vruntime balancing. -config GVFS_TARGET_DIFF_INFEASIBLE_WEIGHT +config GVTS_TARGET_DIFF_INFEASIBLE_WEIGHT int "Target difference threshold for detecting infeasible weight" range 2 100 default 10 - depends on GVFS + depends on GVTS help Target difference threshold for detecting infeasible weight - GVFS detects infeasible weight if - 1. target diff >= GVFS_TARGET_DIFF_INFEASIBLE_WEIGHT + GVTS detects infeasible weight if + 1. target diff >= GVTS_TARGET_DIFF_INFEASIBLE_WEIGHT 2. nr_running = 1 3. 
lagged_weight > others' lagged_weight Then, delete_min_target and do not update min_target for the task The infeasibility of a CPU will be cleared 1. when the CPU becomes idle. - 2. when target diff < GVFS_TARGET_DIFF_INFEASIBLE_WEIGHT + 2. when target diff < GVTS_TARGET_DIFF_INFEASIBLE_WEIGHT -config GVFS_LAGGED_WEIGHT_ADDED_BITS +config GVTS_LAGGED_WEIGHT_ADDED_BITS int 'The number of added bits to improve the accuracy of lagged_weight' range 0 10 - depends on GVFS + depends on GVTS default 4 help If this value is 0, the accuracy of lagged_weight is 10. diff --git a/kernel/exit.c b/kernel/exit.c index db350c7e73e0..126544e6efca 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -649,7 +649,7 @@ static void check_stack_usage(void) static inline void check_stack_usage(void) {} #endif -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS void remember_task_exit(struct task_struct *p); #endif @@ -658,7 +658,7 @@ void do_exit(long code) struct task_struct *tsk = current; int group_dead; TASKS_RCU(int tasks_rcu_i); -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS remember_task_exit(tsk); #endif profile_task_exit(tsk); diff --git a/kernel/panic.c b/kernel/panic.c index b8273a5f5d38..bf6999e74fb6 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -93,7 +93,7 @@ void nmi_panic(struct pt_regs *regs, const char *msg) } EXPORT_SYMBOL(nmi_panic); -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS void dump_sched(void); #endif /** @@ -153,7 +153,7 @@ void panic(const char *fmt, ...) */ if (!test_taint(TAINT_DIE) && oops_in_progress <= 1) { dump_stack(); -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS dump_sched(); #endif } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1c75493d2c21..244c40cbc6f0 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -111,14 +111,14 @@ void update_rq_clock(struct rq *rq) update_rq_clock_task(rq, delta); } -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP #if NUM_CPU_TYPES == 2 unsigned long DEFAULT_EFFICIENCY[NUM_CPU_TYPES] = {1024, 1703}; #else ERROR "DEFAULT_EFFICIENCY is not defined." #endif -#if CONFIG_GVFS_BASE_CPU_TYPE == -1 /* fair share base */ +#if CONFIG_GVTS_BASE_CPU_TYPE == -1 /* fair share base */ static __read_mostly atomic_t num_cpus_type_ver = {0}; /* odd: changing, even: stable */ static __read_mostly atomic_t num_cpus_type[NUM_CPU_TYPES]; @@ -142,10 +142,10 @@ static void __set_rq_cpu_type(struct rq *rq, int new) atomic_inc(&num_cpus_type_ver); /* now, even; stable... 
*/ smp_mb(); } -#else /* CONFIG_GVFS_BASE_CPU_TYPE >= 0 */ +#else /* CONFIG_GVTS_BASE_CPU_TYPE >= 0 */ static inline void __init init_rq_cpu_type(struct rq *rq) { rq->cpu_type = 0; } static inline void __set_rq_cpu_type(struct rq *rq, int new) { rq->cpu_type = new; } -#endif /* CONFIG_GVFS_BASE_CPU_TYPE >= 0 */ +#endif /* CONFIG_GVTS_BASE_CPU_TYPE >= 0 */ static void set_rq_cpu_type(struct rq *rq, int type) { if (rq->cpu_type == type) @@ -165,9 +165,9 @@ void normalize_efficiency(struct task_struct *p) { unsigned long base; int type; -#if CONFIG_GVFS_BASE_CPU_TYPE >= 0 - base = p->__effi[CONFIG_GVFS_BASE_CPU_TYPE]; -#else /* CONFIG_GVFS_BASE_CPU_TYPE == -1, that is, fair share base */ +#if CONFIG_GVTS_BASE_CPU_TYPE >= 0 + base = p->__effi[CONFIG_GVTS_BASE_CPU_TYPE]; +#else /* CONFIG_GVTS_BASE_CPU_TYPE == -1, that is, fair share base */ int num_cpus, num_cpus_total = 0; int version; @@ -191,7 +191,7 @@ printk(KERN_ERR "num_cpus_total: %d num_cpus_type_ver: %d base: %ld __effi: %ld if (unlikely(!num_cpus_total)) num_cpus_total = 1; base = (base + (num_cpus_total >> 1)) / num_cpus_total; -#endif /* CONFIG_GVFS_BASE_CPU_TYPE == -1 */ +#endif /* CONFIG_GVTS_BASE_CPU_TYPE == -1 */ if (unlikely(!base)) base = 1024; for_each_type(type) @@ -208,13 +208,13 @@ printk(KERN_ERR "num_cpus_total: %d num_cpus_type_ver: %d base: %ld __effi: %ld #define set_efficiency_default(p) __set_efficiency_mode(p, EFFICIENCY_DEFAULT, DEFAULT_EFFICIENCY) -#endif /* CONFIG_GVFS_AMP */ +#endif /* CONFIG_GVTS_AMP */ -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS /* remember tasks and related things */ struct task_runtime_info { u64 sum_exec_runtime; - /* below two are meaningless if CONFIG_GVFS_AMP is not defined */ + /* below two are meaningless if CONFIG_GVTS_AMP is not defined */ u64 sum_perf_runtime; u64 sum_type_runtime[NUM_CPU_TYPES]; }; @@ -230,10 +230,10 @@ struct remember_info { u64 vruntime_init; u64 vruntime_end; u64 sum_exec_runtime; - /* below two are meaningless if CONFIG_GVFS_AMP is not defined */ + /* below two are meaningless if CONFIG_GVTS_AMP is not defined */ u64 sum_perf_runtime; u64 sum_type_runtime[NUM_CPU_TYPES]; -#ifdef CONFIG_GVFS_DEBUG_NORMALIZATION +#ifdef CONFIG_GVTS_DEBUG_NORMALIZATION unsigned int num_normalization; u64 added_normalization; u64 max_added_normalization; @@ -369,12 +369,12 @@ remember_task_exit(struct task_struct *p) { info->vruntime_end = p->se.vruntime; info->walltime = jiffies_to_nsecs(now - (unsigned long) info->walltime); info->sum_exec_runtime = p->se.sum_exec_runtime; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP info->sum_perf_runtime = p->se.sum_perf_runtime; for_each_type(type) info->sum_type_runtime[type] = p->se.sum_type_runtime[type]; #endif -#ifdef CONFIG_GVFS_DEBUG_NORMALIZATION +#ifdef CONFIG_GVTS_DEBUG_NORMALIZATION info->num_normalization = p->se.num_normalization; info->added_normalization = p->se.added_normalization; info->max_added_normalization = p->se.max_added_normalization; @@ -500,7 +500,7 @@ static int get_remembered_info(int num_entry, struct remember_info *user) { return ret; } -#endif /* CONFIG_GVFS */ +#endif /* CONFIG_GVTS */ /* * Debugging: various feature bits @@ -1080,7 +1080,7 @@ int tg_nop(struct task_group *tg, void *data) } #endif -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS void update_tg_load_sum(struct sched_entity *se, struct task_group *tg, unsigned long old, unsigned long new, int no_update_if_zero); @@ -1090,10 +1090,10 @@ static void set_load_weight(struct task_struct *p) { int prio = p->static_prio - MAX_RT_PRIO; struct load_weight *load = 
&p->se.load; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS unsigned long old_weight = load->weight; #endif -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP int type; #endif @@ -1108,15 +1108,15 @@ static void set_load_weight(struct task_struct *p) load->weight = scale_load(sched_prio_to_weight[prio]); load->inv_weight = sched_prio_to_wmult[prio]; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS if (p->sched_class == &fair_sched_class) { p->se.eff_load.weight = 0; p->se.eff_load.inv_weight = 0; p->se.eff_weight_real = 0; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP for_each_type(type) p->se.__lagged_weight[type] = 0; -#endif /* !CONFIG_GVFS_AMP */ +#endif /* !CONFIG_GVTS_AMP */ p->se.lagged_weight = 0; update_tg_load_sum(&p->se, p->sched_task_group, old_weight, load->weight, TG_LOAD_SUM_CHANGE); @@ -2493,22 +2493,22 @@ void __dl_clear_params(struct task_struct *p) */ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) { -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP struct task_struct *curr = current; #endif -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP int type; #endif p->on_rq = 0; p->se.on_rq = 0; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS p->se.eff_load.weight = 0; /* Not yet set. This is context dependent value. */ p->se.eff_load.inv_weight = 0; p->se.curr_child = NULL; /* always NULL for leaf sched_entity */ p->se.eff_weight_real = 0; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP if (curr && curr->effi_mode) { p->effi_mode = curr->effi_mode; for_each_type(type) @@ -2521,29 +2521,29 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) p->se.effi = p->effi; for_each_type(type) p->se.__lagged_weight[type] = 0; -#endif /* CONFIG_GVFS_AMP */ +#endif /* CONFIG_GVTS_AMP */ p->se.lagged_weight = 0; -#endif /* CONFIG_GVFS */ +#endif /* CONFIG_GVTS */ p->se.exec_start = 0; p->se.sum_exec_runtime = 0; p->se.prev_sum_exec_runtime = 0; p->se.nr_migrations = 0; p->se.vruntime = 0; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP p->se.sum_perf_runtime = 0; p->se.vruntime_rem = 0; p->se.perf_rem = 0; for_each_type(type) p->se.sum_type_runtime[type] = 0; -#endif /* CONFIG_GVFS_AMP */ +#endif /* CONFIG_GVTS_AMP */ INIT_LIST_HEAD(&p->se.group_node); #ifdef CONFIG_FAIR_GROUP_SCHED p->se.cfs_rq = NULL; -#ifdef CONFIG_GVFS_BANDWIDTH +#ifdef CONFIG_GVTS_BANDWIDTH p->se.state_q = NULL; INIT_LIST_HEAD(&p->se.state_node); -#endif /* CONFIG_GVFS_BANDWIDTH */ +#endif /* CONFIG_GVTS_BANDWIDTH */ #endif /* CONFIG_FAIR_GROUP_SCHED */ #ifdef CONFIG_SCHEDSTATS @@ -2551,10 +2551,10 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) memset(&p->se.statistics, 0, sizeof(p->se.statistics)); #endif -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS p->se.tg_load_sum_contrib = 0; #endif -#ifdef CONFIG_GVFS_DEBUG_NORMALIZATION +#ifdef CONFIG_GVTS_DEBUG_NORMALIZATION p->se.num_normalization = 0; p->se.added_normalization = 0; p->se.max_added_normalization = 0; @@ -2771,7 +2771,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) raw_spin_lock_irqsave(&p->pi_lock, flags); set_task_cpu(p, cpu); raw_spin_unlock_irqrestore(&p->pi_lock, flags); -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS remember_task_fork(p, current); #endif @@ -3581,7 +3581,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev) BUG(); /* the idle class will always have a runnable task */ } -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS void transit_busy_to_idle(struct rq *rq); void transit_idle_to_busy(struct rq *rq); #endif @@ -3702,7 +3702,7 @@ static void __sched notrace __schedule(bool preempt) 
rq->nr_switches++; rq->curr = next; ++*switch_count; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS if (unlikely(next == rq->idle && rq->cfs.was_idle == CFS_RQ_WAS_BUSY)) transit_busy_to_idle(rq); #endif @@ -5426,7 +5426,7 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, return retval; } -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS static int do_remember_task(pid_t pid) { struct task_struct *p; long retval; @@ -5461,7 +5461,7 @@ static void __do_get_task_runtime(struct task_struct *p, struct task_runtime_inf struct task_struct *t = p; struct task_struct *pos; int init_tid = 0; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP int type; #endif @@ -5476,11 +5476,11 @@ static void __do_get_task_runtime(struct task_struct *p, struct task_runtime_inf } info->sum_exec_runtime += t->se.sum_exec_runtime; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP info->sum_perf_runtime += t->se.sum_perf_runtime; for_each_type(type) info->sum_type_runtime[type] += t->se.sum_type_runtime[type]; -#endif /* CONFIG_GVFS_AMP */ +#endif /* CONFIG_GVTS_AMP */ if (!list_empty(&t->children)) { int init_child_tid = 0; @@ -5546,7 +5546,7 @@ static long do_get_task_runtime(pid_t pid, void __user * __info) return 0; } -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP static long set_efficiency(struct task_struct *p, unsigned int mode, unsigned long *effi) { int type; @@ -5655,10 +5655,10 @@ static int do_get_efficiency(pid_t pid, unsigned int raw, void __user *vars) { return mode; } -#endif /* CONFIG_GVFS_AMP */ -#endif /* CONFIG_GVFS */ +#endif /* CONFIG_GVTS_AMP */ +#endif /* CONFIG_GVTS */ -/* Definition of operations of gvfs system call */ +/* Definition of operations of gvts system call */ #define SET_FAST_CORE 0 /* obsolute */ #define SET_SLOW_CORE 1 /* obsolute */ #define SET_UNIT_VRUNTIME 2 /* obsolute */ @@ -5674,16 +5674,16 @@ static int do_get_efficiency(pid_t pid, unsigned int raw, void __user *vars) { #define REMEMBER_TASK 13 #define GET_REMEMBERED_INFO 14 /** - * sys_gvfs - set/change the gvfs related things, especially for gvfs_amp + * sys_gvts - set/change the gvts related things, especially for gvts_amp * @op: the operation id * @id: cpu id or pid, mostly * @num: a number, e.g., unit_vruntime * @vars: a user space pointer for large parameter */ -SYSCALL_DEFINE4(gvfs, int, op, int, id, u64, num, void __user *, vars) +SYSCALL_DEFINE4(gvts, int, op, int, id, u64, num, void __user *, vars) { switch(op) { -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS /* for compatibility */ case SET_FAST_CORE: case SET_SLOW_CORE: @@ -5705,7 +5705,7 @@ SYSCALL_DEFINE4(gvfs, int, op, int, id, u64, num, void __user *, vars) return -EINVAL; return do_get_task_runtime(id, vars); -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP case SET_CPU_TYPE: if (num >= NUM_CPU_TYPES || vars) return -EINVAL; @@ -5723,13 +5723,13 @@ SYSCALL_DEFINE4(gvfs, int, op, int, id, u64, num, void __user *, vars) case GET_EFFICIENCY: return do_get_efficiency(id, num, vars); -#else /* !CONFIG_GVFS_AMP */ +#else /* !CONFIG_GVTS_AMP */ case SET_CPU_TYPE: case GET_CPU_TYPE: case SET_EFFICIENCY: case GET_EFFICIENCY: return -EINVAL; -#endif /* !CONFIG_GVFS_AMP */ +#endif /* !CONFIG_GVTS_AMP */ case REMEMBER_TASK: if (num || vars) return -EINVAL; @@ -5755,7 +5755,7 @@ SYSCALL_DEFINE4(gvfs, int, op, int, id, u64, num, void __user *, vars) } else return -EINVAL; -#endif /* CONFIG_GVFS */ +#endif /* CONFIG_GVTS */ default: /* invalid operation */ return -EINVAL; } @@ -6704,7 +6704,7 @@ static void update_top_cache_domain(int cpu) rcu_assign_pointer(per_cpu(sd_asym, cpu), sd); } -#ifdef 
CONFIG_GVFS +#ifdef CONFIG_GVTS /* @sd: sched domain which will be assigned to rq->sd */ static void cpu_attach_sdv(struct rq *rq, struct sched_domain *sd) { @@ -6744,7 +6744,7 @@ cpu_attach_sdv(struct rq *rq, struct sched_domain *sd) { } } } -#endif /* CONFIG_GVFS */ +#endif /* CONFIG_GVTS */ static struct sched_domain *cpu_merge_domain(struct sched_domain *sd, int cpu) { @@ -6768,7 +6768,7 @@ static struct sched_domain *cpu_merge_domain(struct sched_domain *sd, int cpu) if (parent->flags & SD_PREFER_SIBLING) tmp->flags |= SD_PREFER_SIBLING; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS if (parent->vruntime) { struct sd_vruntime *sdv = parent->vruntime; /* to be deleted */ struct sd_vruntime *parent_sdv = sdv->parent; @@ -6815,14 +6815,14 @@ static struct sched_domain *cpu_merge_domain(struct sched_domain *sd, int cpu) atomic_dec(&sdv->ref); parent->vruntime = NULL; } -#endif /* CONFIG_GVFS */ +#endif /* CONFIG_GVTS */ destroy_sched_domain(parent, cpu); } else tmp = tmp->parent; } if (sd && sd_degenerate(sd)) { -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS if (sd->vruntime) { struct sd_vruntime *sdv, *parent_sdv, *prev; sdv = sd->vruntime; @@ -6853,7 +6853,7 @@ static struct sched_domain *cpu_merge_domain(struct sched_domain *sd, int cpu) atomic_dec(&sdv->ref); sd->vruntime = NULL; } -#endif /* CONFIG_GVFS */ +#endif /* CONFIG_GVTS */ tmp = sd; sd = sd->parent; destroy_sched_domain(tmp, cpu); @@ -6878,7 +6878,7 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) rq_attach_root(rq, rd); tmp = rq->sd; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS cpu_attach_sdv(rq, sd); #endif rcu_assign_pointer(rq->sd, sd); @@ -7101,7 +7101,7 @@ build_sched_groups(struct sched_domain *sd, int cpu) return 0; } -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS /* show the structure of scheduing domains */ static char cpu_str[nr_cpumask_bits + 1]; static char *cpumask_str(const struct cpumask *cpu_map) { @@ -7349,7 +7349,7 @@ static void claim_allocations(int cpu, struct sched_domain *sd) WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); *per_cpu_ptr(sdd->sd, cpu) = NULL; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS if (atomic_read(&(*per_cpu_ptr(sdd->sdv, cpu))->ref)) *per_cpu_ptr(sdd->sdv, cpu) = NULL; #endif @@ -7454,10 +7454,10 @@ sd_init(struct sched_domain_topology_level *tl, int cpu) sd->flags |= SD_PREFER_SIBLING; sd->imbalance_pct = 110; sd->smt_gain = 1178; /* ~15% */ -#ifdef CONFIG_GVFS - sd->vruntime_interval = CONFIG_GVFS_INTERVAL_SMT_SHARED * NSEC_PER_MSEC; - sd->vruntime_tolerance = CONFIG_GVFS_INTERVAL_SMT_SHARED - * CONFIG_GVFS_TOLERANCE_PERCENT / 100 +#ifdef CONFIG_GVTS + sd->vruntime_interval = CONFIG_GVTS_INTERVAL_SMT_SHARED * NSEC_PER_MSEC; + sd->vruntime_tolerance = CONFIG_GVTS_INTERVAL_SMT_SHARED + * CONFIG_GVTS_TOLERANCE_PERCENT / 100 * NSEC_PER_MSEC; #endif @@ -7465,10 +7465,10 @@ sd_init(struct sched_domain_topology_level *tl, int cpu) sd->imbalance_pct = 117; sd->cache_nice_tries = 1; sd->busy_idx = 2; -#ifdef CONFIG_GVFS - sd->vruntime_interval = CONFIG_GVFS_INTERVAL_LLC_SHARED * NSEC_PER_MSEC; - sd->vruntime_tolerance = CONFIG_GVFS_INTERVAL_LLC_SHARED - * CONFIG_GVFS_TOLERANCE_PERCENT / 100 +#ifdef CONFIG_GVTS + sd->vruntime_interval = CONFIG_GVTS_INTERVAL_LLC_SHARED * NSEC_PER_MSEC; + sd->vruntime_tolerance = CONFIG_GVTS_INTERVAL_LLC_SHARED + * CONFIG_GVTS_TOLERANCE_PERCENT / 100 * NSEC_PER_MSEC; #endif @@ -7484,15 +7484,15 @@ sd_init(struct sched_domain_topology_level *tl, int cpu) SD_BALANCE_FORK | SD_WAKE_AFFINE); } -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS /* according to 
include/linux/topology.h, * numa distance is 10 (for local nodes), 20 (for remote nodes), etc. * We decide to vruntime_interval as 1s, 2s, etc. */ - sd->vruntime_interval = CONFIG_GVFS_INTERVAL_NUMA * NSEC_PER_MSEC + sd->vruntime_interval = CONFIG_GVTS_INTERVAL_NUMA * NSEC_PER_MSEC * sched_domains_numa_distance[tl->numa_level] / 10; - sd->vruntime_tolerance = CONFIG_GVFS_INTERVAL_NUMA - * CONFIG_GVFS_TOLERANCE_PERCENT / 100 /* for tolerance percentage */ + sd->vruntime_tolerance = CONFIG_GVTS_INTERVAL_NUMA + * CONFIG_GVTS_TOLERANCE_PERCENT / 100 /* for tolerance percentage */ * NSEC_PER_MSEC * sched_domains_numa_distance[tl->numa_level] / 10; /* for numa distance unit */ @@ -7504,11 +7504,11 @@ sd_init(struct sched_domain_topology_level *tl, int cpu) sd->cache_nice_tries = 1; sd->busy_idx = 2; sd->idle_idx = 1; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS /* I don't know... just similar to SD_SHARE_PKG_RESOURCES... */ - sd->vruntime_interval = CONFIG_GVFS_INTERVAL_LLC_SHARED * NSEC_PER_MSEC; - sd->vruntime_tolerance = CONFIG_GVFS_INTERVAL_LLC_SHARED - * CONFIG_GVFS_TOLERANCE_PERCENT / 100 + sd->vruntime_interval = CONFIG_GVTS_INTERVAL_LLC_SHARED * NSEC_PER_MSEC; + sd->vruntime_tolerance = CONFIG_GVTS_INTERVAL_LLC_SHARED + * CONFIG_GVTS_TOLERANCE_PERCENT / 100 * NSEC_PER_MSEC; #endif } @@ -7851,7 +7851,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map) if (!sdd->sd) return -ENOMEM; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS sdd->sdv = alloc_percpu(struct sd_vruntime *); if (!sdd->sdv) return -ENOMEM; @@ -7867,7 +7867,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map) for_each_cpu(j, cpu_map) { struct sched_domain *sd; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS struct sd_vruntime *sdv; #endif struct sched_group *sg; @@ -7880,7 +7880,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map) *per_cpu_ptr(sdd->sd, j) = sd; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS sdv = kzalloc_node(sizeof(struct sd_vruntime) + cpumask_size(), GFP_KERNEL, cpu_to_node(j)); if (!sdv) @@ -7927,7 +7927,7 @@ static void __sdt_free(const struct cpumask *cpu_map) kfree(*per_cpu_ptr(sdd->sd, j)); } -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS if (sdd->sdv) kfree(*per_cpu_ptr(sdd->sdv, j)); #endif @@ -7938,7 +7938,7 @@ static void __sdt_free(const struct cpumask *cpu_map) } free_percpu(sdd->sd); sdd->sd = NULL; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS free_percpu(sdd->sdv); sdd->sdv = NULL; #endif @@ -8029,7 +8029,7 @@ static int build_sched_domains(const struct cpumask *cpu_map, } } -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS /* build sd_vruntime */ for_each_cpu(i, cpu_map) { for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { @@ -8057,7 +8057,7 @@ static int build_sched_domains(const struct cpumask *cpu_map, } -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS #if 0 // To show sdv topology while booting for_each_cpu(i, cpu_map) { int level = 0; @@ -8093,7 +8093,7 @@ static int build_sched_domains(const struct cpumask *cpu_map, *per_cpu_ptr(d.sd, i) = sd; } -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS #if 0 /* build sd_vruntime */ for_each_cpu(i, cpu_map) { @@ -8143,7 +8143,7 @@ static int build_sched_domains(const struct cpumask *cpu_map, rcu_read_unlock(); ret = 0; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS #if 0 // To show sdv topology while booting for_each_cpu(i, cpu_map) { for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { @@ -8572,7 +8572,7 @@ void __init sched_init(void) rq = cpu_rq(i); raw_spin_lock_init(&rq->lock); -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP init_rq_cpu_type(rq); #endif rq->nr_running = 0; @@ -8620,7 
+8620,7 @@ void __init sched_init(void) #ifdef CONFIG_SMP rq->sd = NULL; rq->rd = NULL; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS rq->sd_vruntime = NULL; rq->infeasible_weight = 0; #endif diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 313f117e3c25..680cc650084b 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -397,7 +397,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group P(se->avg.load_avg); P(se->avg.util_avg); #endif -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS P(se->eff_load.weight); P(se->eff_weight_real); #endif @@ -431,13 +431,13 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) SPLIT_NS(p->se.vruntime), (long long)(p->nvcsw + p->nivcsw), p->prio); -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS SEQ_printf(m, " %6ld %10ld %8ld", p->se.load.weight, p->se.eff_weight_real, p->se.avg.util_avg ); SEQ_printf(m, " %10ld %9Ld.%06ld", p->se.lagged_weight, SPLIT_NS(p->se.lagged / NICE_0_LOAD)); -#endif /* CONFIG_GVFS */ +#endif /* CONFIG_GVTS */ #ifdef CONFIG_SCHEDSTATS if (schedstat_enabled()) { SEQ_printf(m, " %9Ld.%06ld %9Ld.%06ld %9Ld.%06ld", @@ -473,7 +473,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu) SEQ_printf(m, "\nrunnable tasks:\n" " task PID tree-key switches prio" -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS " weight eff_weight load_avg" " lag_weight lagged " #endif @@ -486,7 +486,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu) #endif "\n" "-------------------------------------------------------" -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS "---------------------------" "-----------------" #endif @@ -516,7 +516,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) struct rq *rq = cpu_rq(cpu); struct sched_entity *last; unsigned long flags; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS u64 real_min_vruntime = -1; u64 target_vruntime = -1, vruntime_interval = -1, vruntime_tolerance = -1; struct sd_vruntime *sdv; @@ -546,12 +546,12 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) SPLIT_NS(min_vruntime)); SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime", SPLIT_NS(max_vruntime)); -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS real_min_vruntime = cfs_rq->real_min_vruntime; SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "real_min_vruntime", SPLIT_NS(real_min_vruntime)); #endif -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS target_vruntime = cfs_rq->target_vruntime; SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "target_vruntime", SPLIT_NS(target_vruntime)); @@ -593,7 +593,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) SEQ_printf(m, " .%-30s: %ld\n", "tg_load_avg", atomic_long_read(&cfs_rq->tg->load_avg)); #endif -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS SEQ_printf(m, " .%-30s: %ld\n", "tg_load_sum", atomic_long_read(&cfs_rq->tg->load_sum)); #ifdef CONFIG_FAIR_GROUP_SCHED @@ -606,7 +606,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) #ifdef CONFIG_FAIR_GROUP_SCHED } #endif -#endif /* CONFIG_GVFS */ +#endif /* CONFIG_GVTS */ #endif /* CONFIG_SMP */ #ifdef CONFIG_CFS_BANDWIDTH SEQ_printf(m, " .%-30s: %d\n", "throttled", @@ -1030,7 +1030,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) P(se.avg.util_avg); P(se.avg.last_update_time); #endif -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS P(se.tg_load_sum_contrib); P(se.eff_load.weight); #endif @@ -1061,7 +1061,7 @@ void proc_sched_set_task(struct task_struct *p) #endif } -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS #define pr_emerg(fmt, ...) 
\ printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) @@ -1101,12 +1101,12 @@ static void dump_task(struct task_struct *p) { static void dump_rq(struct rq *rq) { pr_emerg("%-3d" -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP " %4d" #endif " %6d %6ld %8d %20s %16lld %16lld %16lld %16lld %16lld %16lld %16lld %10ld\n", cpu_of(rq), -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP rq->cpu_type, #endif rq->nr_running, @@ -1134,12 +1134,12 @@ void dump_sched(void) { /* header for dump_rq() */ pr_emerg("DUMP_RQ\n"); pr_emerg("%-3s" -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP " %4s" #endif " %6s %6s %8s %20s %16s %16s %16s %16s %16s %16s %16s %10s\n", "cpu", -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP "type", #endif "nr_run", diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index c181ff828f83..c6b859e6d182 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -482,7 +482,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq) #endif } -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS static u64 real_min_vruntime(struct cfs_rq *cfs_rq) { return cfs_rq->real_min_vruntime; } @@ -516,11 +516,11 @@ static inline u64 cfs_rq_target_vruntime(struct cfs_rq *cfs_rq) { return cfs_rq->target_vruntime; } -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP static inline s64 task_lagged_type(struct sched_entity *se, u64 target, int type) { return (target - se->vruntime) * se->__lagged_weight[type]; } -#endif /* CONFIG_GVFS_AMP */ +#endif /* CONFIG_GVTS_AMP */ static inline s64 task_lagged(struct sched_entity *se, u64 target) { return (target - se->vruntime) * se->lagged_weight; } @@ -538,7 +538,7 @@ static inline s64 cpu_lagged(int cpu, u64 target) { return rq_lagged(cpu_rq(cpu), target); } -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP #ifdef CONFIG_FAIR_GROUP_SCHED static inline unsigned long se_effi(struct sched_entity *se, int type) { @@ -565,7 +565,7 @@ static inline unsigned long se_effi(struct sched_entity *se, int type) { #define set_curr_effi(p) do{}while(0) #define put_prev_effi(p) do{}while(0) #endif /* !CONFIG_FAIR_GROUP_SCHED */ -#endif /* CONFIG_GVFS_AMP */ +#endif /* CONFIG_GVTS_AMP */ static void update_rq_lagged_weight(struct cfs_rq *cfs_rq, struct sched_entity *se, @@ -636,7 +636,7 @@ static u64 __get_min_target_traverse(struct rq *this_rq) { int running = 0, cpu; struct rq *rq; - gvfs_stat_inc(this_rq, get_traverse_rq_count); + gvts_stat_inc(this_rq, get_traverse_rq_count); for_each_possible_cpu(cpu) { if (idle_cpu(cpu)) @@ -709,7 +709,7 @@ static u64 get_min_target(struct rq *rq) { sdv = min_child; /* to prevent double counting, increment here */ - gvfs_stat_inc(rq, get_traverse_child_count); + gvts_stat_inc(rq, get_traverse_child_count); } else { return __get_min_target_traverse(rq); } @@ -858,7 +858,7 @@ void update_target_vruntime_cache(struct cfs_rq *cfs_rq, u64 target, int locked) cfs_rq->target_vruntime = target; update_min_target_rq(cfs_rq, target); } -#endif /* CONFIG_GVFS */ +#endif /* CONFIG_GVTS */ /* * Enqueue an entity into the rb-tree: @@ -970,7 +970,7 @@ int sched_proc_update_handler(struct ctl_table *table, int write, } #endif -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS static int update_eff_load(struct sched_entity *task_se, struct sched_entity *plast); /* * delta /= w @@ -989,7 +989,7 @@ static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se) return delta; } -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP static inline u64 __calc_delta_effi(u64 delta, unsigned long effi, u32 *rem) { if (effi == NICE_0_LOAD) @@ -1014,7 +1014,7 @@ static inline u64 
calc_delta_vruntime(u64 delta, unsigned long effi, struct sche static inline u64 calc_delta_perf(u64 delta, unsigned long effi, struct sched_entity *se) { return __calc_delta_effi(delta, effi, &se->perf_rem); } -#endif /* CONFIG_GVFS_AMP */ +#endif /* CONFIG_GVTS_AMP */ #else /* * delta /= w @@ -1130,7 +1130,7 @@ static void update_curr(struct cfs_rq *cfs_rq) struct sched_entity *curr = cfs_rq->curr; u64 now = rq_clock_task(rq_of(cfs_rq)); u64 delta_exec; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP int type = rq_of(cfs_rq)->cpu_type; #endif @@ -1149,18 +1149,18 @@ static void update_curr(struct cfs_rq *cfs_rq) curr->sum_exec_runtime += delta_exec; schedstat_add(cfs_rq, exec_clock, delta_exec); -#ifdef CONFIG_GVFS -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS +#ifdef CONFIG_GVTS_AMP curr->sum_type_runtime[type] += delta_exec; #endif if (entity_is_task(curr)) { struct task_struct *curtask = task_of(curr); -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP curr->vruntime += calc_delta_vruntime(delta_exec, se_effi(curr, type), curr); -#else /* !CONFIG_GVFS_AMP */ +#else /* !CONFIG_GVTS_AMP */ curr->vruntime += calc_delta_fair(delta_exec, curr); -#endif /* !CONFIG_GVFS_AMP */ +#endif /* !CONFIG_GVTS_AMP */ update_lagged(curr, &rq_of(cfs_rq)->cfs); update_min_vruntime(cfs_rq); @@ -1174,14 +1174,14 @@ static void update_curr(struct cfs_rq *cfs_rq) update_min_vruntime(cfs_rq); } -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP /* On AMP systems, scale delta_exec based on efficiency for CFS_BANDWIDTH. Now, CFS_BANDWIDTH controls the received performance. */ delta_exec = calc_delta_perf(delta_exec, se_effi(curr, type), curr); curr->sum_perf_runtime += delta_exec; #endif account_cfs_rq_runtime(cfs_rq, delta_exec); -#else /* !CONFIG_GVFS */ +#else /* !CONFIG_GVTS */ curr->vruntime += calc_delta_fair(delta_exec, curr); update_min_vruntime(cfs_rq); @@ -1194,7 +1194,7 @@ static void update_curr(struct cfs_rq *cfs_rq) } account_cfs_rq_runtime(cfs_rq, delta_exec); -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ } static void update_curr_fair(struct rq *rq) @@ -1643,9 +1643,9 @@ static unsigned long weighted_cpuload(const int cpu); static unsigned long source_load(int cpu, int type); static unsigned long target_load(int cpu, int type); static unsigned long capacity_of(int cpu); -#ifndef CONFIG_GVFS +#ifndef CONFIG_GVTS static long effective_load(struct task_group *tg, int cpu, long wl, long wg); -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ /* Cached statistics for all CPUs within a node */ struct numa_stats { @@ -2934,7 +2934,7 @@ static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg) { long tg_weight, load, shares; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS tg_weight = atomic_long_read(&tg->load_sum); #else tg_weight = calc_tg_weight(tg, cfs_rq); @@ -3241,7 +3241,7 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) } } -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS /* rq->lock held */ static inline void __update_tg_load_sum(struct task_group *tg, unsigned long old, unsigned long new) { @@ -3302,7 +3302,7 @@ inline void update_tg_load_sum(struct sched_entity *se, struct task_group *tg, se->tg_load_sum_contrib = new; __update_tg_load_sum(tg, old, new); } -#endif /* CONFIG_GVFS */ +#endif /* CONFIG_GVTS */ /* * Called within set_task_rq() right before setting a task's cpu. 
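The update_curr() changes above charge CFS bandwidth in "performance time" on AMP systems: the raw delta_exec is scaled by the per-type efficiency before account_cfs_rq_runtime() sees it. The body of __calc_delta_effi() is elided by the hunk context, so the sketch below is only a model of the stated intent, assuming a base efficiency of 1024 (DEFAULT_EFFICIENCY[0]) and a plain multiply/divide with the fractional part carried in *rem.

/* stand-alone model; not the kernel's exact arithmetic */
#include <stdint.h>
#include <stdio.h>

#define EFFI_BASE 1024ULL	/* assumed base efficiency */

static uint64_t scale_by_effi(uint64_t delta_ns, unsigned long effi, uint32_t *rem)
{
	uint64_t scaled = delta_ns * effi + *rem;

	*rem = (uint32_t)(scaled % EFFI_BASE);	/* carry the sub-unit remainder */
	return scaled / EFFI_BASE;
}

int main(void)
{
	uint32_t rem = 0;

	/* 1 ms of wall time on a core with efficiency 1703 (the big core in
	 * DEFAULT_EFFICIENCY) is charged as about 1.663 ms of performance time. */
	printf("%llu ns\n",
	       (unsigned long long)scale_by_effi(1000000ULL, 1703, &rem));
	return 0;
}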
The @@ -3561,10 +3561,10 @@ static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq) return cfs_rq->avg.load_avg; } -#ifndef CONFIG_GVFS +#ifndef CONFIG_GVTS static int idle_balance(struct rq *this_rq); #endif -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS static inline int target_vruntime_balance(struct rq *rq, enum cpu_idle_type idle); void transit_idle_to_busy(struct rq *rq); #endif @@ -3595,7 +3595,7 @@ static inline int idle_balance(struct rq *rq) return 0; } -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS static inline int target_vruntime_balance(struct rq *this_rq, enum cpu_idle_type idle) { return 0; @@ -3679,9 +3679,9 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se) #endif } -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS static void -__gvfs_enqueue_normalization(struct rq *rq, struct sched_entity *se, int throttled) { +__gvts_enqueue_normalization(struct rq *rq, struct sched_entity *se, int throttled) { struct cfs_rq *cfs_rq; u64 target; u64 interval; @@ -3747,7 +3747,7 @@ __gvfs_enqueue_normalization(struct rq *rq, struct sched_entity *se, int throttl if (vruntime_passed(se->vruntime, vruntime)) goto out; -#ifdef CONFIG_GVFS_DEBUG_NORMALIZATION +#ifdef CONFIG_GVTS_DEBUG_NORMALIZATION se->num_normalization++; se->added_normalization += vruntime - se->vruntime; if ((vruntime - se->vruntime) > se->max_added_normalization) @@ -3762,12 +3762,12 @@ __gvfs_enqueue_normalization(struct rq *rq, struct sched_entity *se, int throttl /* call only when wake up or waking up a task */ static void -gvfs_enqueue_sleeper(struct rq *rq, struct sched_entity *se) { - __gvfs_enqueue_normalization(rq, se, 0); +gvts_enqueue_sleeper(struct rq *rq, struct sched_entity *se) { + __gvts_enqueue_normalization(rq, se, 0); } static void -gvfs_dequeue_sleeper(struct rq *rq, struct sched_entity *se) { +gvts_dequeue_sleeper(struct rq *rq, struct sched_entity *se) { if (!se->sleep_start) { se->sleep_start = rq_clock(rq); se->sleep_target = rq->cfs.target_vruntime; @@ -3775,8 +3775,8 @@ gvfs_dequeue_sleeper(struct rq *rq, struct sched_entity *se) { } static void -gvfs_enqueue_throttled(struct rq *rq, struct sched_entity *se) { - __gvfs_enqueue_normalization(rq, se, 1); +gvts_enqueue_throttled(struct rq *rq, struct sched_entity *se) { + __gvts_enqueue_normalization(rq, se, 1); } static inline void @@ -3796,7 +3796,7 @@ static inline void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) { } -#else /* !CONFIG_GVFS */ +#else /* !CONFIG_GVTS */ static void place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) { @@ -3828,12 +3828,12 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) /* ensure we never gain time by being placed backwards. 
*/ se->vruntime = max_vruntime(se->vruntime, vruntime); } -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ static void check_enqueue_throttle(struct cfs_rq *cfs_rq); -#ifdef CONFIG_GVFS_BANDWIDTH +#ifdef CONFIG_GVTS_BANDWIDTH static int cfs_rq_throttled(struct cfs_rq *cfs_rq); -#endif /* CONFIG_GVFS_BANDWIDTH */ +#endif /* CONFIG_GVTS_BANDWIDTH */ static inline void check_schedstat_required(void) { @@ -3858,14 +3858,14 @@ static inline void check_schedstat_required(void) static void enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { -#ifndef CONFIG_GVFS /* for GVFS, do not normalize vruntime based on min_vruntime */ +#ifndef CONFIG_GVTS /* for GVTS, do not normalize vruntime based on min_vruntime */ /* * Update the normalized vruntime before updating min_vruntime * through calling update_curr(). */ if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING)) se->vruntime += cfs_rq->min_vruntime; -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ /* * Update run-time statistics of the 'current'. @@ -3894,7 +3894,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) list_add_leaf_cfs_rq(cfs_rq); check_enqueue_throttle(cfs_rq); } -#ifdef CONFIG_GVFS_BANDWIDTH +#ifdef CONFIG_GVTS_BANDWIDTH /* do this after calling check_enqueue_throttle() since it re-check the state of cfs_rq */ if (!cfs_rq_throttled(cfs_rq)) { /* not throttled */ @@ -3904,7 +3904,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) list_add(&se->state_node, cfs_rq->thrott_q); se->state_q = cfs_rq->thrott_q; } -#endif /* CONFIG_GVFS_BANDWIDTH */ +#endif /* CONFIG_GVTS_BANDWIDTH */ } static void __clear_buddies_last(struct sched_entity *se) @@ -3973,7 +3973,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) se->on_rq = 0; account_entity_dequeue(cfs_rq, se); -#ifndef CONFIG_GVFS /* for GVFS, do not normalize vruntime while dequeueing */ +#ifndef CONFIG_GVTS /* for GVTS, do not normalize vruntime while dequeueing */ /* * Normalize the entity after updating the min_vruntime because the * update can refer to the ->curr item and we need to reflect this @@ -3981,8 +3981,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) */ if (!(flags & DEQUEUE_SLEEP)) se->vruntime -= cfs_rq->min_vruntime; -#endif /* !CONFIG_GVFS */ -#ifdef CONFIG_GVFS_BANDWIDTH +#endif /* !CONFIG_GVTS */ +#ifdef CONFIG_GVTS_BANDWIDTH if (se->state_q == cfs_rq->thrott_q && entity_is_task(se) && (se->sleep_start == 0 || cfs_rq->throttled_clock < se->sleep_start)) { se->sleep_start = cfs_rq->throttled_clock; @@ -3990,7 +3990,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) } list_del(&se->state_node); se->state_q = NULL; -#endif /* CONFIG_GVFS_BANDWIDTH */ +#endif /* CONFIG_GVTS_BANDWIDTH */ /* return excess runtime on last dequeue */ return_cfs_rq_runtime(cfs_rq); @@ -4070,18 +4070,18 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) } #endif se->prev_sum_exec_runtime = se->sum_exec_runtime; -#ifdef CONFIG_GVFS_BANDWIDTH +#ifdef CONFIG_GVTS_BANDWIDTH if (se->state_q == cfs_rq->thrott_q) { if (entity_is_task(se) && !se->sleep_start) { se->sleep_start = cfs_rq->throttled_clock; se->sleep_target = cfs_rq->throttled_target; } - //gvfs_enqueue_sleeper(rq_of(cfs_rq), se); - gvfs_enqueue_throttled(rq_of(cfs_rq), se); + //gvts_enqueue_sleeper(rq_of(cfs_rq), se); + gvts_enqueue_throttled(rq_of(cfs_rq), se); list_move(&se->state_node, cfs_rq->active_q); se->state_q = cfs_rq->active_q; } -#endif 
/* CONFIG_GVFS_BANDWIDTH */ +#endif /* CONFIG_GVTS_BANDWIDTH */ } static int @@ -4446,16 +4446,16 @@ static int tg_throttle_down(struct task_group *tg, void *data) return 0; } -#ifdef CONFIG_GVFS_BANDWIDTH +#ifdef CONFIG_GVTS_BANDWIDTH /* call before updating cfs_rq->throttled_clock */ -static void gvfs_throttle_cfs_rq(struct cfs_rq *cfs_rq) +static void gvts_throttle_cfs_rq(struct cfs_rq *cfs_rq) { struct sched_entity *se, *n; struct list_head *temp_list; if (unlikely(!list_empty(cfs_rq->thrott_q))) { /* empty the thrtt_q before switch queues. */ list_for_each_entry_safe(se, n, cfs_rq->thrott_q, state_node) { - gvfs_stat_inc(rq_of(cfs_rq), iterate_thrott_q); + gvts_stat_inc(rq_of(cfs_rq), iterate_thrott_q); /* remember throttled time before cfs_rq replace it the recent value */ if (entity_is_task(se) && @@ -4474,7 +4474,7 @@ static void gvfs_throttle_cfs_rq(struct cfs_rq *cfs_rq) cfs_rq->thrott_q = cfs_rq->active_q; cfs_rq->active_q = temp_list; } -#endif /* CONFIG_GVFS_BANDWIDTH */ +#endif /* CONFIG_GVTS_BANDWIDTH */ static void throttle_cfs_rq(struct cfs_rq *cfs_rq) { @@ -4509,13 +4509,13 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq) if (!se) sub_nr_running(rq, task_delta); -#ifdef CONFIG_GVFS_BANDWIDTH - gvfs_throttle_cfs_rq(cfs_rq); +#ifdef CONFIG_GVTS_BANDWIDTH + gvts_throttle_cfs_rq(cfs_rq); #endif cfs_rq->throttled = 1; cfs_rq->throttled_clock = rq_clock(rq); -#ifdef CONFIG_GVFS_BANDWIDTH +#ifdef CONFIG_GVTS_BANDWIDTH cfs_rq->throttled_target = __cfs_rq_target_vruntime(cfs_rq); #endif raw_spin_lock(&cfs_b->lock); @@ -4902,7 +4902,7 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) { cfs_rq->runtime_enabled = 0; INIT_LIST_HEAD(&cfs_rq->throttled_list); -#ifdef CONFIG_GVFS_BANDWIDTH +#ifdef CONFIG_GVTS_BANDWIDTH cfs_rq->active_q = &cfs_rq->state_q[0]; cfs_rq->thrott_q = &cfs_rq->state_q[1]; INIT_LIST_HEAD(cfs_rq->active_q); @@ -5063,9 +5063,9 @@ static inline void hrtick_update(struct rq *rq) } #endif -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP static inline unsigned long calc_lagged_weight(struct sched_entity *se) { unsigned long base, lw; @@ -5073,7 +5073,7 @@ calc_lagged_weight(struct sched_entity *se) { int rq_type = rq_of(cfs_rq_of(se))->cpu_type; base = se->eff_weight_real * se->avg.util_avg - << CONFIG_GVFS_LAGGED_WEIGHT_ADDED_BITS; + << CONFIG_GVTS_LAGGED_WEIGHT_ADDED_BITS; for_each_type(type) { lw = base / se_effi(se, type); @@ -5082,13 +5082,13 @@ calc_lagged_weight(struct sched_entity *se) { return se->__lagged_weight[rq_type]; } -#else /* !CONFIG_GVFS_AMP */ +#else /* !CONFIG_GVTS_AMP */ static inline unsigned long calc_lagged_weight(struct sched_entity *se) { unsigned long eff_weight_real = se->eff_weight_real; unsigned long util_avg = se->avg.util_avg; - util_avg = (util_avg + ((1 << (SCHED_LOAD_SHIFT - CONFIG_GVFS_LAGGED_WEIGHT_ADDED_BITS)) - 1)) - >> (SCHED_LOAD_SHIFT - CONFIG_GVFS_LAGGED_WEIGHT_ADDED_BITS); + util_avg = (util_avg + ((1 << (SCHED_LOAD_SHIFT - CONFIG_GVTS_LAGGED_WEIGHT_ADDED_BITS)) - 1)) + >> (SCHED_LOAD_SHIFT - CONFIG_GVTS_LAGGED_WEIGHT_ADDED_BITS); if (likely(eff_weight_real && util_avg)) return eff_weight_real * util_avg; else if (eff_weight_real) @@ -5096,7 +5096,7 @@ calc_lagged_weight(struct sched_entity *se) { else return 0; } -#endif /* !CONFIG_GVFS_AMP */ +#endif /* !CONFIG_GVTS_AMP */ static inline int update_lagged_weight(struct sched_entity *pse) { unsigned long lagged_weight = calc_lagged_weight(pse); @@ -5185,7 +5185,7 @@ update_eff_load(struct sched_entity *pse, struct 
sched_entity *plast) /* update lagged_weight */ return update_lagged_weight(pse); } -#endif /* CONFIG_GVFS */ +#endif /* CONFIG_GVTS */ /* * The enqueue_task method is called before nr_running is @@ -5198,7 +5198,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) struct cfs_rq *cfs_rq; struct sched_entity *se = &p->se; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS if (flags & ENQUEUE_WAKEUP) { /* tg_load_sum should be updated before calling update_cfs_shares() or enqueue_entity(). */ @@ -5206,7 +5206,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) } if (se->sleep_start) /* slept tasks or throttled tasks */ - gvfs_enqueue_sleeper(rq, se); + gvts_enqueue_sleeper(rq, se); #endif for_each_sched_entity(se) { @@ -5242,7 +5242,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) if (!se) add_nr_running(rq, 1); -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS update_rq_lagged_weight_enqueue(&rq->cfs, &p->se, 0, p->se.lagged_weight); update_eff_load(&p->se, se); update_lagged_enqueue(&p->se, &rq->cfs); @@ -5264,7 +5264,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) struct sched_entity *se = &p->se; int task_sleep = flags & DEQUEUE_SLEEP; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS /* tg_load_sum should be updated before calling update_cfs_shares() or enqueue_entity(). */ if (task_sleep) @@ -5323,10 +5323,10 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) if (!se) sub_nr_running(rq, 1); -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS /* this should be after dequeue */ if (task_sleep) - gvfs_dequeue_sleeper(rq, &p->se); + gvts_dequeue_sleeper(rq, &p->se); update_rq_lagged_weight_dequeue(&rq->cfs, &p->se, p->se.lagged_weight, 0); update_eff_load(&p->se, se); @@ -5609,7 +5609,7 @@ static unsigned long capacity_orig_of(int cpu) return cpu_rq(cpu)->cpu_capacity_orig; } -#ifndef CONFIG_GVFS +#ifndef CONFIG_GVTS static unsigned long cpu_avg_load_per_task(int cpu) { struct rq *rq = cpu_rq(cpu); @@ -5621,7 +5621,7 @@ static unsigned long cpu_avg_load_per_task(int cpu) return 0; } -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ static void record_wakee(struct task_struct *p) { @@ -5643,7 +5643,7 @@ static void record_wakee(struct task_struct *p) static void task_waking_fair(struct task_struct *p) { -#ifndef CONFIG_GVFS /* for GVFS, do not normalize vruntime base on min_vruntime */ +#ifndef CONFIG_GVTS /* for GVTS, do not normalize vruntime base on min_vruntime */ struct sched_entity *se = &p->se; struct cfs_rq *cfs_rq = cfs_rq_of(se); u64 min_vruntime; @@ -5661,11 +5661,11 @@ static void task_waking_fair(struct task_struct *p) #endif se->vruntime -= min_vruntime; -#endif /* !GVFS */ +#endif /* !GVTS */ record_wakee(p); } -#ifndef CONFIG_GVFS +#ifndef CONFIG_GVTS #ifdef CONFIG_FAIR_GROUP_SCHED /* * effective_load() calculates the load change as seen from the root_task_group @@ -5780,7 +5780,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg) } #endif -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ /* * Detect M:N waker/wakee relationships via a switching-frequency heuristic. @@ -5807,7 +5807,7 @@ static int wake_wide(struct task_struct *p) return 1; } -#ifndef CONFIG_GVFS +#ifndef CONFIG_GVTS static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync) { s64 this_load, load; @@ -5980,7 +5980,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) return shallowest_idle_cpu != -1 ? 
shallowest_idle_cpu : least_loaded_cpu; } -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ /* * Try and locate an idle CPU in the sched_domain. @@ -6016,7 +6016,7 @@ static int select_idle_sibling(struct task_struct *p, int target) * idle. */ sd = rcu_dereference(per_cpu(sd_llc, target)); -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS target = -1; #endif for_each_lower_domain(sd) { @@ -6028,7 +6028,7 @@ static int select_idle_sibling(struct task_struct *p, int target) /* Ensure the entire group is idle */ for_each_cpu(i, sched_group_cpus(sg)) { -#ifndef CONFIG_GVFS +#ifndef CONFIG_GVTS if (i == target || !idle_cpu(i)) goto next; #else @@ -6086,7 +6086,7 @@ static int cpu_util(int cpu) return (util >= capacity) ? capacity : util; } -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS /* return the target vruntime value even if the sched_domain is not linked with sd_vruntime. */ static inline @@ -6145,7 +6145,7 @@ find_fastest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) for_each_cpu(i, sched_group_cpus(group)) { if (idle_cpu(i)) { -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP /* for AMP, idleness is determined with default efficiency */ num_idle += DEFAULT_EFFICIENCY[cpu_rq(i)->cpu_type]; #else @@ -6182,7 +6182,7 @@ static int find_fastest_cpu(struct sched_domain *sd, struct sched_group *group, u64 latest_idle_timestamp = 0; int fastest_cpu = this_cpu; int shallowest_idle_cpu = -1; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP int shallowest_idle_type = -1; #endif int i; @@ -6194,7 +6194,7 @@ static int find_fastest_cpu(struct sched_domain *sd, struct sched_group *group, if (idle_cpu(i)) { struct rq *rq = cpu_rq(i); struct cpuidle_state *idle = idle_get_state(rq); -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP if (rq->cpu_type < shallowest_idle_type) { /* rq->cpu_type >= 0 * shallowest_idle_type >= 0 only if shallowest_idle_cpu >= 0 @@ -6208,7 +6208,7 @@ static int find_fastest_cpu(struct sched_domain *sd, struct sched_group *group, shallowest_idle_type = rq->cpu_type; continue; } -#endif /* CONFIG_GVFS_AMP */ +#endif /* CONFIG_GVTS_AMP */ if (idle && idle->exit_latency < min_exit_latency) { /* * We give priority to a CPU whose idle state @@ -6218,9 +6218,9 @@ static int find_fastest_cpu(struct sched_domain *sd, struct sched_group *group, min_exit_latency = idle->exit_latency; latest_idle_timestamp = rq->idle_stamp; shallowest_idle_cpu = i; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP shallowest_idle_type = rq->cpu_type; -#endif /* CONFIG_GVFS_AMP */ +#endif /* CONFIG_GVTS_AMP */ } else if ((!idle || idle->exit_latency == min_exit_latency) && rq->idle_stamp > latest_idle_timestamp) { /* @@ -6230,9 +6230,9 @@ static int find_fastest_cpu(struct sched_domain *sd, struct sched_group *group, */ latest_idle_timestamp = rq->idle_stamp; shallowest_idle_cpu = i; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP shallowest_idle_type = rq->cpu_type; -#endif /* CONFIG_GVFS_AMP */ +#endif /* CONFIG_GVTS_AMP */ } } else if (shallowest_idle_cpu == -1) { lagged = cpu_lagged(i, target); @@ -6338,14 +6338,14 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f /* while loop will break here if sd == NULL */ } -#ifdef CONFIG_GVFS_STATS +#ifdef CONFIG_GVTS_STATS if (new_cpu == -1) - gvfs_stat_inc(cpu_rq(cpu), select_fail); + gvts_stat_inc(cpu_rq(cpu), select_fail); else { if (idle_cpu(new_cpu)) - gvfs_stat_inc(cpu_rq(cpu), select_idle); + gvts_stat_inc(cpu_rq(cpu), select_idle); else - gvfs_stat_inc(cpu_rq(cpu), select_busy); + gvts_stat_inc(cpu_rq(cpu), 
select_busy); } #endif out: @@ -6360,7 +6360,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f return new_cpu; } -#else /* !CONFIG_GVFS */ +#else /* !CONFIG_GVTS */ /* * select_task_rq_fair: Select target runqueue for the waking task in domains * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE, @@ -6454,7 +6454,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f return new_cpu; } -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ /* * Called immediately before a task is migrated to a new cpu; task_cpu(p) and @@ -6653,7 +6653,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev) struct task_struct *p; int new_tasks; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS if (vruntime_passed(real_min_vruntime(cfs_rq), cfs_rq->target_vruntime)) goto idle; #endif @@ -6729,7 +6729,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev) put_prev_entity(cfs_rq, pse); set_next_entity(cfs_rq, se); -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP put_prev_effi(prev); set_curr_effi(p); #endif @@ -6748,7 +6748,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev) put_prev_task(rq, prev); -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS /* Here is good place to detect idle-to-busy transition. * Even with FAIR_GROUP_SCHED, this function returns above * only if (prev->sched_class == &fair_sched_class). @@ -6766,7 +6766,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev) } while (cfs_rq); p = task_of(se); -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP /* put_prev_effi() was done at put_prev_task() above */ set_curr_effi(p); #endif @@ -6784,9 +6784,9 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev) * re-start the picking loop. */ lockdep_unpin_lock(&rq->lock); -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS new_tasks = target_vruntime_balance(rq, !cfs_rq->nr_running ? CPU_NEWLY_IDLE : CPU_NOT_IDLE); -#else /* !CONFIG_GVFS */ +#else /* !CONFIG_GVTS */ new_tasks = idle_balance(rq); #endif lockdep_pin_lock(&rq->lock); @@ -6816,7 +6816,7 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) cfs_rq = cfs_rq_of(se); put_prev_entity(cfs_rq, se); } -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP put_prev_effi(prev); #endif } @@ -7019,13 +7019,13 @@ struct lb_env { unsigned int flags; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS u64 target; u64 interval; u64 tolerance; s64 lagged_diff; #endif -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP int slower_src; /* src_rq->cpu_type < dst_rq->cpu_type */ #endif @@ -7037,7 +7037,7 @@ struct lb_env { struct list_head tasks; }; -#ifndef CONFIG_GVFS +#ifndef CONFIG_GVTS /* * Is this task likely cache-hot: */ @@ -7206,7 +7206,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) schedstat_inc(p, se.statistics.nr_failed_migrations_hot); return 0; } -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ /* * detach_task() -- detach the task for the migration specified in env @@ -7220,7 +7220,7 @@ static void detach_task(struct task_struct *p, struct lb_env *env) set_task_cpu(p, env->dst_cpu); } -#ifndef CONFIG_GVFS +#ifndef CONFIG_GVTS /* * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as * part of active balancing operations within "domain". 
@@ -7250,11 +7250,11 @@ static struct task_struct *detach_one_task(struct lb_env *env) } return NULL; } -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ static const unsigned int sched_nr_migrate_break = 32; -#ifndef CONFIG_GVFS +#ifndef CONFIG_GVTS /* * detach_tasks() -- tries to detach up to imbalance weighted load from * busiest_rq, as part of a balancing operation within domain "sd". @@ -7343,7 +7343,7 @@ static int detach_tasks(struct lb_env *env) return detached; } -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ /* * attach_task() -- attach the task detached by detach_task() to its new rq. @@ -8004,7 +8004,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd } -#ifndef CONFIG_GVFS +#ifndef CONFIG_GVTS /** * check_asym_packing - Check to see if the group is packed into the * sched doman. @@ -8368,7 +8368,7 @@ static struct rq *find_busiest_queue(struct lb_env *env, return busiest; } -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ /* * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but @@ -8379,7 +8379,7 @@ static struct rq *find_busiest_queue(struct lb_env *env, /* Working cpumask for load_balance and load_balance_newidle. */ DEFINE_PER_CPU(cpumask_var_t, load_balance_mask); -#ifndef CONFIG_GVFS +#ifndef CONFIG_GVTS static int need_active_balance(struct lb_env *env) { struct sched_domain *sd = env->sd; @@ -8713,7 +8713,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, out: return ld_moved; } -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ static inline unsigned long get_sd_balance_interval(struct sched_domain *sd, int cpu_busy) @@ -8742,7 +8742,7 @@ update_next_balance(struct sched_domain *sd, int cpu_busy, unsigned long *next_b *next_balance = next; } -#ifndef CONFIG_GVFS +#ifndef CONFIG_GVTS /* * idle_balance is called by schedule() if this_cpu is about to become * idle. Attempts to pull tasks from other CPUs. 
@@ -8840,9 +8840,9 @@ static int idle_balance(struct rq *this_rq) return pulled_task; } -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS void set_min_vruntime_idle_to_busy(struct rq *rq) { struct cfs_rq *cfs_rq = &rq->cfs; @@ -8929,7 +8929,7 @@ void transit_busy_to_idle(struct rq *rq) { do { min_vruntime = largest_vruntime; largest_vruntime = atomic64_xchg(&sdv->largest_idle_min_vruntime, min_vruntime); - gvfs_stat_inc(rq, largest_idle_min_vruntime_racing); + gvts_stat_inc(rq, largest_idle_min_vruntime_racing); } while (vruntime_passed_ne(largest_vruntime, min_vruntime)); } } else /* min_vruntime > largest_vruntime */ @@ -8984,7 +8984,7 @@ find_most_lagged_child(struct lb_env *env) { int num_busy; unsigned int sum_nr_running; struct rq *rq; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP int min_cpu_type = INT_MAX; #endif @@ -9005,7 +9005,7 @@ find_most_lagged_child(struct lb_env *env) { lagged_sum += lagged; num_busy++; sum_nr_running += rq->cfs.h_nr_running; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP if (rq->cpu_type < min_cpu_type) min_cpu_type = rq->cpu_type; #endif @@ -9017,22 +9017,22 @@ find_most_lagged_child(struct lb_env *env) { continue; } // TODO: include cpu_type < dst_rq->cpu_type => ignore sum_nr_running condition -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP if (min_cpu_type >= env->dst_rq->cpu_type && sum_nr_running < child->nr_cpus) continue; -#else /* !CONFIG_GVFS_AMP */ +#else /* !CONFIG_GVTS_AMP */ if (sum_nr_running < child->nr_cpus) continue; -#endif /* !CONFIG_GVFS_AMP */ +#endif /* !CONFIG_GVTS_AMP */ -#if CONFIG_GVFS_TOLERANCE_PERCENT > 0 +#if CONFIG_GVTS_TOLERANCE_PERCENT > 0 /* Note that 1) tolerance = 0 for idle destination cpus * 2) tolerance is time (ns) * 3) lagged = remaining time (ns) to reach target * eff_weight * cpu_util - * but, cpu util remains only CONFIG_GVFS_LAGGED_WEIGHT_ADDED_BITS. + * but, cpu util remains only CONFIG_GVTS_LAGGED_WEIGHT_ADDED_BITS. */ - if (lagged_sum < env->tolerance * (1 << (SCHED_LOAD_SHIFT + CONFIG_GVFS_LAGGED_WEIGHT_ADDED_BITS))) + if (lagged_sum < env->tolerance * (1 << (SCHED_LOAD_SHIFT + CONFIG_GVTS_LAGGED_WEIGHT_ADDED_BITS))) continue; #endif @@ -9065,8 +9065,8 @@ static struct rq *find_most_lagged_rq(struct lb_env *env, if (env->idle == CPU_NOT_IDLE) { this_lagged = rq_lagged(env->dst_rq, env->target); -#if CONFIG_GVFS_TOLERANCE_PERCENT > 0 - this_lagged += env->tolerance * (1 << (SCHED_LOAD_SHIFT + CONFIG_GVFS_LAGGED_WEIGHT_ADDED_BITS)); +#if CONFIG_GVTS_TOLERANCE_PERCENT > 0 + this_lagged += env->tolerance * (1 << (SCHED_LOAD_SHIFT + CONFIG_GVTS_LAGGED_WEIGHT_ADDED_BITS)); #endif } else { /* for idle or newly idle cases, rq_lagged() = 0. 
@@ -9076,22 +9076,22 @@ static struct rq *find_most_lagged_rq(struct lb_env *env, max_lagged = this_lagged; for_each_cpu_and(cpu, sd_vruntime_span(sdv), env->cpus) { - gvfs_stat_inc(env->sd, lagged_count[env->idle]); + gvts_stat_inc(env->sd, lagged_count[env->idle]); rq = cpu_rq(cpu); -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP if (rq->cpu_type >= env->dst_rq->cpu_type && rq->nr_running < 2) { - gvfs_stat_inc(env->sd, lagged_little_tasks[env->idle]); + gvts_stat_inc(env->sd, lagged_little_tasks[env->idle]); continue; } -#else /* !CONFIG_GVFS_AMP */ +#else /* !CONFIG_GVTS_AMP */ if (rq->nr_running < 2) { - gvfs_stat_inc(env->sd, lagged_little_tasks[env->idle]); + gvts_stat_inc(env->sd, lagged_little_tasks[env->idle]); continue; } -#endif /* !CONFIG_GVFS_AMP */ +#endif /* !CONFIG_GVTS_AMP */ if (rq->cfs.h_nr_running == 0) { - gvfs_stat_inc(env->sd, lagged_no_cfs_tasks[env->idle]); + gvts_stat_inc(env->sd, lagged_no_cfs_tasks[env->idle]); continue; } @@ -9099,16 +9099,16 @@ static struct rq *find_most_lagged_rq(struct lb_env *env, if (lagged <= this_lagged) { /* for stats... this is not necessary. * Note that the initial value of max_lagged = this_lagged */ - gvfs_stat_inc(env->sd, lagged_pass_soon[env->idle]); + gvts_stat_inc(env->sd, lagged_pass_soon[env->idle]); continue; } if (lagged <= max_lagged) { - gvfs_stat_inc(env->sd, lagged_not_min[env->idle]); + gvts_stat_inc(env->sd, lagged_not_min[env->idle]); continue; } - gvfs_stat_inc(env->sd, lagged_found[env->idle]); + gvts_stat_inc(env->sd, lagged_found[env->idle]); lagged_rq = rq; max_lagged = lagged; } @@ -9177,7 +9177,7 @@ int can_migrate_lagged_task(struct task_struct *p, struct lb_env *env) return 1; } -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP static inline s64 __migration_benefit(s64 src_lagged, s64 dst_lagged, s64 prev_max, u64 target, int src_type, int dst_type, struct sched_entity *se) @@ -9258,7 +9258,7 @@ static struct task_struct *detach_one_lagged_task(struct lb_env *env) && __migration_benefit(src_lagged, dst_lagged, prev_max, target, src_type, dst_type, &p->se) > 0) { detach_task(p, env); - gvfs_stat_inc(env->sd, atb_pushed_under); + gvts_stat_inc(env->sd, atb_pushed_under); return p; } @@ -9275,10 +9275,10 @@ static struct task_struct *detach_one_lagged_task(struct lb_env *env) continue; detach_task(p, env); - gvfs_stat_inc(env->sd, atb_pushed); + gvts_stat_inc(env->sd, atb_pushed); return p; } - gvfs_stat_inc(env->sd, atb_failed); + gvts_stat_inc(env->sd, atb_failed); return NULL; } @@ -9296,7 +9296,7 @@ static int detach_lagged_tasks(struct lb_env *env) lockdep_assert_held(&env->src_rq->lock); - gvfs_stat_inc(env->sd, detach_count[env->idle]); + gvts_stat_inc(env->sd, detach_count[env->idle]); target = env->target + env->interval; if (env->idle == CPU_NOT_IDLE) { @@ -9304,7 +9304,7 @@ static int detach_lagged_tasks(struct lb_env *env) src_lagged = rq_lagged(env->src_rq, target); /* source is faster than destination */ if (unlikely(dst_lagged >= src_lagged)) { - gvfs_stat_inc(env->sd, detach_neg_diff[env->idle]); + gvts_stat_inc(env->sd, detach_neg_diff[env->idle]); return 0; } max_lagged = dst_lagged; @@ -9325,11 +9325,11 @@ static int detach_lagged_tasks(struct lb_env *env) /* TODO: we do not consider ASYM_PACKING */ if (env->src_rq->nr_running <= 1 -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP && !env->slower_src #endif ) { - gvfs_stat_inc(env->sd, detach_loop_stop[env->idle]); + gvts_stat_inc(env->sd, detach_loop_stop[env->idle]); break; } @@ -9337,33 +9337,33 @@ static int detach_lagged_tasks(struct 
lb_env *env) env->loop++; if (env->loop > env->loop_max) { - gvfs_stat_inc(env->sd, detach_loop_stop[env->idle]); + gvts_stat_inc(env->sd, detach_loop_stop[env->idle]); break; } if (env->loop > env->loop_break) { env->loop_break += sched_nr_migrate_break; env->flags |= LBF_NEED_BREAK; - gvfs_stat_inc(env->sd, detach_loop_stop[env->idle]); + gvts_stat_inc(env->sd, detach_loop_stop[env->idle]); break; } consider_a_task: - gvfs_stat_inc(env->sd, detach_task_count[env->idle]); + gvts_stat_inc(env->sd, detach_task_count[env->idle]); if (!can_migrate_lagged_task(p, env)) { - gvfs_stat_inc(env->sd, detach_task_cannot[env->idle]); + gvts_stat_inc(env->sd, detach_task_cannot[env->idle]); goto next; } benefit = __migration_benefit(src_lagged, dst_lagged, max_lagged, target, src_type, dst_type, &p->se); if (benefit <= 0) { - gvfs_stat_inc(env->sd, detach_task_not_lag[env->idle]); + gvts_stat_inc(env->sd, detach_task_not_lag[env->idle]); goto next; } - gvfs_stat_inc(env->sd, detach_task_detach[env->idle]); + gvts_stat_inc(env->sd, detach_task_detach[env->idle]); detach_task(p, env); list_add(&p->se.group_node, &env->tasks); @@ -9386,7 +9386,7 @@ static int detach_lagged_tasks(struct lb_env *env) /* this may not be the optimal solution */ if (dst_lagged >= src_lagged) { /* even if CPU_IDLE case, we got a task */ - gvfs_stat_inc(env->sd, detach_complete[env->idle]); + gvts_stat_inc(env->sd, detach_complete[env->idle]); break; } @@ -9395,11 +9395,11 @@ static int detach_lagged_tasks(struct lb_env *env) list_move_tail(&p->se.group_node, tasks); } - gvfs_stat_add(env->sd, tb_gained[env->idle], detached); + gvts_stat_add(env->sd, tb_gained[env->idle], detached); return detached; } -#else /* !CONFIG_GVFS_AMP - SMP version */ +#else /* !CONFIG_GVTS_AMP - SMP version */ static struct task_struct *detach_one_lagged_task(struct lb_env *env) { struct task_struct *p, *n, *min_p = NULL; @@ -9414,7 +9414,7 @@ static struct task_struct *detach_one_lagged_task(struct lb_env *env) if (!vruntime_passed(p->se.vruntime, env->target)) { detach_task(p, env); - gvfs_stat_inc(env->sd, atb_pushed_under); + gvts_stat_inc(env->sd, atb_pushed_under); return p; } else if (min_p == NULL || vruntime_passed(min_vruntime, p->se.vruntime)) { min_p = p; @@ -9428,9 +9428,9 @@ static struct task_struct *detach_one_lagged_task(struct lb_env *env) if (min_p) { detach_task(min_p, env); - gvfs_stat_inc(env->sd, atb_pushed); + gvts_stat_inc(env->sd, atb_pushed); } else { - gvfs_stat_inc(env->sd, atb_failed); + gvts_stat_inc(env->sd, atb_failed); } return min_p; } @@ -9448,7 +9448,7 @@ static int detach_lagged_tasks(struct lb_env *env) lockdep_assert_held(&env->src_rq->lock); - gvfs_stat_inc(env->sd, detach_count[env->idle]); + gvts_stat_inc(env->sd, detach_count[env->idle]); if (env->idle == CPU_NOT_IDLE) { target = env->target + env->interval; @@ -9463,7 +9463,7 @@ static int detach_lagged_tasks(struct lb_env *env) /* source is faster than destination */ if (unlikely(lagged_diff <= 0)) { - gvfs_stat_inc(env->sd, detach_neg_diff[env->idle]); + gvts_stat_inc(env->sd, detach_neg_diff[env->idle]); return 0; } @@ -9479,7 +9479,7 @@ static int detach_lagged_tasks(struct lb_env *env) /* TODO: we do not consider ASYM_PACKING */ if (env->src_rq->nr_running <= 1) { - gvfs_stat_inc(env->sd, detach_loop_stop[env->idle]); + gvts_stat_inc(env->sd, detach_loop_stop[env->idle]); break; } @@ -9487,21 +9487,21 @@ static int detach_lagged_tasks(struct lb_env *env) env->loop++; if (env->loop > env->loop_max) { - gvfs_stat_inc(env->sd, 
detach_loop_stop[env->idle]); + gvts_stat_inc(env->sd, detach_loop_stop[env->idle]); break; } if (env->loop > env->loop_break) { env->loop_break += sched_nr_migrate_break; env->flags |= LBF_NEED_BREAK; - gvfs_stat_inc(env->sd, detach_loop_stop[env->idle]); + gvts_stat_inc(env->sd, detach_loop_stop[env->idle]); break; } - gvfs_stat_inc(env->sd, detach_task_count[env->idle]); + gvts_stat_inc(env->sd, detach_task_count[env->idle]); if (!can_migrate_lagged_task(p, env)) { - gvfs_stat_inc(env->sd, detach_task_cannot[env->idle]); + gvts_stat_inc(env->sd, detach_task_cannot[env->idle]); goto next; } @@ -9512,7 +9512,7 @@ static int detach_lagged_tasks(struct lb_env *env) not_lagged_p = p; not_lagged = lagged; } - gvfs_stat_inc(env->sd, detach_task_not_lag[env->idle]); + gvts_stat_inc(env->sd, detach_task_not_lag[env->idle]); goto next; } @@ -9521,11 +9521,11 @@ static int detach_lagged_tasks(struct lb_env *env) over_p = p; over_lagged = lagged; } - gvfs_stat_inc(env->sd, detach_task_too_lag[env->idle]); + gvts_stat_inc(env->sd, detach_task_too_lag[env->idle]); goto next; } - gvfs_stat_inc(env->sd, detach_task_detach[env->idle]); + gvts_stat_inc(env->sd, detach_task_detach[env->idle]); detach_task(p, env); list_add(&p->se.group_node, &env->tasks); @@ -9545,7 +9545,7 @@ static int detach_lagged_tasks(struct lb_env *env) #endif if (lagged_diff <= 0) { - gvfs_stat_inc(env->sd, detach_complete[env->idle]); + gvts_stat_inc(env->sd, detach_complete[env->idle]); break; } @@ -9558,7 +9558,7 @@ static int detach_lagged_tasks(struct lb_env *env) detach_task(over_p, env); list_add(&over_p->se.group_node, &env->tasks); detached++; - gvfs_stat_inc(env->sd, detach_task_too_detach[env->idle]); + gvts_stat_inc(env->sd, detach_task_too_detach[env->idle]); } /* for idle cases, @@ -9568,20 +9568,20 @@ static int detach_lagged_tasks(struct lb_env *env) detach_task(not_lagged_p, env); list_add(&not_lagged_p->se.group_node, &env->tasks); detached++; - gvfs_stat_inc(env->sd, detach_task_not_detach[env->idle]); + gvts_stat_inc(env->sd, detach_task_not_detach[env->idle]); } - gvfs_stat_add(env->sd, tb_gained[env->idle], detached); + gvts_stat_add(env->sd, tb_gained[env->idle], detached); return detached; } -#endif /* !CONFIG_GVFS_AMP - SMP version */ +#endif /* !CONFIG_GVTS_AMP - SMP version */ static int find_dst_cpu(int __sd_level, int src_cpu) { struct sched_domain *sd; int sd_level = -(__sd_level + 1); int cpu, dst_cpu = -1, dst_idle_cpu = -1; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP int dst_idle_type = -1; #endif unsigned int min_exit_latency = UINT_MAX; @@ -9607,7 +9607,7 @@ static int find_dst_cpu(int __sd_level, int src_cpu) { struct rq *rq = cpu_rq(cpu); struct cpuidle_state *idle = idle_get_state(rq); -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP if (rq->cpu_type < dst_idle_type) /* when dst_idle_cpu < 0, dst_idle_type < 0. * Also, rq->cpu_type >= 0 always. 
*/ @@ -9615,7 +9615,7 @@ static int find_dst_cpu(int __sd_level, int src_cpu) { #endif if (dst_idle_cpu < 0 -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP || rq->cpu_type > dst_idle_type #endif ) { /* first */ @@ -9623,7 +9623,7 @@ static int find_dst_cpu(int __sd_level, int src_cpu) { min_exit_latency = idle->exit_latency; latest_idle_timestamp = rq->idle_stamp; dst_idle_cpu = cpu; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP dst_idle_type = rq->cpu_type; #endif } else if (idle && idle->exit_latency < min_exit_latency) { @@ -9717,11 +9717,11 @@ static int active_target_vruntime_balance_cpu_stop(void *data) .idle = CPU_IDLE, }; - gvfs_stat_inc(sd, atb_count); + gvts_stat_inc(sd, atb_count); p = detach_one_lagged_task(&env); if (p) - gvfs_stat_inc(sd, tb_gained[CPU_IDLE]); + gvts_stat_inc(sd, tb_gained[CPU_IDLE]); } rcu_read_unlock(); @@ -9766,7 +9766,7 @@ static int __target_vruntime_balance(int this_cpu, struct rq *this_rq, .tolerance = idle == CPU_NOT_IDLE ? tolerance : 0, }; -#if CONFIG_GVFS_TOLERANCE_PERCENT > 0 +#if CONFIG_GVTS_TOLERANCE_PERCENT > 0 /* if idle != CPU_NOT_IDLE, ignore the tolerance */ /* if (idle == CPU_NOT_IDLE) env.target -= tolerance; */ @@ -9786,31 +9786,31 @@ static int __target_vruntime_balance(int this_cpu, struct rq *this_rq, cpumask_andnot(cpus, cpus, sd_vruntime_span(sd_child->vruntime)); } - gvfs_stat_inc(sd, tb_count[idle]); + gvts_stat_inc(sd, tb_count[idle]); redo: child = find_most_lagged_child(&env); if (child == NULL) { - gvfs_stat_inc(sd, tb_nolaggedgroup[idle]); + gvts_stat_inc(sd, tb_nolaggedgroup[idle]); goto out_balanced; } lagged_rq = find_most_lagged_rq(&env, child); if (lagged_rq == NULL) { - gvfs_stat_inc(sd, tb_nolaggedcpu[idle]); + gvts_stat_inc(sd, tb_nolaggedcpu[idle]); goto out_balanced; } env.src_cpu = lagged_rq->cpu; env.src_rq = lagged_rq; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP env.slower_src = env.src_rq->cpu_type < env.dst_rq->cpu_type; #endif pulled_tasks = 0; if (env.src_rq->nr_running > 1 -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP || env.slower_src #endif ) { @@ -9846,7 +9846,7 @@ static int __target_vruntime_balance(int this_cpu, struct rq *this_rq, } else if (pulled_tasks == 0) { /* !(env.flags & LBF_ALL_PINNED) && pulled_tasks == 0 => the only task possible to pull is the running task */ - gvfs_stat_inc(sd, tb_all_pinned_but_running[idle]); + gvts_stat_inc(sd, tb_all_pinned_but_running[idle]); need_active_balance = 1; } @@ -9884,12 +9884,12 @@ static int __target_vruntime_balance(int this_cpu, struct rq *this_rq, goto unlock_one_pinned; } -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP if (migration_benefit(env.src_rq->curr, &env) < 0) { env.flags |= LBF_ALL_PINNED; goto unlock_one_pinned; }; -#endif /* CONFIG_GVFS_AMP */ +#endif /* CONFIG_GVTS_AMP */ } /* @@ -9956,10 +9956,10 @@ static int _target_vruntime_balance(struct rq *this_rq, enum cpu_idle_type idle_ interval = sd_vruntime->interval; tolerance = sd_vruntime->tolerance; - gvfs_stat_inc(sd, tvb_count[idle_init]); + gvts_stat_inc(sd, tvb_count[idle_init]); if (vruntime_passed(target, min_vruntime) && idle == CPU_NOT_IDLE) { - gvfs_stat_inc(sd, tvb_not_reach[idle_init]); + gvts_stat_inc(sd, tvb_not_reach[idle_init]); /* we do not reach the target yet. See you later. */ break; } @@ -9968,19 +9968,19 @@ static int _target_vruntime_balance(struct rq *this_rq, enum cpu_idle_type idle_ * if this is a newly idle case and there is any pulled tasks, * do not pull tasks further and just check the higher level domains. 
*/ if (pulled_tasks == 0 || idle_init != CPU_NEWLY_IDLE) { -#ifdef CONFIG_GVFS_STATS +#ifdef CONFIG_GVTS_STATS int temp; temp = __target_vruntime_balance(this_cpu, this_rq, sd, idle, target, interval, tolerance); - gvfs_stat_inc(sd, tvb_pull_count[idle_init]); + gvts_stat_inc(sd, tvb_pull_count[idle_init]); if (temp > 0) - gvfs_stat_add(sd, tvb_pull_gained[idle_init], temp); + gvts_stat_add(sd, tvb_pull_gained[idle_init], temp); else - gvfs_stat_inc(sd, tvb_pull_no_gain[idle_init]); -#else /* !CONFIG_GVFS_STATS */ + gvts_stat_inc(sd, tvb_pull_no_gain[idle_init]); +#else /* !CONFIG_GVTS_STATS */ pulled_tasks += __target_vruntime_balance(this_cpu, this_rq, sd, idle, target, interval, tolerance); -#endif /* !CONFIG_GVFS_STATS */ +#endif /* !CONFIG_GVTS_STATS */ } if (pulled_tasks > 0) { @@ -9992,7 +9992,7 @@ static int _target_vruntime_balance(struct rq *this_rq, enum cpu_idle_type idle_ /* cpu is idle, but nothing to pull... */ /* even if min_vruntime > target, do not update the target. The actual value of min_vruntime = 0. */ - gvfs_stat_inc(sd, tvb_idle_continue[idle_init]); + gvts_stat_inc(sd, tvb_idle_continue[idle_init]); continue; } @@ -10003,14 +10003,14 @@ static int _target_vruntime_balance(struct rq *this_rq, enum cpu_idle_type idle_ update_min_target(sd_vruntime, target, 0); if (pulled_tasks > 0) { - gvfs_stat_inc(sd, tvb_stay[idle_init]); + gvts_stat_inc(sd, tvb_stay[idle_init]); break; /* stay in this round */ } else { /* pulled_tasks == 0 */ /* We are the lagged or idle cpu, but no jobs to pull. * Thus, we do not need to update the target of domain at this level. * Let's go to the higher level. */ - gvfs_stat_inc(sd, tvb_not_update[idle_init]); + gvts_stat_inc(sd, tvb_not_update[idle_init]); continue; } } @@ -10027,9 +10027,9 @@ static int _target_vruntime_balance(struct rq *this_rq, enum cpu_idle_type idle_ target = atomic64_cmpxchg(&sd_vruntime->target, old_target, target); if (target == old_target) { atomic_set(&sd_vruntime->updated_by, this_cpu); - gvfs_stat_inc(sd, tvb_update_target[idle_init]); + gvts_stat_inc(sd, tvb_update_target[idle_init]); } else { - gvfs_stat_inc(sd, target_update_racing); + gvts_stat_inc(sd, target_update_racing); if (my_target >= target) goto again; } @@ -10046,7 +10046,7 @@ static int _target_vruntime_balance(struct rq *this_rq, enum cpu_idle_type idle_ return pulled_tasks; } -/* for GVFS_STATS, source activated balancing and detecting infeasible weight tasks */ +/* for GVTS_STATS, source activated balancing and detecting infeasible weight tasks */ static u64 check_target_diff(struct rq *rq, struct sched_domain **large_diff_sd) { int cpu = cpu_of(rq); struct sched_domain *sd; @@ -10065,19 +10065,19 @@ static u64 check_target_diff(struct rq *rq, struct sched_domain **large_diff_sd) if (target > my_target) { diff = (target - my_target) / interval; - if (diff > CONFIG_GVFS_TARGET_DIFF_THRESHOLD) { + if (diff > CONFIG_GVTS_TARGET_DIFF_THRESHOLD) { *large_diff_sd = sd; if (diff > max_diff) max_diff = diff; } } -#ifdef CONFIG_GVFS_STATS +#ifdef CONFIG_GVTS_STATS else diff = 0; if (diff >= NUM_MAX_TARGET_DIFF) diff = NUM_MAX_TARGET_DIFF - 1; - gvfs_stat_inc(sd, target_diff[diff]); -#endif /* CONFIG_GVFS_STATS */ + gvts_stat_inc(sd, target_diff[diff]); +#endif /* CONFIG_GVTS_STATS */ } rcu_read_unlock(); @@ -10090,13 +10090,13 @@ void check_infeasible_weight(struct rq *this_rq, u64 max_diff, struct sched_doma int cpu; int infeasible_weight; - if (max_diff < CONFIG_GVFS_TARGET_DIFF_INFEASIBLE_WEIGHT) { /* target diff < GVFS_TARGET_DIFF_INFEASIBLE_WEIGHT 
*/ + if (max_diff < CONFIG_GVTS_TARGET_DIFF_INFEASIBLE_WEIGHT) { /* target diff < GVTS_TARGET_DIFF_INFEASIBLE_WEIGHT */ if (this_rq->infeasible_weight) this_rq->infeasible_weight = 0; return; } -#if CONFIG_GVFS_TARGET_DIFF_INFEASIBLE_WEIGHT < CONFIG_GVFS_TARGET_DIFF_THRESHOLD +#if CONFIG_GVTS_TARGET_DIFF_INFEASIBLE_WEIGHT < CONFIG_GVTS_TARGET_DIFF_THRESHOLD if (unlikely(!sd)) return; #endif @@ -10160,13 +10160,13 @@ static int target_vruntime_balance(struct rq *this_rq, enum cpu_idle_type idle) What is the proper value for initial target_vruntime? */ return 0; - gvfs_stat_inc(this_rq, tvb_count[idle]); + gvts_stat_inc(this_rq, tvb_count[idle]); max_diff = check_target_diff(this_rq, &large_diff_sd); check_infeasible_weight(this_rq, max_diff, large_diff_sd); if (large_diff_sd && this_rq->nr_running > 1) { - gvfs_stat_inc(this_rq, satb_cond); + gvts_stat_inc(this_rq, satb_cond); /* if cpu_stopper->thread->on_cpu == 1, * cpu_stopper may be the previous task, * then kernel/sched/core.c:try_to_wake_up()=>smp_cond_acquire(!p->on_cpu) causes infinite loop. */ @@ -10174,7 +10174,7 @@ static int target_vruntime_balance(struct rq *this_rq, enum cpu_idle_type idle) this_rq->active_balance = 2; this_rq->push_cpu = -large_diff_sd->level - 1; do_active_balance = 1; - gvfs_stat_inc(this_rq, satb_try); + gvts_stat_inc(this_rq, satb_try); goto skip_fast_check; } } @@ -10189,7 +10189,7 @@ static int target_vruntime_balance(struct rq *this_rq, enum cpu_idle_type idle) /* we are not the fastest one. Just update the target. */ update_target_vruntime_cache(cfs_rq, target, 1); - gvfs_stat_inc(this_rq, tvb_fast_path[idle]); + gvts_stat_inc(this_rq, tvb_fast_path[idle]); return 0; } } @@ -10213,7 +10213,7 @@ static int target_vruntime_balance(struct rq *this_rq, enum cpu_idle_type idle) raw_spin_lock(&this_rq->lock); - /* for load aware GVFS, target vruntime cache is tightly related to cfs_rq->lagged. + /* for load aware GVTS, target vruntime cache is tightly related to cfs_rq->lagged. * Thus, we need to synchronize the values, * and we update it under rq->lock. */ @@ -10241,9 +10241,9 @@ static int target_vruntime_balance(struct rq *this_rq, enum cpu_idle_type idle) return pulled_tasks; } -#endif /* CONFIG_GVFS */ +#endif /* CONFIG_GVTS */ -#ifndef CONFIG_GVFS +#ifndef CONFIG_GVTS /* * active_load_balance_cpu_stop is run by cpu stopper. It pushes * running tasks off the busiest CPU onto idle CPUs. 
It requires at @@ -10315,7 +10315,7 @@ static int active_load_balance_cpu_stop(void *data) return 0; } -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ static inline int on_null_domain(struct rq *rq) { @@ -10331,7 +10331,7 @@ static inline int on_null_domain(struct rq *rq) */ static struct { cpumask_var_t idle_cpus_mask; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP cpumask_var_t idle_cpus_mask_type[NUM_CPU_TYPES]; atomic_t nr_cpus_acc[NUM_CPU_TYPES]; #endif @@ -10379,7 +10379,7 @@ static void nohz_balancer_kick(void) static inline void nohz_balance_exit_idle(int cpu) { -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP int type; #endif if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) { @@ -10388,12 +10388,12 @@ static inline void nohz_balance_exit_idle(int cpu) */ if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) { cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP type = cpu_rq(cpu)->cpu_type; cpumask_clear_cpu(cpu, nohz.idle_cpus_mask_type[type]); for (; type >= 0; type--) atomic_dec(&nohz.nr_cpus_acc[type]); -#endif /* CONFIG_GVFS_AMP */ +#endif /* CONFIG_GVTS_AMP */ atomic_dec(&nohz.nr_cpus); } clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); @@ -10440,7 +10440,7 @@ void set_cpu_sd_state_idle(void) */ void nohz_balance_enter_idle(int cpu) { -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP int type; #endif /* @@ -10459,12 +10459,12 @@ void nohz_balance_enter_idle(int cpu) return; cpumask_set_cpu(cpu, nohz.idle_cpus_mask); -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP type = cpu_rq(cpu)->cpu_type; cpumask_set_cpu(cpu, nohz.idle_cpus_mask_type[type]); for (; type >= 0; type--) atomic_inc(&nohz.nr_cpus_acc[type]); -#endif /* CONFIG_GVFS_AMP */ +#endif /* CONFIG_GVTS_AMP */ atomic_inc(&nohz.nr_cpus); set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); } @@ -10482,9 +10482,9 @@ static int sched_ilb_notifier(struct notifier_block *nfb, } #endif -#ifndef CONFIG_GVFS +#ifndef CONFIG_GVTS static DEFINE_SPINLOCK(balancing); -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ /* * Scale the max load_balance interval with the number of CPUs in the system. @@ -10495,7 +10495,7 @@ void update_max_interval(void) max_load_balance_interval = HZ*num_online_cpus()/10; } -#ifndef CONFIG_GVFS +#ifndef CONFIG_GVTS /* * It checks each scheduling domain to see if it is due to be balanced, * and initiates a balancing operation if so. 
@@ -10604,7 +10604,7 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle) #endif } } -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ #ifdef CONFIG_NO_HZ_COMMON /* @@ -10619,7 +10619,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) /* Earliest time when we have to do rebalance again */ unsigned long next_balance = jiffies + 60*HZ; int update_next_balance = 0; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP int type; #endif @@ -10627,7 +10627,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu))) goto end; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP type = NUM_CPU_TYPES - 1; balance_cpu = -1; while (type >= 0) { @@ -10661,18 +10661,18 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) update_rq_clock(rq); update_cpu_load_idle(rq); raw_spin_unlock_irq(&rq->lock); -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS _target_vruntime_balance(rq, CPU_IDLE); -#else /* !CONFIG_GVFS */ +#else /* !CONFIG_GVTS */ rebalance_domains(rq, CPU_IDLE); -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ } if (time_after(next_balance, rq->next_balance)) { next_balance = rq->next_balance; update_next_balance = 1; } -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP } /* for vim...*/ #else } @@ -10731,7 +10731,7 @@ static inline bool nohz_kick_needed(struct rq *rq) if (rq->nr_running >= 2) return true; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP if (rq->cpu_type + 1 < NUM_CPU_TYPES && rq->cfs.h_nr_running >= 1 && atomic_read(&nohz.nr_cpus_acc[rq->cpu_type + 1]) > 0) @@ -10794,11 +10794,11 @@ static void run_rebalance_domains(struct softirq_action *h) * and abort nohz_idle_balance altogether if we pull some load. */ nohz_idle_balance(this_rq, idle); -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS _target_vruntime_balance(this_rq, idle); -#else /* !CONFIG_GVFS */ +#else /* !CONFIG_GVTS */ rebalance_domains(this_rq, idle); -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ } /* @@ -10810,7 +10810,7 @@ void trigger_load_balance(struct rq *rq) if (unlikely(on_null_domain(rq))) return; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS /* call the target_vruntime_balance() for each tick. */ #ifdef CONFIG_NO_HZ_COMMON if (rq->idle_balance && time_after_eq(jiffies, rq->next_balance)) @@ -10819,10 +10819,10 @@ void trigger_load_balance(struct rq *rq) if (rq->idle_balance) raise_softirq(SCHED_SOFTIRQ); #endif -#else /* !CONFIG_GVFS */ +#else /* !CONFIG_GVTS */ if (time_after_eq(jiffies, rq->next_balance)) raise_softirq(SCHED_SOFTIRQ); -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ #ifdef CONFIG_NO_HZ_COMMON if (nohz_kick_needed(rq)) nohz_balancer_kick(); @@ -10859,7 +10859,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) entity_tick(cfs_rq, se, queued); } -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS if (update_eff_load(&curr->se, se)) /* if eff_load is updated, update the lagged. */ update_lagged(&curr->se, &rq->cfs); @@ -10868,7 +10868,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) if (static_branch_unlikely(&sched_numa_balancing)) task_tick_numa(rq, curr); -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS /* current task reach at the target. * when rq->cfs.nr_running > 1 => schedule one of other tasks that do not reach the target. * If all tasks reach the target, call target_vruntime_balance(). 
@@ -10878,7 +10878,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) clear_buddies(task_cfs_rq(curr), &curr->se); return; } -#endif /* CONFIG_GVFS */ +#endif /* CONFIG_GVTS */ } /* @@ -10916,7 +10916,7 @@ static void task_fork_fair(struct task_struct *p) if (curr) se->vruntime = curr->vruntime; place_entity(cfs_rq, se, 1); -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS account_start_debit(cfs_rq, se); #endif @@ -10929,9 +10929,9 @@ static void task_fork_fair(struct task_struct *p) resched_curr(rq); } -#ifndef CONFIG_GVFS /* for GVFS, do not normalize vruntime based on min_vruntime */ +#ifndef CONFIG_GVTS /* for GVTS, do not normalize vruntime based on min_vruntime */ se->vruntime -= cfs_rq->min_vruntime; -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ raw_spin_unlock_irqrestore(&rq->lock, flags); } @@ -10990,7 +10990,7 @@ static void detach_task_cfs_rq(struct task_struct *p) struct sched_entity *se = &p->se; struct cfs_rq *cfs_rq = cfs_rq_of(se); -#ifndef CONFIG_GVFS /* for GVFS, do not normalize vruntime while detaching */ +#ifndef CONFIG_GVTS /* for GVTS, do not normalize vruntime while detaching */ if (!vruntime_normalized(p)) { /* * Fix up our vruntime so that the current sleep doesn't @@ -10999,11 +10999,11 @@ static void detach_task_cfs_rq(struct task_struct *p) place_entity(cfs_rq, se, 0); se->vruntime -= cfs_rq->min_vruntime; } -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ /* Catch up with the cfs_rq and remove our load when we leave */ detach_entity_load_avg(cfs_rq, se); -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS update_tg_load_sum(se, cfs_rq->tg, se->load.weight, 0, TG_LOAD_SUM_DETACH); #endif } @@ -11023,14 +11023,14 @@ static void attach_task_cfs_rq(struct task_struct *p) /* Synchronize task with its cfs_rq */ attach_entity_load_avg(cfs_rq, se); -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS update_tg_load_sum(se, cfs_rq->tg, 0, se->load.weight, TG_LOAD_SUM_ATTACH); #endif -#if !defined(CONFIG_GVFS) /* for GVFS, do not normalize vruntime base on min_vruntime */ +#if !defined(CONFIG_GVTS) /* for GVTS, do not normalize vruntime base on min_vruntime */ if (!vruntime_normalized(p)) se->vruntime += cfs_rq->min_vruntime; -#endif /* !CONFIG_GVFS */ +#endif /* !CONFIG_GVTS */ } static void switched_from_fair(struct rq *rq, struct task_struct *p) @@ -11071,7 +11071,7 @@ static void set_curr_task_fair(struct rq *rq) /* ensure bandwidth has been allocated on our new cfs_rq */ account_cfs_rq_runtime(cfs_rq, 0); } -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP set_curr_effi(rq->curr); #endif } @@ -11223,7 +11223,7 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, static DEFINE_MUTEX(shares_mutex); -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS static void __sched_group_set_shares(struct task_group *tg, unsigned long old, unsigned long new) { unsigned long ret; @@ -11264,7 +11264,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares) if (tg->shares == shares) goto done; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS /* update load_sum of parent */ __sched_group_set_shares(tg, tg->shares, shares); #else @@ -11398,7 +11398,7 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m) __init void init_sched_fair_class(void) { -#if defined(CONFIG_GVFS_AMP) && defined(CONFIG_NO_HZ_COMMON) +#if defined(CONFIG_GVTS_AMP) && defined(CONFIG_NO_HZ_COMMON) int type; #endif #ifdef CONFIG_SMP @@ -11407,7 +11407,7 @@ __init void init_sched_fair_class(void) #ifdef CONFIG_NO_HZ_COMMON nohz.next_balance = jiffies; 
zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP for_each_type(type) { zalloc_cpumask_var(&nohz.idle_cpus_mask_type[type], GFP_NOWAIT); } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index af9ec3b8489f..65eac3b209f0 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -36,7 +36,7 @@ extern void update_cpu_load_active(struct rq *this_rq); static inline void update_cpu_load_active(struct rq *this_rq) { } #endif -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS /* refer to max_vruntime() */ #define vruntime_passed(min_vruntime, target_vruntime) \ ((s64)((min_vruntime) - (target_vruntime)) >= 0) @@ -53,7 +53,7 @@ static inline void update_cpu_load_active(struct rq *this_rq) { } #define TG_LOAD_SUM_CHANGE 0x10 #endif -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP #define for_each_type(type) \ for (type = 0; type < NUM_CPU_TYPES; type++) @@ -64,7 +64,7 @@ static inline void update_cpu_load_active(struct rq *this_rq) { } #define EFFICIENCY_ESTIMATE 3 /* estimated */ extern __read_mostly unsigned long DEFAULT_EFFICIENCY[NUM_CPU_TYPES]; -#endif /* CONFIG_GVFS_AMP */ +#endif /* CONFIG_GVTS_AMP */ /* * Helpers for converting nanosecond timing to jiffy resolution @@ -285,9 +285,9 @@ struct task_group { * will also be accessed at each tick. */ atomic_long_t load_avg ____cacheline_aligned; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS atomic_long_t load_sum ____cacheline_aligned; /* this is also heavily contended at clock tick time */ -#endif /* CONFIG_GVFS */ +#endif /* CONFIG_GVTS */ #endif /* CONFIG_SMP */ #endif /* CONFIG_FAIR_GROUP_SCHED */ @@ -399,7 +399,7 @@ struct cfs_rq { #ifndef CONFIG_64BIT u64 min_vruntime_copy; #endif -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS u64 real_min_vruntime; /* can go backward */ #ifndef CONFIG_64BIT u64 real_min_vruntime_copy; @@ -419,7 +419,7 @@ struct cfs_rq { */ s64 lagged; unsigned long lagged_weight; -#endif /* CONFIG_GVFS */ +#endif /* CONFIG_GVTS */ struct rb_root tasks_timeline; struct rb_node *rb_leftmost; @@ -484,16 +484,16 @@ struct cfs_rq { u64 throttled_clock, throttled_clock_task; u64 throttled_clock_task_time; -#ifdef CONFIG_GVFS_BANDWIDTH +#ifdef CONFIG_GVTS_BANDWIDTH u64 throttled_target; #endif int throttled, throttle_count; struct list_head throttled_list; -#ifdef CONFIG_GVFS_BANDWIDTH +#ifdef CONFIG_GVTS_BANDWIDTH struct list_head state_q[2]; struct list_head *active_q, *thrott_q; -#endif /* CONFIG_GVFS_BANDWIDTH */ +#endif /* CONFIG_GVTS_BANDWIDTH */ #endif /* CONFIG_CFS_BANDWIDTH */ #endif /* CONFIG_FAIR_GROUP_SCHED */ }; @@ -636,7 +636,7 @@ struct rq { /* runqueue lock: */ raw_spinlock_t lock; -#ifdef CONFIG_GVFS_AMP +#ifdef CONFIG_GVTS_AMP int cpu_type; #endif /* @@ -693,7 +693,7 @@ struct rq { #ifdef CONFIG_SMP struct root_domain *rd; struct sched_domain *sd; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS struct sd_vruntime *sd_vruntime; int infeasible_weight; #endif @@ -764,7 +764,7 @@ struct rq { unsigned int ttwu_local; #endif -#ifdef CONFIG_GVFS_STATS +#ifdef CONFIG_GVTS_STATS unsigned int largest_idle_min_vruntime_racing; /* target_vruntime_balance() stats */ unsigned int tvb_count[CPU_MAX_IDLE_TYPES]; @@ -782,9 +782,9 @@ struct rq { /* get_min_target() stats */ unsigned int get_traverse_rq_count; unsigned int get_traverse_child_count; - /* GVFS_BANDWIDTH */ + /* GVTS_BANDWIDTH */ unsigned int iterate_thrott_q; -#endif /* CONFIG_GVFS_STATS */ +#endif /* CONFIG_GVTS_STATS */ #ifdef CONFIG_SMP struct llist_head wake_list; @@ -977,7 +977,7 @@ struct sched_group { unsigned long cpumask[0]; 
}; -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS struct sd_vruntime { atomic_t updated_by; /* cpu id who updates the target */ atomic64_t target; diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c index 6f43a082a110..f55f61163c3e 100644 --- a/kernel/sched/stats.c +++ b/kernel/sched/stats.c @@ -25,7 +25,7 @@ static int show_schedstat(struct seq_file *seq, void *v) struct sched_domain *sd; int dcount = 0; #endif -#ifdef CONFIG_GVFS_STATS +#ifdef CONFIG_GVTS_STATS enum cpu_idle_type itype; #endif @@ -40,7 +40,7 @@ static int show_schedstat(struct seq_file *seq, void *v) rq->ttwu_count, rq->ttwu_local, rq->rq_cpu_time, rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount); -#ifdef CONFIG_GVFS_STATS +#ifdef CONFIG_GVTS_STATS seq_printf(seq, " %u %u", rq->nr_running, rq->cfs.h_nr_running @@ -73,7 +73,7 @@ static int show_schedstat(struct seq_file *seq, void *v) rq->get_traverse_child_count, rq->iterate_thrott_q ); -#endif /* CONFIG_GVFS_STATS */ +#endif /* CONFIG_GVTS_STATS */ #ifdef CONFIG_SMP /* domain-specific stats */ @@ -102,7 +102,7 @@ static int show_schedstat(struct seq_file *seq, void *v) sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed, sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance); -#ifdef CONFIG_GVFS_STATS +#ifdef CONFIG_GVTS_STATS seq_printf(seq, " %u %u %u %u", sd->atb_count, sd->atb_failed, sd->atb_pushed, sd->atb_pushed_under); @@ -168,7 +168,7 @@ static int show_schedstat(struct seq_file *seq, void *v) for (itype = 0; itype < NUM_MAX_TARGET_DIFF; itype++) { seq_printf(seq, " %u", sd->target_diff[itype]); } -#endif /* CONFIG_GVFS_STATS */ +#endif /* CONFIG_GVTS_STATS */ seq_printf(seq, "\n"); } rcu_read_unlock(); diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h index 04810418f233..5066a4243fc1 100644 --- a/kernel/sched/stats.h +++ b/kernel/sched/stats.h @@ -50,13 +50,13 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta) #endif -#ifdef CONFIG_GVFS_STATS /* depends on CONFIG_SCHEDSTATS */ +#ifdef CONFIG_GVTS_STATS /* depends on CONFIG_SCHEDSTATS */ -# define gvfs_stat_inc(rq, field) do { if (schedstat_enabled()) { (rq)->field++; } } while (0) -# define gvfs_stat_add(rq, field, amt) do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0) -# define gvfs_stat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0) +# define gvts_stat_inc(rq, field) do { if (schedstat_enabled()) { (rq)->field++; } } while (0) +# define gvts_stat_add(rq, field, amt) do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0) +# define gvts_stat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0) #else -# define gvfs_stat_inc(rq, field) do { } while (0) -# define gvfs_stat_add(rq, field, amt) do { } while (0) -# define gvfs_stat_set(var, val) do { } while (0) +# define gvts_stat_inc(rq, field) do { } while (0) +# define gvts_stat_add(rq, field, amt) do { } while (0) +# define gvts_stat_set(var, val) do { } while (0) #endif #ifdef CONFIG_SCHED_INFO diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 993eafbf621c..2f9d71f4da73 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -75,12 +75,12 @@ static void __cpu_stop_queue_work(struct cpu_stopper *stopper, wake_up_process(stopper->thread); } -#ifdef CONFIG_GVFS +#ifdef CONFIG_GVTS int cpu_stop_thread_on_cpu(unsigned int cpu) { struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); return stopper->thread->on_cpu; } -#endif /* CONFIG_GVFS */ +#endif /* CONFIG_GVTS */ /* queue @work to @stopper. 
if offline, @work is completed immediately */ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index c46254055895..14f62cc467b1 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -175,7 +175,7 @@ cond_syscall(sys_setfsgid); cond_syscall(sys_capget); cond_syscall(sys_capset); cond_syscall(sys_copy_file_range); -cond_syscall(sys_gvfs); +cond_syscall(sys_gvts); /* arch-specific weak syscall entries */ cond_syscall(sys_pciconfig_read);