incorrect unlikely() and likely() cleanups for 2.6.35.7

1 parent 647d82c · commit bb09446d261f406ec913aa3f4c402d622998f2f5 · @rallegade committed May 14, 2011
Showing with 8 additions and 8 deletions.
  1. +1 −1 fs/file_table.c
  2. +1 −1 include/linux/file.h
  3. +1 −1 include/linux/pagemap.h
  4. +2 −2 kernel/sched.c
  5. +2 −2 kernel/sched_rt.c
  6. +1 −1 mm/filemap.c
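
For context, likely() and unlikely() are thin wrappers around GCC's __builtin_expect(). The sketch below follows their definitions in include/linux/compiler.h for kernels of this vintage; example_fput_light() is a hypothetical caller added only for illustration, not code from this commit:

/* Branch-annotation macros: they evaluate the condition unchanged and
 * only pass a prediction hint to the compiler. */
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Hypothetical illustration: both branches test the same condition;
 * the annotation only says which outcome to optimize the code layout for. */
static inline void example_fput_light(struct file *file, int fput_needed)
{
        if (unlikely(fput_needed))      /* hinted: usually false */
                fput(file);
}

Dropping the annotation, as most hunks below do, leaves a plain if (x) with no hint; swapping unlikely() for likely(), as in the kernel/sched_rt.c hunks, inverts the hint rather than removing it.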
fs/file_table.c
@@ -301,7 +301,7 @@ struct file *fget_light(unsigned int fd, int *fput_needed)
struct files_struct *files = current->files;
*fput_needed = 0;
- if (likely((atomic_read(&files->count) == 1))) {
+ if (atomic_read(&files->count) == 1) {
file = fcheck_files(files, fd);
} else {
rcu_read_lock();
include/linux/file.h
@@ -23,7 +23,7 @@ extern struct file *alloc_file(struct path *, fmode_t mode,
static inline void fput_light(struct file *file, int fput_needed)
{
- if (unlikely(fput_needed))
+ if (fput_needed)
fput(file);
}
include/linux/pagemap.h
@@ -47,7 +47,7 @@ static inline void mapping_clear_unevictable(struct address_space *mapping)
static inline int mapping_unevictable(struct address_space *mapping)
{
- if (likely(mapping))
+ if (mapping)
return test_bit(AS_UNEVICTABLE, &mapping->flags);
return !!mapping;
}
kernel/sched.c
@@ -125,7 +125,7 @@
static inline int rt_policy(int policy)
{
- if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
+ if (policy == SCHED_FIFO || policy == SCHED_RR)
return 1;
return 0;
}
@@ -2390,7 +2390,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
if (p->sched_class->task_woken)
p->sched_class->task_woken(rq, p);
- if (unlikely(rq->idle_stamp)) {
+ if (rq->idle_stamp) {
u64 delta = rq->clock - rq->idle_stamp;
u64 max = 2*sysctl_sched_migration_cost;
kernel/sched_rt.c
@@ -1061,7 +1061,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
rt_rq = &rq->rt;
- if (unlikely(!rt_rq->rt_nr_running))
+ if (likely(!rt_rq->rt_nr_running))
return NULL;
if (rt_rq_throttled(rt_rq))
@@ -1473,7 +1473,7 @@ static int pull_rt_task(struct rq *this_rq)
static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
/* Try to pull RT tasks here if we lower this rq's prio */
- if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
+ if (likely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
pull_rt_task(rq);
}
mm/filemap.c
@@ -2203,7 +2203,7 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
gfp_notmask = __GFP_FS;
repeat:
page = find_lock_page(mapping, index);
- if (likely(page))
+ if (page)
return page;
page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
