Skip to content

Commit

Permalink
Merge branch 'akpm' (fixes from Andrew)
Browse files Browse the repository at this point in the history
Merge misc fixes from Andrew Morton:

 - A bunch of fixes

 - Finish off the idr API conversions before someone starts to use the
   old interfaces again.

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  idr: idr_alloc() shouldn't trigger lowmem warning when preloaded
  UAPI: fix endianness conditionals in M32R's asm/stat.h
  UAPI: fix endianness conditionals in linux/raid/md_p.h
  UAPI: fix endianness conditionals in linux/acct.h
  UAPI: fix endianness conditionals in linux/aio_abi.h
  decompressors: fix typo "POWERPC"
  mm/fremap.c: fix oops on error path
  idr: deprecate idr_pre_get() and idr_get_new[_above]()
  tidspbridge: convert to idr_alloc()
  zcache: convert to idr_alloc()
  mlx4: remove leftover idr_pre_get() call
  workqueue: convert to idr_alloc()
  nfsd: convert to idr_alloc()
  nfsd: remove unused get_new_stid()
  kernel/signal.c: use __ARCH_HAS_SA_RESTORER instead of SA_RESTORER
  signal: always clear sa_restorer on execve
  mm: remove_memory(): fix end_pfn setting
  include/linux/res_counter.h needs errno.h
  • Loading branch information
torvalds committed Mar 13, 2013
2 parents ad8395e + 59bfbcf commit 842d223
Show file tree
Hide file tree
Showing 16 changed files with 144 additions and 174 deletions.
4 changes: 2 additions & 2 deletions arch/m32r/include/uapi/asm/stat.h
Expand Up @@ -63,10 +63,10 @@ struct stat64 {
long long st_size; long long st_size;
unsigned long st_blksize; unsigned long st_blksize;


#if defined(__BIG_ENDIAN) #if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
unsigned long __pad4; /* future possible st_blocks high bits */ unsigned long __pad4; /* future possible st_blocks high bits */
unsigned long st_blocks; /* Number 512-byte blocks allocated. */ unsigned long st_blocks; /* Number 512-byte blocks allocated. */
#elif defined(__LITTLE_ENDIAN) #elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
unsigned long st_blocks; /* Number 512-byte blocks allocated. */ unsigned long st_blocks; /* Number 512-byte blocks allocated. */
unsigned long __pad4; /* future possible st_blocks high bits */ unsigned long __pad4; /* future possible st_blocks high bits */
#else #else
Expand Down
1 change: 0 additions & 1 deletion drivers/infiniband/hw/mlx4/cm.c
Expand Up @@ -362,7 +362,6 @@ void mlx4_ib_cm_paravirt_init(struct mlx4_ib_dev *dev)
INIT_LIST_HEAD(&dev->sriov.cm_list); INIT_LIST_HEAD(&dev->sriov.cm_list);
dev->sriov.sl_id_map = RB_ROOT; dev->sriov.sl_id_map = RB_ROOT;
idr_init(&dev->sriov.pv_id_table); idr_init(&dev->sriov.pv_id_table);
idr_pre_get(&dev->sriov.pv_id_table, GFP_KERNEL);
} }


/* slave = -1 ==> all slaves */ /* slave = -1 ==> all slaves */
Expand Down
70 changes: 26 additions & 44 deletions drivers/staging/tidspbridge/rmgr/drv.c
Expand Up @@ -76,37 +76,28 @@ int drv_insert_node_res_element(void *hnode, void *node_resource,
struct node_res_object **node_res_obj = struct node_res_object **node_res_obj =
(struct node_res_object **)node_resource; (struct node_res_object **)node_resource;
struct process_context *ctxt = (struct process_context *)process_ctxt; struct process_context *ctxt = (struct process_context *)process_ctxt;
int status = 0;
int retval; int retval;


*node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL); *node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL);
if (!*node_res_obj) { if (!*node_res_obj)
status = -ENOMEM; return -ENOMEM;
goto func_end;
}


(*node_res_obj)->node = hnode; (*node_res_obj)->node = hnode;
retval = idr_get_new(ctxt->node_id, *node_res_obj, retval = idr_alloc(ctxt->node_id, *node_res_obj, 0, 0, GFP_KERNEL);
&(*node_res_obj)->id); if (retval >= 0) {
if (retval == -EAGAIN) { (*node_res_obj)->id = retval;
if (!idr_pre_get(ctxt->node_id, GFP_KERNEL)) { return 0;
pr_err("%s: OUT OF MEMORY\n", __func__);
status = -ENOMEM;
goto func_end;
}

retval = idr_get_new(ctxt->node_id, *node_res_obj,
&(*node_res_obj)->id);
} }
if (retval) {
kfree(*node_res_obj);

if (retval == -ENOSPC) {
pr_err("%s: FAILED, IDR is FULL\n", __func__); pr_err("%s: FAILED, IDR is FULL\n", __func__);
status = -EFAULT; return -EFAULT;
} else {
pr_err("%s: OUT OF MEMORY\n", __func__);
return -ENOMEM;
} }
func_end:
if (status)
kfree(*node_res_obj);

return status;
} }


/* Release all Node resources and its context /* Release all Node resources and its context
Expand Down Expand Up @@ -201,35 +192,26 @@ int drv_proc_insert_strm_res_element(void *stream_obj,
struct strm_res_object **pstrm_res = struct strm_res_object **pstrm_res =
(struct strm_res_object **)strm_res; (struct strm_res_object **)strm_res;
struct process_context *ctxt = (struct process_context *)process_ctxt; struct process_context *ctxt = (struct process_context *)process_ctxt;
int status = 0;
int retval; int retval;


*pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL); *pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL);
if (*pstrm_res == NULL) { if (*pstrm_res == NULL)
status = -EFAULT; return -EFAULT;
goto func_end;
}


(*pstrm_res)->stream = stream_obj; (*pstrm_res)->stream = stream_obj;
retval = idr_get_new(ctxt->stream_id, *pstrm_res, retval = idr_alloc(ctxt->stream_id, *pstrm_res, 0, 0, GFP_KERNEL);
&(*pstrm_res)->id); if (retval >= 0) {
if (retval == -EAGAIN) { (*pstrm_res)->id = retval;
if (!idr_pre_get(ctxt->stream_id, GFP_KERNEL)) { return 0;
pr_err("%s: OUT OF MEMORY\n", __func__);
status = -ENOMEM;
goto func_end;
}

retval = idr_get_new(ctxt->stream_id, *pstrm_res,
&(*pstrm_res)->id);
} }
if (retval) {
if (retval == -ENOSPC) {
pr_err("%s: FAILED, IDR is FULL\n", __func__); pr_err("%s: FAILED, IDR is FULL\n", __func__);
status = -EPERM; return -EPERM;
} else {
pr_err("%s: OUT OF MEMORY\n", __func__);
return -ENOMEM;
} }

func_end:
return status;
} }


static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt) static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
Expand Down
25 changes: 10 additions & 15 deletions drivers/staging/zcache/ramster/tcp.c
Expand Up @@ -300,27 +300,22 @@ static u8 r2net_num_from_nn(struct r2net_node *nn)


static int r2net_prep_nsw(struct r2net_node *nn, struct r2net_status_wait *nsw) static int r2net_prep_nsw(struct r2net_node *nn, struct r2net_status_wait *nsw)
{ {
int ret = 0; int ret;


do { spin_lock(&nn->nn_lock);
if (!idr_pre_get(&nn->nn_status_idr, GFP_ATOMIC)) { ret = idr_alloc(&nn->nn_status_idr, nsw, 0, 0, GFP_ATOMIC);
ret = -EAGAIN; if (ret >= 0) {
break; nsw->ns_id = ret;
} list_add_tail(&nsw->ns_node_item, &nn->nn_status_list);
spin_lock(&nn->nn_lock); }
ret = idr_get_new(&nn->nn_status_idr, nsw, &nsw->ns_id); spin_unlock(&nn->nn_lock);
if (ret == 0)
list_add_tail(&nsw->ns_node_item,
&nn->nn_status_list);
spin_unlock(&nn->nn_lock);
} while (ret == -EAGAIN);


if (ret == 0) { if (ret >= 0) {
init_waitqueue_head(&nsw->ns_wq); init_waitqueue_head(&nsw->ns_wq);
nsw->ns_sys_status = R2NET_ERR_NONE; nsw->ns_sys_status = R2NET_ERR_NONE;
nsw->ns_status = 0; nsw->ns_status = 0;
return 0;
} }

return ret; return ret;
} }


Expand Down
36 changes: 2 additions & 34 deletions fs/nfsd/nfs4state.c
Expand Up @@ -230,37 +230,6 @@ static void nfs4_file_put_access(struct nfs4_file *fp, int oflag)
__nfs4_file_put_access(fp, oflag); __nfs4_file_put_access(fp, oflag);
} }


/*
 * get_new_stid - allocate the next stateid value for @stid
 *
 * NOTE(review): this is the deletion side of the diff — the commit removes
 * this helper now that nfs4_alloc_stid() calls idr_alloc() directly.
 *
 * Inserts @stid into its client's cl_stateids idr at an id >= min_stateid
 * and returns the new id.  min_stateid is a function-static cursor bumped
 * past each allocation (wrapping before INT_MAX) so ids are reused as late
 * as possible; per the comment below, callers are serialized by the state
 * lock — the static cursor is not otherwise thread-safe.
 */
static inline int get_new_stid(struct nfs4_stid *stid)
{
static int min_stateid = 0;
struct idr *stateids = &stid->sc_client->cl_stateids;
int new_stid;
int error;

error = idr_get_new_above(stateids, stid, min_stateid, &new_stid);
/*
 * Note: the necessary preallocation was done in
 * nfs4_alloc_stateid(). The idr code caps the number of
 * preallocations that can exist at a time, but the state lock
 * prevents anyone from using ours before we get here:
 */
WARN_ON_ONCE(error);
/*
 * It shouldn't be a problem to reuse an opaque stateid value.
 * I don't think it is for 4.1. But with 4.0 I worry that, for
 * example, a stray write retransmission could be accepted by
 * the server when it should have been rejected. Therefore,
 * adopt a trick from the sctp code to attempt to maximize the
 * amount of time until an id is reused, by ensuring they always
 * "increase" (mod INT_MAX):
 */

min_stateid = new_stid+1;
if (min_stateid == INT_MAX)
min_stateid = 0;
return new_stid;
}

static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct
kmem_cache *slab) kmem_cache *slab)
{ {
Expand All @@ -273,9 +242,8 @@ kmem_cache *slab)
if (!stid) if (!stid)
return NULL; return NULL;


if (!idr_pre_get(stateids, GFP_KERNEL)) new_id = idr_alloc(stateids, stid, min_stateid, 0, GFP_KERNEL);
goto out_free; if (new_id < 0)
if (idr_get_new_above(stateids, stid, min_stateid, &new_id))
goto out_free; goto out_free;
stid->sc_client = cl; stid->sc_client = cl;
stid->sc_type = 0; stid->sc_type = 0;
Expand Down
66 changes: 50 additions & 16 deletions include/linux/idr.h
Expand Up @@ -73,8 +73,6 @@ struct idr {
*/ */


void *idr_find_slowpath(struct idr *idp, int id); void *idr_find_slowpath(struct idr *idp, int id);
int idr_pre_get(struct idr *idp, gfp_t gfp_mask);
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
void idr_preload(gfp_t gfp_mask); void idr_preload(gfp_t gfp_mask);
int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask); int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_for_each(struct idr *idp, int idr_for_each(struct idr *idp,
Expand Down Expand Up @@ -119,19 +117,6 @@ static inline void *idr_find(struct idr *idr, int id)
return idr_find_slowpath(idr, id); return idr_find_slowpath(idr, id);
} }


/**
* idr_get_new - allocate new idr entry
* @idp: idr handle
* @ptr: pointer you want associated with the id
* @id: pointer to the allocated handle
*
* Simple wrapper around idr_get_new_above() w/ @starting_id of zero.
*/
static inline int idr_get_new(struct idr *idp, void *ptr, int *id)
{
return idr_get_new_above(idp, ptr, 0, id);
}

/** /**
* idr_for_each_entry - iterate over an idr's elements of a given type * idr_for_each_entry - iterate over an idr's elements of a given type
* @idp: idr handle * @idp: idr handle
Expand All @@ -143,7 +128,56 @@ static inline int idr_get_new(struct idr *idp, void *ptr, int *id)
entry != NULL; \ entry != NULL; \
++id, entry = (typeof(entry))idr_get_next((idp), &(id))) ++id, entry = (typeof(entry))idr_get_next((idp), &(id)))


void __idr_remove_all(struct idr *idp); /* don't use */ /*
* Don't use the following functions. These exist only to suppress
* deprecated warnings on EXPORT_SYMBOL()s.
*/
int __idr_pre_get(struct idr *idp, gfp_t gfp_mask);
int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
void __idr_remove_all(struct idr *idp);

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * Part of old alloc interface.  This is going away.  Use
 * idr_preload[_end]() and idr_alloc() instead.
 *
 * Deprecated inline wrapper around __idr_pre_get(); the indirection lets
 * the out-of-line function be exported without the EXPORT_SYMBOL() site
 * itself tripping deprecation warnings.  Return semantics are those of
 * __idr_pre_get() — NOTE(review): presumably non-zero on success, as with
 * the historical idr_pre_get(); confirm against lib/idr.c.
 */
static inline int __deprecated idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
return __idr_pre_get(idp, gfp_mask);
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * Part of old alloc interface.  This is going away.  Use
 * idr_preload[_end]() and idr_alloc() instead.
 *
 * Deprecated inline wrapper; forwards directly to __idr_get_new_above()
 * so only callers of this inline see the deprecation warning.
 */
static inline int __deprecated idr_get_new_above(struct idr *idp, void *ptr,
int starting_id, int *id)
{
return __idr_get_new_above(idp, ptr, starting_id, id);
}

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * Part of old alloc interface.  This is going away.  Use
 * idr_preload[_end]() and idr_alloc() instead.
 *
 * Deprecated convenience wrapper: identical to idr_get_new_above() with a
 * @starting_id of zero, implemented by calling __idr_get_new_above()
 * directly.
 */
static inline int __deprecated idr_get_new(struct idr *idp, void *ptr, int *id)
{
return __idr_get_new_above(idp, ptr, 0, id);
}


/** /**
* idr_remove_all - remove all ids from the given idr tree * idr_remove_all - remove all ids from the given idr tree
Expand Down
1 change: 1 addition & 0 deletions include/linux/res_counter.h
Expand Up @@ -14,6 +14,7 @@
*/ */


#include <linux/cgroup.h> #include <linux/cgroup.h>
#include <linux/errno.h>


/* /*
* The core object. the cgroup that wishes to account for some * The core object. the cgroup that wishes to account for some
Expand Down
6 changes: 4 additions & 2 deletions include/uapi/linux/acct.h
Expand Up @@ -107,10 +107,12 @@ struct acct_v3
#define ACORE 0x08 /* ... dumped core */ #define ACORE 0x08 /* ... dumped core */
#define AXSIG 0x10 /* ... was killed by a signal */ #define AXSIG 0x10 /* ... was killed by a signal */


#ifdef __BIG_ENDIAN #if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
#define ACCT_BYTEORDER 0x80 /* accounting file is big endian */ #define ACCT_BYTEORDER 0x80 /* accounting file is big endian */
#else #elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
#define ACCT_BYTEORDER 0x00 /* accounting file is little endian */ #define ACCT_BYTEORDER 0x00 /* accounting file is little endian */
#else
#error unspecified endianness
#endif #endif


#ifndef __KERNEL__ #ifndef __KERNEL__
Expand Down
4 changes: 2 additions & 2 deletions include/uapi/linux/aio_abi.h
Expand Up @@ -62,9 +62,9 @@ struct io_event {
__s64 res2; /* secondary result */ __s64 res2; /* secondary result */
}; };


#if defined(__LITTLE_ENDIAN) #if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
#define PADDED(x,y) x, y #define PADDED(x,y) x, y
#elif defined(__BIG_ENDIAN) #elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
#define PADDED(x,y) y, x #define PADDED(x,y) y, x
#else #else
#error edit for your odd byteorder. #error edit for your odd byteorder.
Expand Down
6 changes: 4 additions & 2 deletions include/uapi/linux/raid/md_p.h
Expand Up @@ -145,16 +145,18 @@ typedef struct mdp_superblock_s {
__u32 failed_disks; /* 4 Number of failed disks */ __u32 failed_disks; /* 4 Number of failed disks */
__u32 spare_disks; /* 5 Number of spare disks */ __u32 spare_disks; /* 5 Number of spare disks */
__u32 sb_csum; /* 6 checksum of the whole superblock */ __u32 sb_csum; /* 6 checksum of the whole superblock */
#ifdef __BIG_ENDIAN #if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
__u32 events_hi; /* 7 high-order of superblock update count */ __u32 events_hi; /* 7 high-order of superblock update count */
__u32 events_lo; /* 8 low-order of superblock update count */ __u32 events_lo; /* 8 low-order of superblock update count */
__u32 cp_events_hi; /* 9 high-order of checkpoint update count */ __u32 cp_events_hi; /* 9 high-order of checkpoint update count */
__u32 cp_events_lo; /* 10 low-order of checkpoint update count */ __u32 cp_events_lo; /* 10 low-order of checkpoint update count */
#else #elif defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
__u32 events_lo; /* 7 low-order of superblock update count */ __u32 events_lo; /* 7 low-order of superblock update count */
__u32 events_hi; /* 8 high-order of superblock update count */ __u32 events_hi; /* 8 high-order of superblock update count */
__u32 cp_events_lo; /* 9 low-order of checkpoint update count */ __u32 cp_events_lo; /* 9 low-order of checkpoint update count */
__u32 cp_events_hi; /* 10 high-order of checkpoint update count */ __u32 cp_events_hi; /* 10 high-order of checkpoint update count */
#else
#error unspecified endianness
#endif #endif
__u32 recovery_cp; /* 11 recovery checkpoint sector count */ __u32 recovery_cp; /* 11 recovery checkpoint sector count */
/* There are only valid for minor_version > 90 */ /* There are only valid for minor_version > 90 */
Expand Down
3 changes: 3 additions & 0 deletions kernel/signal.c
Expand Up @@ -485,6 +485,9 @@ flush_signal_handlers(struct task_struct *t, int force_default)
if (force_default || ka->sa.sa_handler != SIG_IGN) if (force_default || ka->sa.sa_handler != SIG_IGN)
ka->sa.sa_handler = SIG_DFL; ka->sa.sa_handler = SIG_DFL;
ka->sa.sa_flags = 0; ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
ka->sa.sa_restorer = NULL;
#endif
sigemptyset(&ka->sa.sa_mask); sigemptyset(&ka->sa.sa_mask);
ka++; ka++;
} }
Expand Down
7 changes: 4 additions & 3 deletions kernel/workqueue.c
Expand Up @@ -457,11 +457,12 @@ static int worker_pool_assign_id(struct worker_pool *pool)
int ret; int ret;


mutex_lock(&worker_pool_idr_mutex); mutex_lock(&worker_pool_idr_mutex);
idr_pre_get(&worker_pool_idr, GFP_KERNEL); ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
ret = idr_get_new(&worker_pool_idr, pool, &pool->id); if (ret >= 0)
pool->id = ret;
mutex_unlock(&worker_pool_idr_mutex); mutex_unlock(&worker_pool_idr_mutex);


return ret; return ret < 0 ? ret : 0;
} }


/* /*
Expand Down

0 comments on commit 842d223

Please sign in to comment.