xen/spinlock: rename recursive lock functions
Rename the recursive spin_lock() functions by replacing the trailing
"_recursive" with a leading "r".

Switch the parameter to be a pointer to rspinlock_t.

Suggested-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Acked-by: Julien Grall <jgrall@amazon.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
jgross1 authored and jbeulich committed Mar 19, 2024
1 parent 475080b commit c9aca2a
Showing 11 changed files with 68 additions and 70 deletions.
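The rename below is purely mechanical. As a reference, here is a minimal before/after sketch of a caller; the struct name "demo", the function "demo_critical" and the field "lock" are illustrative only and not taken from the Xen tree:

/* Illustrative sketch, not part of this commit. */
struct demo {
    rspinlock_t lock;   /* recursive spinlock type taken by rspin_lock() */
};

static void demo_critical(struct demo *d)
{
    /* Before this commit the calls were:
     *   spin_lock_recursive(&d->lock);
     *   spin_unlock_recursive(&d->lock);
     */
    rspin_lock(&d->lock);    /* may be re-acquired on the same CPU */
    /* ... work that may recurse into a path taking d->lock again ... */
    rspin_unlock(&d->lock);
}

The lock is shown as rspinlock_t because, per the commit message, the renamed functions now take a pointer to that type.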
xen/arch/arm/domain.c: 4 changes (2 additions & 2 deletions)
@@ -987,7 +987,7 @@ static int relinquish_memory(struct domain *d, struct page_list_head *list)
int ret = 0;

/* Use a recursive lock, as we may enter 'free_domheap_page'. */
-spin_lock_recursive(&d->page_alloc_lock);
+rspin_lock(&d->page_alloc_lock);

page_list_for_each_safe( page, tmp, list )
{
@@ -1014,7 +1014,7 @@ static int relinquish_memory(struct domain *d, struct page_list_head *list)
}

out:
-spin_unlock_recursive(&d->page_alloc_lock);
+rspin_unlock(&d->page_alloc_lock);
return ret;
}

xen/arch/x86/domain.c: 8 changes (4 additions & 4 deletions)
@@ -1323,7 +1323,7 @@ int arch_set_info_guest(
{
bool done = false;

-spin_lock_recursive(&d->page_alloc_lock);
+rspin_lock(&d->page_alloc_lock);

for ( i = 0; ; )
{
@@ -1344,7 +1344,7 @@ int arch_set_info_guest(
break;
}

-spin_unlock_recursive(&d->page_alloc_lock);
+rspin_unlock(&d->page_alloc_lock);

if ( !done )
return -ERESTART;
@@ -2183,7 +2183,7 @@ static int relinquish_memory(
int ret = 0;

/* Use a recursive lock, as we may enter 'free_domheap_page'. */
-spin_lock_recursive(&d->page_alloc_lock);
+rspin_lock(&d->page_alloc_lock);

while ( (page = page_list_remove_head(list)) )
{
@@ -2324,7 +2324,7 @@ static int relinquish_memory(
page_list_move(list, &d->arch.relmem_list);

out:
-spin_unlock_recursive(&d->page_alloc_lock);
+rspin_unlock(&d->page_alloc_lock);
return ret;
}

xen/arch/x86/mm/mem_sharing.c: 8 changes (4 additions & 4 deletions)
@@ -682,7 +682,7 @@ static int page_make_sharable(struct domain *d,
int rc = 0;
bool drop_dom_ref = false;

-spin_lock_recursive(&d->page_alloc_lock);
+rspin_lock(&d->page_alloc_lock);

if ( d->is_dying )
{
@@ -725,7 +725,7 @@ static int page_make_sharable(struct domain *d,
}

out:
-spin_unlock_recursive(&d->page_alloc_lock);
+rspin_unlock(&d->page_alloc_lock);

if ( drop_dom_ref )
put_domain(d);
@@ -1936,7 +1936,7 @@ int mem_sharing_fork_reset(struct domain *d, bool reset_state,
goto state;

/* need recursive lock because we will free pages */
-spin_lock_recursive(&d->page_alloc_lock);
+rspin_lock(&d->page_alloc_lock);
page_list_for_each_safe(page, tmp, &d->page_list)
{
shr_handle_t sh;
@@ -1965,7 +1965,7 @@ int mem_sharing_fork_reset(struct domain *d, bool reset_state,
put_page_alloc_ref(page);
put_page_and_type(page);
}
-spin_unlock_recursive(&d->page_alloc_lock);
+rspin_unlock(&d->page_alloc_lock);

state:
if ( reset_state )
xen/arch/x86/mm/mm-locks.h: 4 changes (2 additions & 2 deletions)
@@ -79,7 +79,7 @@ static always_inline void _mm_lock(const struct domain *d, mm_lock_t *l,
{
if ( !((mm_locked_by_me(l)) && rec) )
_check_lock_level(d, level);
-spin_lock_recursive(&l->lock);
+rspin_lock(&l->lock);
if ( l->lock.recurse_cnt == 1 )
{
l->locker_function = func;
@@ -202,7 +202,7 @@ static inline void mm_unlock(mm_lock_t *l)
l->locker_function = "nobody";
_set_lock_level(l->unlock_level);
}
-spin_unlock_recursive(&l->lock);
+rspin_unlock(&l->lock);
}

static inline void mm_enforce_order_unlock(int unlock_level,
xen/common/ioreq.c: 52 changes (26 additions & 26 deletions)
@@ -329,7 +329,7 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
unsigned int id;
bool found = false;

-spin_lock_recursive(&d->ioreq_server.lock);
+rspin_lock(&d->ioreq_server.lock);

FOR_EACH_IOREQ_SERVER(d, id, s)
{
@@ -340,7 +340,7 @@ bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
}
}

-spin_unlock_recursive(&d->ioreq_server.lock);
+rspin_unlock(&d->ioreq_server.lock);

return found;
}
@@ -658,7 +658,7 @@ static int ioreq_server_create(struct domain *d, int bufioreq_handling,
return -ENOMEM;

domain_pause(d);
-spin_lock_recursive(&d->ioreq_server.lock);
+rspin_lock(&d->ioreq_server.lock);

for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
{
@@ -686,13 +686,13 @@ static int ioreq_server_create(struct domain *d, int bufioreq_handling,
if ( id )
*id = i;

-spin_unlock_recursive(&d->ioreq_server.lock);
+rspin_unlock(&d->ioreq_server.lock);
domain_unpause(d);

return 0;

fail:
-spin_unlock_recursive(&d->ioreq_server.lock);
+rspin_unlock(&d->ioreq_server.lock);
domain_unpause(d);

xfree(s);
@@ -704,7 +704,7 @@ static int ioreq_server_destroy(struct domain *d, ioservid_t id)
struct ioreq_server *s;
int rc;

-spin_lock_recursive(&d->ioreq_server.lock);
+rspin_lock(&d->ioreq_server.lock);

s = get_ioreq_server(d, id);

@@ -736,7 +736,7 @@ static int ioreq_server_destroy(struct domain *d, ioservid_t id)
rc = 0;

out:
-spin_unlock_recursive(&d->ioreq_server.lock);
+rspin_unlock(&d->ioreq_server.lock);

return rc;
}
@@ -749,7 +749,7 @@ static int ioreq_server_get_info(struct domain *d, ioservid_t id,
struct ioreq_server *s;
int rc;

-spin_lock_recursive(&d->ioreq_server.lock);
+rspin_lock(&d->ioreq_server.lock);

s = get_ioreq_server(d, id);

@@ -783,7 +783,7 @@ static int ioreq_server_get_info(struct domain *d, ioservid_t id,
rc = 0;

out:
-spin_unlock_recursive(&d->ioreq_server.lock);
+rspin_unlock(&d->ioreq_server.lock);

return rc;
}
@@ -796,7 +796,7 @@ int ioreq_server_get_frame(struct domain *d, ioservid_t id,

ASSERT(is_hvm_domain(d));

-spin_lock_recursive(&d->ioreq_server.lock);
+rspin_lock(&d->ioreq_server.lock);

s = get_ioreq_server(d, id);

@@ -834,7 +834,7 @@ int ioreq_server_get_frame(struct domain *d, ioservid_t id,
}

out:
-spin_unlock_recursive(&d->ioreq_server.lock);
+rspin_unlock(&d->ioreq_server.lock);

return rc;
}
@@ -850,7 +850,7 @@ static int ioreq_server_map_io_range(struct domain *d, ioservid_t id,
if ( start > end )
return -EINVAL;

-spin_lock_recursive(&d->ioreq_server.lock);
+rspin_lock(&d->ioreq_server.lock);

s = get_ioreq_server(d, id);

@@ -886,7 +886,7 @@ static int ioreq_server_map_io_range(struct domain *d, ioservid_t id,
rc = rangeset_add_range(r, start, end);

out:
-spin_unlock_recursive(&d->ioreq_server.lock);
+rspin_unlock(&d->ioreq_server.lock);

return rc;
}
@@ -902,7 +902,7 @@ static int ioreq_server_unmap_io_range(struct domain *d, ioservid_t id,
if ( start > end )
return -EINVAL;

-spin_lock_recursive(&d->ioreq_server.lock);
+rspin_lock(&d->ioreq_server.lock);

s = get_ioreq_server(d, id);

@@ -938,7 +938,7 @@ static int ioreq_server_unmap_io_range(struct domain *d, ioservid_t id,
rc = rangeset_remove_range(r, start, end);

out:
-spin_unlock_recursive(&d->ioreq_server.lock);
+rspin_unlock(&d->ioreq_server.lock);

return rc;
}
@@ -963,7 +963,7 @@ int ioreq_server_map_mem_type(struct domain *d, ioservid_t id,
if ( flags & ~XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
return -EINVAL;

-spin_lock_recursive(&d->ioreq_server.lock);
+rspin_lock(&d->ioreq_server.lock);

s = get_ioreq_server(d, id);

@@ -978,7 +978,7 @@ int ioreq_server_map_mem_type(struct domain *d, ioservid_t id,
rc = arch_ioreq_server_map_mem_type(d, s, flags);

out:
-spin_unlock_recursive(&d->ioreq_server.lock);
+rspin_unlock(&d->ioreq_server.lock);

if ( rc == 0 )
arch_ioreq_server_map_mem_type_completed(d, s, flags);
@@ -992,7 +992,7 @@ static int ioreq_server_set_state(struct domain *d, ioservid_t id,
struct ioreq_server *s;
int rc;

-spin_lock_recursive(&d->ioreq_server.lock);
+rspin_lock(&d->ioreq_server.lock);

s = get_ioreq_server(d, id);

@@ -1016,7 +1016,7 @@ static int ioreq_server_set_state(struct domain *d, ioservid_t id,
rc = 0;

out:
-spin_unlock_recursive(&d->ioreq_server.lock);
+rspin_unlock(&d->ioreq_server.lock);
return rc;
}

@@ -1026,7 +1026,7 @@ int ioreq_server_add_vcpu_all(struct domain *d, struct vcpu *v)
unsigned int id;
int rc;

-spin_lock_recursive(&d->ioreq_server.lock);
+rspin_lock(&d->ioreq_server.lock);

FOR_EACH_IOREQ_SERVER(d, id, s)
{
@@ -1035,7 +1035,7 @@ int ioreq_server_add_vcpu_all(struct domain *d, struct vcpu *v)
goto fail;
}

-spin_unlock_recursive(&d->ioreq_server.lock);
+rspin_unlock(&d->ioreq_server.lock);

return 0;

@@ -1050,7 +1050,7 @@ int ioreq_server_add_vcpu_all(struct domain *d, struct vcpu *v)
ioreq_server_remove_vcpu(s, v);
}

-spin_unlock_recursive(&d->ioreq_server.lock);
+rspin_unlock(&d->ioreq_server.lock);

return rc;
}
@@ -1060,12 +1060,12 @@ void ioreq_server_remove_vcpu_all(struct domain *d, struct vcpu *v)
struct ioreq_server *s;
unsigned int id;

-spin_lock_recursive(&d->ioreq_server.lock);
+rspin_lock(&d->ioreq_server.lock);

FOR_EACH_IOREQ_SERVER(d, id, s)
ioreq_server_remove_vcpu(s, v);

-spin_unlock_recursive(&d->ioreq_server.lock);
+rspin_unlock(&d->ioreq_server.lock);
}

void ioreq_server_destroy_all(struct domain *d)
@@ -1076,7 +1076,7 @@ void ioreq_server_destroy_all(struct domain *d)
if ( !arch_ioreq_server_destroy_all(d) )
return;

-spin_lock_recursive(&d->ioreq_server.lock);
+rspin_lock(&d->ioreq_server.lock);

/* No need to domain_pause() as the domain is being torn down */

@@ -1094,7 +1094,7 @@ void ioreq_server_destroy_all(struct domain *d)
xfree(s);
}

-spin_unlock_recursive(&d->ioreq_server.lock);
+rspin_unlock(&d->ioreq_server.lock);
}

struct ioreq_server *ioreq_server_select(struct domain *d,
xen/common/page_alloc.c: 12 changes (6 additions & 6 deletions)
@@ -2500,15 +2500,15 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
if ( unlikely(is_xen_heap_page(pg)) )
{
/* NB. May recursively lock from relinquish_memory(). */
-spin_lock_recursive(&d->page_alloc_lock);
+rspin_lock(&d->page_alloc_lock);

for ( i = 0; i < (1 << order); i++ )
arch_free_heap_page(d, &pg[i]);

d->xenheap_pages -= 1 << order;
drop_dom_ref = (d->xenheap_pages == 0);

-spin_unlock_recursive(&d->page_alloc_lock);
+rspin_unlock(&d->page_alloc_lock);
}
else
{
@@ -2517,7 +2517,7 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
if ( likely(d) && likely(d != dom_cow) )
{
/* NB. May recursively lock from relinquish_memory(). */
-spin_lock_recursive(&d->page_alloc_lock);
+rspin_lock(&d->page_alloc_lock);

for ( i = 0; i < (1 << order); i++ )
{
Expand All @@ -2540,7 +2540,7 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)

drop_dom_ref = !domain_adjust_tot_pages(d, -(1 << order));

-spin_unlock_recursive(&d->page_alloc_lock);
+rspin_unlock(&d->page_alloc_lock);

/*
* Normally we expect a domain to clear pages before freeing them,
@@ -2756,7 +2756,7 @@ void free_domstatic_page(struct page_info *page)
ASSERT_ALLOC_CONTEXT();

/* NB. May recursively lock from relinquish_memory(). */
-spin_lock_recursive(&d->page_alloc_lock);
+rspin_lock(&d->page_alloc_lock);

arch_free_heap_page(d, page);

@@ -2767,7 +2767,7 @@ void free_domstatic_page(struct page_info *page)
/* Add page on the resv_page_list *after* it has been freed. */
page_list_add_tail(page, &d->resv_page_list);

-spin_unlock_recursive(&d->page_alloc_lock);
+rspin_unlock(&d->page_alloc_lock);

if ( drop_dom_ref )
put_domain(d);
