Merge patch series "riscv: Fix issues with module loading"
Charlie Jenkins <charlie@rivosinc.com> says:

Module loading did not account for multiple threads concurrently loading
modules: the relocation bookkeeping lived in global state shared by every
loader. This series fixes that by moving the state onto each caller's
stack. There is also a small patch correcting the type of a variable that
holds little-endian instruction parcels (u16 -> __le16).

* b4-shazam-merge:
  riscv: Correct type casting in module loading
  riscv: Safely remove entries from relocation list

Link: https://lore.kernel.org/r/20231127-module_linking_freeing-v4-0-a2ca1d7027d0@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
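
[Editor's note] For context on the race: before this series, the relocation
hashtable and the used-bucket list were file-scope globals (removed in the
first hunk below), so two modules loading in parallel would stomp on each
other's bookkeeping. A minimal userspace sketch of that failure pattern,
with hypothetical names and pthreads standing in for concurrent module
loads:

#include <pthread.h>
#include <stdlib.h>

/* Stand-in for the old file-scope relocation_hashtable pointer. */
static void *shared_table;

static void *load_module(void *unused)
{
        (void)unused;
        /*
         * Both loaders write the same global: the later allocation
         * overwrites the earlier pointer (leaking it), and both threads
         * then accumulate relocation state into whichever table "won".
         */
        shared_table = malloc(64);
        /* ... accumulate and process relocations ... */
        free(shared_table);     /* the two frees may hit the same pointer */
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, load_module, NULL);
        pthread_create(&b, NULL, load_module, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}

The fix makes apply_relocate_add() own both structures on its own stack and
pass pointers down to the helpers, so each load gets private state.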
palmer-dabbelt committed Dec 4, 2023
2 parents b85ea95 + 4a92a87, commit 7c15934
arch/riscv/kernel/module.c: 84 additions & 30 deletions (114 changes)
@@ -40,15 +40,6 @@ struct relocation_handlers {
                                  long buffer);
 };
 
-unsigned int initialize_relocation_hashtable(unsigned int num_relocations);
-void process_accumulated_relocations(struct module *me);
-int add_relocation_to_accumulate(struct module *me, int type, void *location,
-                                unsigned int hashtable_bits, Elf_Addr v);
-
-struct hlist_head *relocation_hashtable;
-
-struct list_head used_buckets_list;
-
 /*
  * The auipc+jalr instruction pair can reach any PC-relative offset
  * in the range [-2^31 - 2^11, 2^31 - 2^11)
@@ -64,7 +55,7 @@ static bool riscv_insn_valid_32bit_offset(ptrdiff_t val)
 
 static int riscv_insn_rmw(void *location, u32 keep, u32 set)
 {
-       u16 *parcel = location;
+       __le16 *parcel = location;
        u32 insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16;
 
        insn &= keep;
@@ -77,7 +68,7 @@ static int riscv_insn_rmw(void *location, u32 keep, u32 set)
 
 static int riscv_insn_rvc_rmw(void *location, u16 keep, u16 set)
 {
-       u16 *parcel = location;
+       __le16 *parcel = location;
        u16 insn = le16_to_cpu(*parcel);
 
        insn &= keep;
@@ -604,7 +595,10 @@ static const struct relocation_handlers reloc_handlers[] = {
        /* 192-255 nonstandard ABI extensions */
 };
 
-void process_accumulated_relocations(struct module *me)
+static void
+process_accumulated_relocations(struct module *me,
+                               struct hlist_head **relocation_hashtable,
+                               struct list_head *used_buckets_list)
 {
        /*
         * Only ADD/SUB/SET/ULEB128 should end up here.
@@ -624,18 +618,25 @@ void process_accumulated_relocations(struct module *me)
         * - Each relocation entry for a location address
         */
        struct used_bucket *bucket_iter;
+       struct used_bucket *bucket_iter_tmp;
        struct relocation_head *rel_head_iter;
+       struct hlist_node *rel_head_iter_tmp;
        struct relocation_entry *rel_entry_iter;
+       struct relocation_entry *rel_entry_iter_tmp;
        int curr_type;
        void *location;
        long buffer;
 
-       list_for_each_entry(bucket_iter, &used_buckets_list, head) {
-               hlist_for_each_entry(rel_head_iter, bucket_iter->bucket, node) {
+       list_for_each_entry_safe(bucket_iter, bucket_iter_tmp,
+                                used_buckets_list, head) {
+               hlist_for_each_entry_safe(rel_head_iter, rel_head_iter_tmp,
+                                         bucket_iter->bucket, node) {
                        buffer = 0;
                        location = rel_head_iter->location;
-                       list_for_each_entry(rel_entry_iter,
-                                           rel_head_iter->rel_entry, head) {
+                       list_for_each_entry_safe(rel_entry_iter,
+                                                rel_entry_iter_tmp,
+                                                rel_head_iter->rel_entry,
+                                                head) {
                                curr_type = rel_entry_iter->type;
                                reloc_handlers[curr_type].reloc_handler(
                                        me, &buffer, rel_entry_iter->value);
@@ -648,11 +649,14 @@ void process_accumulated_relocations(struct module *me)
                kfree(bucket_iter);
        }
 
-       kfree(relocation_hashtable);
+       kfree(*relocation_hashtable);
 }
 
-int add_relocation_to_accumulate(struct module *me, int type, void *location,
-                                unsigned int hashtable_bits, Elf_Addr v)
+static int add_relocation_to_accumulate(struct module *me, int type,
+                                       void *location,
+                                       unsigned int hashtable_bits, Elf_Addr v,
+                                       struct hlist_head *relocation_hashtable,
+                                       struct list_head *used_buckets_list)
 {
        struct relocation_entry *entry;
        struct relocation_head *rel_head;
@@ -661,6 +665,10 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
        unsigned long hash;
 
        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+
+       if (!entry)
+               return -ENOMEM;
+
        INIT_LIST_HEAD(&entry->head);
        entry->type = type;
        entry->value = v;
@@ -669,7 +677,10 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
 
        current_head = &relocation_hashtable[hash];
 
-       /* Find matching location (if any) */
+       /*
+        * Search for the relocation_head for the relocations that happen at the
+        * provided location
+        */
        bool found = false;
        struct relocation_head *rel_head_iter;
 
@@ -681,19 +692,45 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
                }
        }
 
+       /*
+        * If there have not yet been any relocations at the provided location,
+        * create a relocation_head for that location and populate it with this
+        * relocation_entry.
+        */
        if (!found) {
                rel_head = kmalloc(sizeof(*rel_head), GFP_KERNEL);
+
+               if (!rel_head) {
+                       kfree(entry);
+                       return -ENOMEM;
+               }
+
                rel_head->rel_entry =
                        kmalloc(sizeof(struct list_head), GFP_KERNEL);
+
+               if (!rel_head->rel_entry) {
+                       kfree(entry);
+                       kfree(rel_head);
+                       return -ENOMEM;
+               }
+
                INIT_LIST_HEAD(rel_head->rel_entry);
                rel_head->location = location;
                INIT_HLIST_NODE(&rel_head->node);
                if (!current_head->first) {
                        bucket =
                                kmalloc(sizeof(struct used_bucket), GFP_KERNEL);
+
+                       if (!bucket) {
+                               kfree(entry);
+                               kfree(rel_head->rel_entry);
+                               kfree(rel_head);
+                               return -ENOMEM;
+                       }
+
                        INIT_LIST_HEAD(&bucket->head);
                        bucket->bucket = current_head;
-                       list_add(&bucket->head, &used_buckets_list);
+                       list_add(&bucket->head, used_buckets_list);
                }
                hlist_add_head(&rel_head->node, current_head);
        }
@@ -704,7 +741,9 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
        return 0;
 }
 
-unsigned int initialize_relocation_hashtable(unsigned int num_relocations)
+static unsigned int
+initialize_relocation_hashtable(unsigned int num_relocations,
+                               struct hlist_head **relocation_hashtable)
 {
        /* Can safely assume that bits is not greater than sizeof(long) */
        unsigned long hashtable_size = roundup_pow_of_two(num_relocations);
@@ -720,12 +759,13 @@ unsigned int initialize_relocation_hashtable(unsigned int num_relocations)
 
        hashtable_size <<= should_double_size;
 
-       relocation_hashtable = kmalloc_array(hashtable_size,
-                                            sizeof(*relocation_hashtable),
-                                            GFP_KERNEL);
-       __hash_init(relocation_hashtable, hashtable_size);
+       *relocation_hashtable = kmalloc_array(hashtable_size,
+                                             sizeof(*relocation_hashtable),
+                                             GFP_KERNEL);
+       if (!*relocation_hashtable)
+               return -ENOMEM;
 
-       INIT_LIST_HEAD(&used_buckets_list);
+       __hash_init(*relocation_hashtable, hashtable_size);
 
        return hashtable_bits;
 }
@@ -742,7 +782,17 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
        Elf_Addr v;
        int res;
        unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
-       unsigned int hashtable_bits = initialize_relocation_hashtable(num_relocations);
+       struct hlist_head *relocation_hashtable;
+       struct list_head used_buckets_list;
+       unsigned int hashtable_bits;
+
+       hashtable_bits = initialize_relocation_hashtable(num_relocations,
+                                                        &relocation_hashtable);
+
+       if (hashtable_bits < 0)
+               return hashtable_bits;
+
+       INIT_LIST_HEAD(&used_buckets_list);
 
        pr_debug("Applying relocate section %u to %u\n", relsec,
                 sechdrs[relsec].sh_info);
@@ -823,14 +873,18 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                }
 
                if (reloc_handlers[type].accumulate_handler)
-                       res = add_relocation_to_accumulate(me, type, location, hashtable_bits, v);
+                       res = add_relocation_to_accumulate(me, type, location,
+                                                          hashtable_bits, v,
+                                                          relocation_hashtable,
+                                                          &used_buckets_list);
                else
                        res = handler(me, location, v);
                if (res)
                        return res;
        }
 
-       process_accumulated_relocations(me);
+       process_accumulated_relocations(me, &relocation_hashtable,
+                                       &used_buckets_list);
 
        return 0;
 }
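[Editor's note] A footnote on the __le16 patch above: RISC-V instruction
parcels are stored little-endian in memory, and le16_to_cpu() expects a
__le16 (a sparse "bitwise" type), so declaring the parcel pointer as plain
u16 * drew sparse warnings even though the generated code was unchanged. A
self-contained userspace sketch of the byte-order contract the annotation
documents (sketch_le16_to_cpu is a hypothetical stand-in for the kernel
helper):

#include <stdint.h>
#include <stdio.h>

/* Assemble a 16-bit value LSB-first, independent of host byte order. */
static uint16_t sketch_le16_to_cpu(const uint8_t *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
        /* "auipc x0, 0" stored as two little-endian 16-bit parcels. */
        const uint8_t parcels[4] = { 0x17, 0x00, 0x00, 0x00 };
        uint32_t insn = (uint32_t)sketch_le16_to_cpu(parcels) |
                        (uint32_t)sketch_le16_to_cpu(parcels + 2) << 16;

        /* Prints insn = 0x00000017 on little- and big-endian hosts alike. */
        printf("insn = 0x%08x\n", (unsigned int)insn);
        return 0;
}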
