
Commit 91a5409

martin-kaiser authored and akpm00 committed
maple_tree: fix tracepoint string pointers
maple_tree tracepoints contain pointers to function names. Such a pointer is saved when a tracepoint logs an event. There's no guarantee that it's still valid when the event is parsed later and the pointer is dereferenced. The kernel warns about these unsafe pointers:

  event 'ma_read' has unsafe pointer field 'fn'
  WARNING: kernel/trace/trace.c:3779 at ignore_event+0x1da/0x1e4

Mark the function names as tracepoint_string() to fix the events.

One case that doesn't work without my patch would be trace-cmd record to save the binary ringbuffer and trace-cmd report to parse it in userspace. The address of __func__ can't be dereferenced from userspace, but tracepoint_string will add an entry to /sys/kernel/tracing/printk_formats.

Link: https://lkml.kernel.org/r/20251030155537.87972-1-martin@kaiser.cx
Fixes: 54a611b ("Maple Tree: add new data structure")
Signed-off-by: Martin Kaiser <martin@kaiser.cx>
Acked-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
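A minimal, self-contained sketch of the mechanism the fix relies on (an assumption-laden illustration, not kernel source: it assumes GCC/Clang extensions and an ELF target, and the names demo_tracepoint_string and demo_tp_str are invented for the demo). The idea is that tracepoint_string() keeps a pointer to the string literal in a dedicated section which the tracer exports via /sys/kernel/tracing/printk_formats, so a parser can map the pointer value recorded in an event back to its text instead of dereferencing a kernel address.

#include <stdio.h>

/*
 * Illustrative stand-in for the kernel's tracepoint_string(): keep a
 * pointer to the literal alive in a named section ("demo_tp_str" here)
 * so a tool that reads that section can resolve the pointer value
 * recorded in an event back to the original text.
 */
#define demo_tracepoint_string(str)					\
({									\
	static const char *___tp_str					\
		__attribute__((used, section("demo_tp_str"))) = str;	\
	___tp_str;							\
})

static const char *traced_fn(void)
{
	/* A trace event would record this pointer, not a copy of the string. */
	return demo_tracepoint_string(__func__);
}

int main(void)
{
	printf("recorded pointer resolves to: %s\n", traced_fn());
	return 0;
}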
1 parent 1abbdf3 commit 91a5409

File tree

1 file changed: +16 -14 lines changed

lib/maple_tree.c

Lines changed: 16 additions & 14 deletions
@@ -64,6 +64,8 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/maple_tree.h>
 
+#define TP_FCT tracepoint_string(__func__)
+
 /*
  * Kernel pointer hashing renders much of the maple tree dump useless as tagged
  * pointers get hashed to arbitrary values.
@@ -2756,7 +2758,7 @@ static inline void mas_rebalance(struct ma_state *mas,
 	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
 	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
 
-	trace_ma_op(__func__, mas);
+	trace_ma_op(TP_FCT, mas);
 
 	/*
 	 * Rebalancing occurs if a node is insufficient. Data is rebalanced
@@ -2997,7 +2999,7 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node)
 	MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
 	MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
 
-	trace_ma_op(__func__, mas);
+	trace_ma_op(TP_FCT, mas);
 
 	mast.l = &l_mas;
 	mast.r = &r_mas;
@@ -3172,7 +3174,7 @@ static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
 		return false;
 	}
 
-	trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
+	trace_ma_write(TP_FCT, wr_mas->mas, wr_mas->r_max, entry);
 	return true;
 }
 
@@ -3416,7 +3418,7 @@ static noinline void mas_wr_spanning_store(struct ma_wr_state *wr_mas)
 	 * of data may happen.
 	 */
 	mas = wr_mas->mas;
-	trace_ma_op(__func__, mas);
+	trace_ma_op(TP_FCT, mas);
 
 	if (unlikely(!mas->index && mas->last == ULONG_MAX))
 		return mas_new_root(mas, wr_mas->entry);
@@ -3552,7 +3554,7 @@ static inline void mas_wr_node_store(struct ma_wr_state *wr_mas,
 	} else {
 		memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
 	}
-	trace_ma_write(__func__, mas, 0, wr_mas->entry);
+	trace_ma_write(TP_FCT, mas, 0, wr_mas->entry);
 	mas_update_gap(mas);
 	mas->end = new_end;
 	return;
@@ -3596,7 +3598,7 @@ static inline void mas_wr_slot_store(struct ma_wr_state *wr_mas)
 		mas->offset++; /* Keep mas accurate. */
 	}
 
-	trace_ma_write(__func__, mas, 0, wr_mas->entry);
+	trace_ma_write(TP_FCT, mas, 0, wr_mas->entry);
 	/*
 	 * Only update gap when the new entry is empty or there is an empty
 	 * entry in the original two ranges.
@@ -3717,7 +3719,7 @@ static inline void mas_wr_append(struct ma_wr_state *wr_mas,
 		mas_update_gap(mas);
 
 	mas->end = new_end;
-	trace_ma_write(__func__, mas, new_end, wr_mas->entry);
+	trace_ma_write(TP_FCT, mas, new_end, wr_mas->entry);
 	return;
 }
 
@@ -3731,7 +3733,7 @@ static void mas_wr_bnode(struct ma_wr_state *wr_mas)
 {
 	struct maple_big_node b_node;
 
-	trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
+	trace_ma_write(TP_FCT, wr_mas->mas, 0, wr_mas->entry);
 	memset(&b_node, 0, sizeof(struct maple_big_node));
 	mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
 	mas_commit_b_node(wr_mas, &b_node);
@@ -5062,7 +5064,7 @@ void *mas_store(struct ma_state *mas, void *entry)
 {
 	MA_WR_STATE(wr_mas, mas, entry);
 
-	trace_ma_write(__func__, mas, 0, entry);
+	trace_ma_write(TP_FCT, mas, 0, entry);
 #ifdef CONFIG_DEBUG_MAPLE_TREE
 	if (MAS_WARN_ON(mas, mas->index > mas->last))
 		pr_err("Error %lX > %lX " PTR_FMT "\n", mas->index, mas->last,
@@ -5163,7 +5165,7 @@ void mas_store_prealloc(struct ma_state *mas, void *entry)
 	}
 
 store:
-	trace_ma_write(__func__, mas, 0, entry);
+	trace_ma_write(TP_FCT, mas, 0, entry);
 	mas_wr_store_entry(&wr_mas);
 	MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
 	mas_destroy(mas);
@@ -5882,7 +5884,7 @@ void *mtree_load(struct maple_tree *mt, unsigned long index)
 	MA_STATE(mas, mt, index, index);
 	void *entry;
 
-	trace_ma_read(__func__, &mas);
+	trace_ma_read(TP_FCT, &mas);
 	rcu_read_lock();
 retry:
 	entry = mas_start(&mas);
@@ -5925,7 +5927,7 @@ int mtree_store_range(struct maple_tree *mt, unsigned long index,
 	MA_STATE(mas, mt, index, last);
 	int ret = 0;
 
-	trace_ma_write(__func__, &mas, 0, entry);
+	trace_ma_write(TP_FCT, &mas, 0, entry);
 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
 		return -EINVAL;
 
@@ -6148,7 +6150,7 @@ void *mtree_erase(struct maple_tree *mt, unsigned long index)
 	void *entry = NULL;
 
 	MA_STATE(mas, mt, index, index);
-	trace_ma_op(__func__, &mas);
+	trace_ma_op(TP_FCT, &mas);
 
 	mtree_lock(mt);
 	entry = mas_erase(&mas);
@@ -6485,7 +6487,7 @@ void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
 	unsigned long copy = *index;
 #endif
 
-	trace_ma_read(__func__, &mas);
+	trace_ma_read(TP_FCT, &mas);
 
 	if ((*index) > max)
 		return NULL;
