@@ -2789,6 +2789,12 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 	if (nr_pages < 2)
 		nr_pages = 2;
 
+	/*
+	 * Keep CPUs from coming online while resizing to synchronize
+	 * with new per CPU buffers being created.
+	 */
+	guard(cpus_read_lock)();
+
 	/* prevent another thread from changing buffer sizes */
 	mutex_lock(&buffer->mutex);
 	atomic_inc(&buffer->resizing);
@@ -2833,7 +2839,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 			cond_resched();
 		}
 
-		cpus_read_lock();
 		/*
 		 * Fire off all the required work handlers
 		 * We can't schedule on offline CPUs, but it's not necessary
@@ -2873,7 +2878,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
			cpu_buffer->nr_pages_to_update = 0;
 		}
 
-		cpus_read_unlock();
 	} else {
 		cpu_buffer = buffer->buffers[cpu_id];
 
@@ -2901,8 +2905,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 			goto out_err;
 		}
 
-		cpus_read_lock();
-
 		/* Can't run something on an offline CPU. */
 		if (!cpu_online(cpu_id))
 			rb_update_pages(cpu_buffer);
@@ -2921,7 +2923,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 		}
 
 		cpu_buffer->nr_pages_to_update = 0;
-		cpus_read_unlock();
 	}
 
  out:
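The added guard(cpus_read_lock)() relies on the kernel's scope-based cleanup helpers (include/linux/cleanup.h), which arrange for cpus_read_unlock() to run automatically when the enclosing scope is left; that is why the explicit cpus_read_lock()/cpus_read_unlock() pairs could be dropped from the individual branches above. Below is a minimal userspace sketch of the same pattern built on GCC/Clang's __attribute__((cleanup)); the names (DEMO_GUARD, demo_unlock, resize_something) are hypothetical illustrations, not the kernel's actual macros.

	/* Minimal sketch of a scope-based lock guard, assuming GCC/Clang. */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Cleanup callback: runs automatically when the guard variable leaves scope. */
	static void demo_unlock(pthread_mutex_t **lockp)
	{
		pthread_mutex_unlock(*lockp);
		printf("lock released at scope exit\n");
	}

	/* Take the lock and tie its release to the enclosing scope. */
	#define DEMO_GUARD(lock)						\
		pthread_mutex_t *__guard __attribute__((cleanup(demo_unlock))) = \
			({ pthread_mutex_lock(lock); (lock); })

	static void resize_something(void)
	{
		DEMO_GUARD(&demo_lock);	/* analogous to guard(cpus_read_lock)(); */

		printf("resizing with lock held\n");
		/*
		 * Every exit path from this function drops the lock
		 * automatically, so no explicit unlock calls are needed.
		 */
	}

	int main(void)
	{
		resize_something();
		return 0;
	}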