@@ -6253,10 +6253,10 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 
                 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
                         /*
-                         * Raced against perf_mmap_close() through
-                         * perf_event_set_output(). Try again, hope for better
-                         * luck.
+                         * Raced against perf_mmap_close(); remove the
+                         * event and try again.
                          */
+                        ring_buffer_attach(event, NULL);
                         mutex_unlock(&event->mmap_mutex);
                         goto again;
                 }
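The perf_mmap() hunk above leans on atomic_inc_not_zero(): the new mapping only pins event->rb if rb->mmap_count is still non-zero; once it has dropped to zero, perf_mmap_close() is tearing the buffer down, so the event is detached with ring_buffer_attach(event, NULL) and the whole mmap is retried. A minimal userspace C11 sketch of that increment-if-not-zero idiom follows; the names obj_ref and get_ref_not_zero are invented for the example and are not part of the kernel code.

#include <stdatomic.h>
#include <stdbool.h>

struct obj_ref {
        atomic_int refcount;            /* 0 means the object is being torn down */
};

/* Take a reference only if the object is still live (count != 0). */
static bool get_ref_not_zero(struct obj_ref *r)
{
        int old = atomic_load(&r->refcount);

        while (old != 0) {
                /* CAS loop: retry until we bump the count or observe zero. */
                if (atomic_compare_exchange_weak(&r->refcount, &old, old + 1))
                        return true;    /* reference taken */
        }
        return false;                   /* lost the race; caller backs off and retries */
}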
@@ -11825,14 +11825,25 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
         goto out;
 }
 
+static void mutex_lock_double(struct mutex *a, struct mutex *b)
+{
+        if (b < a)
+                swap(a, b);
+
+        mutex_lock(a);
+        mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
+}
+
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
         struct perf_buffer *rb = NULL;
         int ret = -EINVAL;
 
-        if (!output_event)
+        if (!output_event) {
+                mutex_lock(&event->mmap_mutex);
                 goto set;
+        }
 
         /* don't allow circular references */
         if (event == output_event)
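The hunk above moves mutex_lock_double() ahead of perf_event_set_output() so that the next hunk can take both events' mmap_mutex. The helper avoids an ABBA deadlock by always acquiring the two locks in ascending address order; mutex_lock_nested(..., SINGLE_DEPTH_NESTING) only tells lockdep that the nesting is intentional. A rough pthreads analogue of the same ordering trick is sketched below; lock_pair() and unlock_pair() are made-up names for this example, not kernel APIs.

#include <pthread.h>

/* Acquire two mutexes in ascending address order so that two threads
 * locking the same pair, in either argument order, cannot deadlock. */
static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
        if (b < a) {                    /* normalize the order by address */
                pthread_mutex_t *tmp = a;
                a = b;
                b = tmp;
        }
        pthread_mutex_lock(a);
        pthread_mutex_lock(b);
}

static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
        /* release order does not affect correctness */
        pthread_mutex_unlock(b);
        pthread_mutex_unlock(a);
}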
@@ -11870,8 +11881,15 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
             event->pmu != output_event->pmu)
                 goto out;
 
+        /*
+         * Hold both mmap_mutex to serialize against perf_mmap_close(). Since
+         * output_event is already on rb->event_list, and the list iteration
+         * restarts after every removal, it is guaranteed this new event is
+         * observed *OR* if output_event is already removed, it's guaranteed we
+         * observe !rb->mmap_count.
+         */
+        mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex);
 set:
-        mutex_lock(&event->mmap_mutex);
         /* Can't redirect output if we've got an active mmap() */
         if (atomic_read(&event->mmap_count))
                 goto unlock;
@@ -11881,27 +11899,26 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
                 rb = ring_buffer_get(output_event);
                 if (!rb)
                         goto unlock;
+
+                /* did we race against perf_mmap_close() */
+                if (!atomic_read(&rb->mmap_count)) {
+                        ring_buffer_put(rb);
+                        goto unlock;
+                }
         }
 
         ring_buffer_attach(event, rb);
 
         ret = 0;
 unlock:
         mutex_unlock(&event->mmap_mutex);
+        if (output_event)
+                mutex_unlock(&output_event->mmap_mutex);
 
 out:
         return ret;
 }
 
-static void mutex_lock_double(struct mutex *a, struct mutex *b)
-{
-        if (b < a)
-                swap(a, b);
-
-        mutex_lock(a);
-        mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
-}
-
 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
 {
         bool nmi_safe = false;