@@ -1112,8 +1112,7 @@ static struct list_head *rb_list_head(struct list_head *list)
11121112 * its flags will be non zero.
11131113 */
11141114static inline int
1115- rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1116- 		struct buffer_page *page, struct list_head *list)
1115+ rb_is_head_page(struct buffer_page *page, struct list_head *list)
11171116{
11181117 unsigned long val ;
11191118
@@ -1142,8 +1141,7 @@ static bool rb_is_reader_page(struct buffer_page *page)
11421141/*
11431142 * rb_set_list_to_head - set a list_head to be pointing to head.
11441143 */
1145- static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
1146- 				struct list_head *list)
1144+ static void rb_set_list_to_head(struct list_head *list)
11471145{
11481146 unsigned long * ptr ;
11491147
@@ -1166,7 +1164,7 @@ static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
11661164 /*
11671165 * Set the previous list pointer to have the HEAD flag.
11681166 */
1169- 	rb_set_list_to_head(cpu_buffer, head->list.prev);
1167+ 	rb_set_list_to_head(head->list.prev);
11701168}
11711169
11721170static void rb_list_head_clear (struct list_head * list )
@@ -1241,8 +1239,7 @@ static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
12411239 old_flag , RB_PAGE_NORMAL );
12421240}
12431241
1244- static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
1245- 			       struct buffer_page **bpage)
1242+ static inline void rb_inc_page(struct buffer_page **bpage)
12461243{
12471244 struct list_head * p = rb_list_head ((* bpage )-> list .next );
12481245
@@ -1274,11 +1271,11 @@ rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
12741271 */
12751272 for (i = 0 ; i < 3 ; i ++ ) {
12761273 do {
1277- 			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
1274+ 			if (rb_is_head_page(page, page->list.prev)) {
12781275 cpu_buffer -> head_page = page ;
12791276 return page ;
12801277 }
1281- 			rb_inc_page(cpu_buffer, &page);
1278+ 			rb_inc_page(&page);
12821279 } while (page != head );
12831280 }
12841281
@@ -1824,7 +1821,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
18241821 cond_resched ();
18251822
18261823 to_remove_page = tmp_iter_page ;
1827- 		rb_inc_page(cpu_buffer, &tmp_iter_page);
1824+ 		rb_inc_page(&tmp_iter_page);
18281825
18291826 /* update the counters */
18301827 page_entries = rb_page_entries (to_remove_page );
@@ -2271,7 +2268,7 @@ static void rb_inc_iter(struct ring_buffer_iter *iter)
22712268 if (iter -> head_page == cpu_buffer -> reader_page )
22722269 iter -> head_page = rb_set_head_page (cpu_buffer );
22732270 else
2274- 		rb_inc_page(cpu_buffer, &iter->head_page);
2271+ 		rb_inc_page(&iter->head_page);
22752272
22762273 iter -> page_stamp = iter -> read_stamp = iter -> head_page -> page -> time_stamp ;
22772274 iter -> head = 0 ;
@@ -2374,7 +2371,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
23742371 * want the outer most commit to reset it.
23752372 */
23762373 new_head = next_page ;
2377- 	rb_inc_page(cpu_buffer, &new_head);
2374+ 	rb_inc_page(&new_head);
23782375
23792376 ret = rb_head_page_set_head (cpu_buffer , new_head , next_page ,
23802377 RB_PAGE_NORMAL );
@@ -2526,7 +2523,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
25262523
25272524 next_page = tail_page ;
25282525
2529- 	rb_inc_page(cpu_buffer, &next_page);
2526+ 	rb_inc_page(&next_page);
25302527
25312528 /*
25322529 * If for some reason, we had an interrupt storm that made
@@ -2552,7 +2549,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
25522549 * the buffer, unless the commit page is still on the
25532550 * reader page.
25542551 */
2555- 	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2552+ 	if (rb_is_head_page(next_page, &tail_page->list)) {
25562553
25572554 /*
25582555 * If the commit is not on the reader page, then
@@ -2879,7 +2876,7 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
28792876 return ;
28802877 local_set (& cpu_buffer -> commit_page -> page -> commit ,
28812878 rb_page_write (cpu_buffer -> commit_page ));
2882- 		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
2879+ 		rb_inc_page(&cpu_buffer->commit_page);
28832880 /* add barrier to keep gcc from optimizing too much */
28842881 barrier ();
28852882 }
@@ -3638,14 +3635,14 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
36383635 * Because the commit page may be on the reader page we
36393636 * start with the next page and check the end loop there.
36403637 */
3641- 	rb_inc_page(cpu_buffer, &bpage);
3638+ 	rb_inc_page(&bpage);
36423639 start = bpage ;
36433640 do {
36443641 if (bpage -> page == (void * )addr ) {
36453642 local_dec (& bpage -> entries );
36463643 return ;
36473644 }
3648- 		rb_inc_page(cpu_buffer, &bpage);
3645+ 		rb_inc_page(&bpage);
36493646 } while (bpage != start );
36503647
36513648 /* commit not part of this buffer?? */
@@ -4367,7 +4364,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
43674364 cpu_buffer -> pages = reader -> list .prev ;
43684365
43694366 /* The reader page will be pointing to the new head */
4370- 	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
4367+ 	rb_set_list_to_head(&cpu_buffer->reader_page->list);
43714368
43724369 /*
43734370 * We want to make sure we read the overruns after we set up our
@@ -4406,7 +4403,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
44064403 * Now make the new head point back to the reader page.
44074404 */
44084405 rb_list_head (reader -> list .next )-> prev = & cpu_buffer -> reader_page -> list ;
4409- 	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
4406+ 	rb_inc_page(&cpu_buffer->head_page);
44104407
44114408 local_inc (& cpu_buffer -> pages_read );
44124409
0 commit comments