Skip to content

Commit d5ceb62

Browse files
olsajiri authored and acmel committed
perf ordered_events: Add 'struct ordered_events_buffer' layer
When ordering events, we use preallocated buffers to store separate events. Those buffers currently don't have their own struct, but since they are basically an array of 'struct ordered_event' objects, we use the first event to hold buffers data - list head, that holds all buffers together: struct ordered_events { ... struct ordered_event *buffer; ... }; struct ordered_event { u64 timestamp; u64 file_offset; union perf_event *event; struct list_head list; }; This is quite convoluted and error prone as demonstrated by free-ing issue discovered and fixed by Stephane in here [1]. This patch adds the 'struct ordered_events_buffer' object, that holds the buffer data and frees it up properly. [1] - https://marc.info/?l=linux-kernel&m=153376761329335&w=2 Reported-by: Stephane Eranian <[email protected]> Signed-off-by: Jiri Olsa <[email protected]> Tested-by: Stephane Eranian <[email protected]> Acked-by: Namhyung Kim <[email protected]> Cc: Alexander Shishkin <[email protected]> Cc: Andi Kleen <[email protected]> Cc: David Ahern <[email protected]> Cc: Peter Zijlstra <[email protected]> Link: http://lkml.kernel.org/r/[email protected] Signed-off-by: Arnaldo Carvalho de Melo <[email protected]>
1 parent 2e85d59 commit d5ceb62

File tree

2 files changed

+91
-29
lines changed

2 files changed

+91
-29
lines changed

tools/perf/util/ordered-events.c

Lines changed: 70 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -80,14 +80,20 @@ static union perf_event *dup_event(struct ordered_events *oe,
8080
return oe->copy_on_queue ? __dup_event(oe, event) : event;
8181
}
8282

83-
static void free_dup_event(struct ordered_events *oe, union perf_event *event)
83+
static void __free_dup_event(struct ordered_events *oe, union perf_event *event)
8484
{
85-
if (event && oe->copy_on_queue) {
85+
if (event) {
8686
oe->cur_alloc_size -= event->header.size;
8787
free(event);
8888
}
8989
}
9090

91+
static void free_dup_event(struct ordered_events *oe, union perf_event *event)
92+
{
93+
if (oe->copy_on_queue)
94+
__free_dup_event(oe, event);
95+
}
96+
9197
#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct ordered_event))
9298
static struct ordered_event *alloc_event(struct ordered_events *oe,
9399
union perf_event *event)
@@ -100,15 +106,43 @@ static struct ordered_event *alloc_event(struct ordered_events *oe,
100106
if (!new_event)
101107
return NULL;
102108

109+
/*
110+
* We maintain the following scheme of buffers for ordered
111+
* event allocation:
112+
*
113+
* to_free list -> buffer1 (64K)
114+
* buffer2 (64K)
115+
* ...
116+
*
117+
* Each buffer keeps an array of ordered events objects:
118+
* buffer -> event[0]
119+
* event[1]
120+
* ...
121+
*
122+
* Each allocated ordered event is linked to one of
123+
* following lists:
124+
* - time ordered list 'events'
125+
* - list of currently removed events 'cache'
126+
*
127+
* Allocation of the ordered event uses the following order
128+
* to get the memory:
129+
* - use recently removed object from 'cache' list
130+
* - use available object in current allocation buffer
131+
* - allocate new buffer if the current buffer is full
132+
*
133+
* Removal of ordered event object moves it from events to
134+
* the cache list.
135+
*/
103136
if (!list_empty(cache)) {
104137
new = list_entry(cache->next, struct ordered_event, list);
105138
list_del(&new->list);
106139
} else if (oe->buffer) {
107-
new = oe->buffer + oe->buffer_idx;
140+
new = &oe->buffer->event[oe->buffer_idx];
108141
if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
109142
oe->buffer = NULL;
110143
} else if (oe->cur_alloc_size < oe->max_alloc_size) {
111-
size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);
144+
size_t size = sizeof(*oe->buffer) +
145+
MAX_SAMPLE_BUFFER * sizeof(*new);
112146

113147
oe->buffer = malloc(size);
114148
if (!oe->buffer) {
@@ -122,11 +156,11 @@ static struct ordered_event *alloc_event(struct ordered_events *oe,
122156
oe->cur_alloc_size += size;
123157
list_add(&oe->buffer->list, &oe->to_free);
124158

125-
/* First entry is abused to maintain the to_free list. */
126-
oe->buffer_idx = 2;
127-
new = oe->buffer + 1;
159+
oe->buffer_idx = 1;
160+
new = &oe->buffer->event[0];
128161
} else {
129162
pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
163+
return NULL;
130164
}
131165

132166
new->event = new_event;
@@ -300,15 +334,38 @@ void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t d
300334
oe->deliver = deliver;
301335
}
302336

337+
static void
338+
ordered_events_buffer__free(struct ordered_events_buffer *buffer,
339+
unsigned int max, struct ordered_events *oe)
340+
{
341+
if (oe->copy_on_queue) {
342+
unsigned int i;
343+
344+
for (i = 0; i < max; i++)
345+
__free_dup_event(oe, buffer->event[i].event);
346+
}
347+
348+
free(buffer);
349+
}
350+
303351
void ordered_events__free(struct ordered_events *oe)
304352
{
305-
while (!list_empty(&oe->to_free)) {
306-
struct ordered_event *event;
353+
struct ordered_events_buffer *buffer, *tmp;
307354

308-
event = list_entry(oe->to_free.next, struct ordered_event, list);
309-
list_del(&event->list);
310-
free_dup_event(oe, event->event);
311-
free(event);
355+
if (list_empty(&oe->to_free))
356+
return;
357+
358+
/*
359+
* Current buffer might not have all the events allocated
360+
* yet, we need to free only allocated ones ...
361+
*/
362+
list_del(&oe->buffer->list);
363+
ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
364+
365+
/* ... and continue with the rest */
366+
list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
367+
list_del(&buffer->list);
368+
ordered_events_buffer__free(buffer, MAX_SAMPLE_BUFFER, oe);
312369
}
313370
}
314371

tools/perf/util/ordered-events.h

Lines changed: 21 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -25,23 +25,28 @@ struct ordered_events;
2525
typedef int (*ordered_events__deliver_t)(struct ordered_events *oe,
2626
struct ordered_event *event);
2727

28+
struct ordered_events_buffer {
29+
struct list_head list;
30+
struct ordered_event event[0];
31+
};
32+
2833
struct ordered_events {
29-
u64 last_flush;
30-
u64 next_flush;
31-
u64 max_timestamp;
32-
u64 max_alloc_size;
33-
u64 cur_alloc_size;
34-
struct list_head events;
35-
struct list_head cache;
36-
struct list_head to_free;
37-
struct ordered_event *buffer;
38-
struct ordered_event *last;
39-
ordered_events__deliver_t deliver;
40-
int buffer_idx;
41-
unsigned int nr_events;
42-
enum oe_flush last_flush_type;
43-
u32 nr_unordered_events;
44-
bool copy_on_queue;
34+
u64 last_flush;
35+
u64 next_flush;
36+
u64 max_timestamp;
37+
u64 max_alloc_size;
38+
u64 cur_alloc_size;
39+
struct list_head events;
40+
struct list_head cache;
41+
struct list_head to_free;
42+
struct ordered_events_buffer *buffer;
43+
struct ordered_event *last;
44+
ordered_events__deliver_t deliver;
45+
int buffer_idx;
46+
unsigned int nr_events;
47+
enum oe_flush last_flush_type;
48+
u32 nr_unordered_events;
49+
bool copy_on_queue;
4550
};
4651

4752
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,

0 commit comments

Comments (0)