1515static DEFINE_MUTEX (damon_lock );
1616static int nr_running_ctxs ;
1717
18+ /*
19+ * Construct a damon_region struct
20+ *
21+ * Returns the pointer to the new struct if success, or NULL otherwise
22+ */
23+ struct damon_region * damon_new_region (unsigned long start , unsigned long end )
24+ {
25+ struct damon_region * region ;
26+
27+ region = kmalloc (sizeof (* region ), GFP_KERNEL );
28+ if (!region )
29+ return NULL ;
30+
31+ region -> ar .start = start ;
32+ region -> ar .end = end ;
33+ region -> nr_accesses = 0 ;
34+ INIT_LIST_HEAD (& region -> list );
35+
36+ return region ;
37+ }
38+
/*
 * Add a region between two other regions
 *
 * 'prev' and 'next' must be adjacent entries on the same regions list; the
 * new region 'r' is linked directly between them via the low-level list
 * helper.  The caller is responsible for keeping address ranges ordered.
 */
inline void damon_insert_region(struct damon_region *r,
		struct damon_region *prev, struct damon_region *next)
{
	__list_add(&r->list, &prev->list, &next->list);
}
47+
/* Append region 'r' to the end of target 't's regions list */
void damon_add_region(struct damon_region *r, struct damon_target *t)
{
	list_add_tail(&r->list, &t->regions_list);
}
52+
/* Unlink 'r' from the regions list it is on; does not free its memory */
static void damon_del_region(struct damon_region *r)
{
	list_del(&r->list);
}
57+
/* Release the memory of 'r'; the caller must have unlinked it already */
static void damon_free_region(struct damon_region *r)
{
	kfree(r);
}
62+
/* Unlink 'r' from its regions list and free it */
void damon_destroy_region(struct damon_region *r)
{
	damon_del_region(r);
	damon_free_region(r);
}
68+
69+ /*
70+ * Construct a damon_target struct
71+ *
72+ * Returns the pointer to the new struct if success, or NULL otherwise
73+ */
74+ struct damon_target * damon_new_target (unsigned long id )
75+ {
76+ struct damon_target * t ;
77+
78+ t = kmalloc (sizeof (* t ), GFP_KERNEL );
79+ if (!t )
80+ return NULL ;
81+
82+ t -> id = id ;
83+ INIT_LIST_HEAD (& t -> regions_list );
84+
85+ return t ;
86+ }
87+
/* Append target 't' to the monitoring targets of context 'ctx' */
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
{
	list_add_tail(&t->list, &ctx->region_targets);
}
92+
/* Unlink 't' from its context's targets list; does not free its memory */
static void damon_del_target(struct damon_target *t)
{
	list_del(&t->list);
}
97+
98+ void damon_free_target (struct damon_target * t )
99+ {
100+ struct damon_region * r , * next ;
101+
102+ damon_for_each_region_safe (r , next , t )
103+ damon_free_region (r );
104+ kfree (t );
105+ }
106+
/* Unlink 't' from its context and free it along with all of its regions */
void damon_destroy_target(struct damon_target *t)
{
	damon_del_target(t);
	damon_free_target(t);
}
112+
18113struct damon_ctx * damon_new_ctx (void )
19114{
20115 struct damon_ctx * ctx ;
@@ -32,15 +127,27 @@ struct damon_ctx *damon_new_ctx(void)
32127
33128 mutex_init (& ctx -> kdamond_lock );
34129
35- ctx -> target = NULL ;
130+ INIT_LIST_HEAD ( & ctx -> region_targets ) ;
36131
37132 return ctx ;
38133}
39134
40- void damon_destroy_ctx (struct damon_ctx * ctx )
135+ static void damon_destroy_targets (struct damon_ctx * ctx )
41136{
42- if (ctx -> primitive .cleanup )
137+ struct damon_target * t , * next_t ;
138+
139+ if (ctx -> primitive .cleanup ) {
43140 ctx -> primitive .cleanup (ctx );
141+ return ;
142+ }
143+
144+ damon_for_each_target_safe (t , next_t , ctx )
145+ damon_destroy_target (t );
146+ }
147+
148+ void damon_destroy_ctx (struct damon_ctx * ctx )
149+ {
150+ damon_destroy_targets (ctx );
44151 kfree (ctx );
45152}
46153
@@ -217,6 +324,21 @@ static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
217324 ctx -> aggr_interval );
218325}
219326
327+ /*
328+ * Reset the aggregated monitoring results ('nr_accesses' of each region).
329+ */
330+ static void kdamond_reset_aggregated (struct damon_ctx * c )
331+ {
332+ struct damon_target * t ;
333+
334+ damon_for_each_target (t , c ) {
335+ struct damon_region * r ;
336+
337+ damon_for_each_region (r , t )
338+ r -> nr_accesses = 0 ;
339+ }
340+ }
341+
220342/*
221343 * Check whether it is time to check and apply the target monitoring regions
222344 *
@@ -238,6 +360,7 @@ static bool kdamond_need_update_primitive(struct damon_ctx *ctx)
238360 */
239361static bool kdamond_need_stop (struct damon_ctx * ctx )
240362{
363+ struct damon_target * t ;
241364 bool stop ;
242365
243366 mutex_lock (& ctx -> kdamond_lock );
@@ -249,7 +372,12 @@ static bool kdamond_need_stop(struct damon_ctx *ctx)
249372 if (!ctx -> primitive .target_valid )
250373 return false;
251374
252- return !ctx -> primitive .target_valid (ctx -> target );
375+ damon_for_each_target (t , ctx ) {
376+ if (ctx -> primitive .target_valid (t ))
377+ return false;
378+ }
379+
380+ return true;
253381}
254382
255383static void set_kdamond_stop (struct damon_ctx * ctx )
@@ -265,6 +393,8 @@ static void set_kdamond_stop(struct damon_ctx *ctx)
265393static int kdamond_fn (void * data )
266394{
267395 struct damon_ctx * ctx = (struct damon_ctx * )data ;
396+ struct damon_target * t ;
397+ struct damon_region * r , * next ;
268398
269399 mutex_lock (& ctx -> kdamond_lock );
270400 pr_info ("kdamond (%d) starts\n" , ctx -> kdamond -> pid );
@@ -291,6 +421,7 @@ static int kdamond_fn(void *data)
291421 if (ctx -> callback .after_aggregation &&
292422 ctx -> callback .after_aggregation (ctx ))
293423 set_kdamond_stop (ctx );
424+ kdamond_reset_aggregated (ctx );
294425 if (ctx -> primitive .reset_aggregated )
295426 ctx -> primitive .reset_aggregated (ctx );
296427 }
@@ -300,6 +431,10 @@ static int kdamond_fn(void *data)
300431 ctx -> primitive .update (ctx );
301432 }
302433 }
434+ damon_for_each_target (t , ctx ) {
435+ damon_for_each_region_safe (r , next , t )
436+ damon_destroy_region (r );
437+ }
303438
304439 if (ctx -> callback .before_terminate &&
305440 ctx -> callback .before_terminate (ctx ))
0 commit comments