Skip to content

Commit ace30fb

Browse files
sjp38akpm00
authored and committed
mm/damon/core: use pseudo-moving sum for nr_accesses_bp
Let nr_accesses_bp be calculated as a pseudo-moving sum that is updated for every sampling interval, using damon_moving_sum(). This is assumed to be useful for cases where the aggregation interval is set quite huge, but the monitoring results need to be collected earlier than the next aggregation interval has passed. Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: SeongJae Park <[email protected]> Cc: Brendan Higgins <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent 8033382 commit ace30fb

File tree

4 files changed

+36
-13
lines changed

4 files changed

+36
-13
lines changed

include/linux/damon.h

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,8 @@ struct damon_addr_range {
4040
* @ar: The address range of the region.
4141
* @sampling_addr: Address of the sample for the next access check.
4242
* @nr_accesses: Access frequency of this region.
43-
* @nr_accesses_bp: @nr_accesses in basis point (0.01%).
43+
* @nr_accesses_bp: @nr_accesses in basis point (0.01%) that updated for
44+
* each sampling interval.
4445
* @list: List head for siblings.
4546
* @age: Age of this region.
4647
*
@@ -51,7 +52,11 @@ struct damon_addr_range {
5152
* damon_update_region_access_rate().
5253
*
5354
* @nr_accesses_bp is another representation of @nr_accesses in basis point
54-
* (1 in 10,000) that updated every aggregation interval.
55+
* (1 in 10,000) that updated for every &damon_attrs->sample_interval in a
56+
* manner similar to moving sum. By the algorithm, this value becomes
57+
* @nr_accesses * 10000 for every &struct damon_attrs->aggr_interval. This can
58+
* be used when the aggregation interval is too huge and therefore cannot wait
59+
* for it before getting the access monitoring results.
5560
*
5661
* @age is initially zero, increased for each aggregation interval, and reset
5762
* to zero again if the access frequency is significantly changed. If two
@@ -629,7 +634,8 @@ int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
629634
unsigned int nr_ranges);
630635
unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
631636
unsigned int len_window, unsigned int new_value);
632-
void damon_update_region_access_rate(struct damon_region *r, bool accessed);
637+
void damon_update_region_access_rate(struct damon_region *r, bool accessed,
638+
struct damon_attrs *attrs);
633639

634640
struct damos_filter *damos_new_filter(enum damos_filter_type type,
635641
bool matching);

mm/damon/core.c

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1599,14 +1599,28 @@ unsigned int damon_moving_sum(unsigned int mvsum, unsigned int nomvsum,
15991599
* damon_update_region_access_rate() - Update the access rate of a region.
16001600
* @r: The DAMON region to update for its access check result.
16011601
* @accessed: Whether the region has accessed during last sampling interval.
1602+
* @attrs: The damon_attrs of the DAMON context.
16021603
*
16031604
* Update the access rate of a region with the region's last sampling interval
16041605
* access check result.
16051606
*
16061607
* Usually this will be called by &damon_operations->check_accesses callback.
16071608
*/
1608-
void damon_update_region_access_rate(struct damon_region *r, bool accessed)
1609+
void damon_update_region_access_rate(struct damon_region *r, bool accessed,
1610+
struct damon_attrs *attrs)
16091611
{
1612+
unsigned int len_window = 1;
1613+
1614+
/*
1615+
* sample_interval can be zero, but cannot be larger than
1616+
* aggr_interval, owing to validation of damon_set_attrs().
1617+
*/
1618+
if (attrs->sample_interval)
1619+
len_window = attrs->aggr_interval / attrs->sample_interval;
1620+
r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
1621+
r->last_nr_accesses * 10000, len_window,
1622+
accessed ? 10000 : 0);
1623+
16101624
if (accessed)
16111625
r->nr_accesses++;
16121626
}

mm/damon/paddr.c

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -148,7 +148,8 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
148148
return accessed;
149149
}
150150

151-
static void __damon_pa_check_access(struct damon_region *r)
151+
static void __damon_pa_check_access(struct damon_region *r,
152+
struct damon_attrs *attrs)
152153
{
153154
static unsigned long last_addr;
154155
static unsigned long last_folio_sz = PAGE_SIZE;
@@ -157,12 +158,12 @@ static void __damon_pa_check_access(struct damon_region *r)
157158
/* If the region is in the last checked page, reuse the result */
158159
if (ALIGN_DOWN(last_addr, last_folio_sz) ==
159160
ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
160-
damon_update_region_access_rate(r, last_accessed);
161+
damon_update_region_access_rate(r, last_accessed, attrs);
161162
return;
162163
}
163164

164165
last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
165-
damon_update_region_access_rate(r, last_accessed);
166+
damon_update_region_access_rate(r, last_accessed, attrs);
166167

167168
last_addr = r->sampling_addr;
168169
}
@@ -175,7 +176,7 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
175176

176177
damon_for_each_target(t, ctx) {
177178
damon_for_each_region(r, t) {
178-
__damon_pa_check_access(r);
179+
__damon_pa_check_access(r, &ctx->attrs);
179180
max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
180181
}
181182
}

mm/damon/vaddr.c

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -558,26 +558,27 @@ static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
558558
* r the region to be checked
559559
*/
560560
static void __damon_va_check_access(struct mm_struct *mm,
561-
struct damon_region *r, bool same_target)
561+
struct damon_region *r, bool same_target,
562+
struct damon_attrs *attrs)
562563
{
563564
static unsigned long last_addr;
564565
static unsigned long last_folio_sz = PAGE_SIZE;
565566
static bool last_accessed;
566567

567568
if (!mm) {
568-
damon_update_region_access_rate(r, false);
569+
damon_update_region_access_rate(r, false, attrs);
569570
return;
570571
}
571572

572573
/* If the region is in the last checked page, reuse the result */
573574
if (same_target && (ALIGN_DOWN(last_addr, last_folio_sz) ==
574575
ALIGN_DOWN(r->sampling_addr, last_folio_sz))) {
575-
damon_update_region_access_rate(r, last_accessed);
576+
damon_update_region_access_rate(r, last_accessed, attrs);
576577
return;
577578
}
578579

579580
last_accessed = damon_va_young(mm, r->sampling_addr, &last_folio_sz);
580-
damon_update_region_access_rate(r, last_accessed);
581+
damon_update_region_access_rate(r, last_accessed, attrs);
581582

582583
last_addr = r->sampling_addr;
583584
}
@@ -594,7 +595,8 @@ static unsigned int damon_va_check_accesses(struct damon_ctx *ctx)
594595
mm = damon_get_mm(t);
595596
same_target = false;
596597
damon_for_each_region(r, t) {
597-
__damon_va_check_access(mm, r, same_target);
598+
__damon_va_check_access(mm, r, same_target,
599+
&ctx->attrs);
598600
max_nr_accesses = max(r->nr_accesses, max_nr_accesses);
599601
same_target = true;
600602
}

0 commit comments

Comments
 (0)