
Commit ef932cd

sergey-senozhatsky authored and akpm00 committed
zram: factor out ZRAM_HUGE write
zram_write_page() handles three cases: ZRAM_SAME page stores (already factored out), regular page stores, and ZRAM_HUGE page stores.

ZRAM_HUGE handling adds a significant amount of complexity. Instead, we can handle ZRAM_HUGE in a separate function. This allows us to simplify the zs_handle allocation slow path, as it no longer needs to handle the ZRAM_HUGE case.

ZRAM_HUGE zs_handle allocation, on the other hand, can now drop __GFP_KSWAPD_RECLAIM, because we handle ZRAM_HUGE in preemptible context (outside of the local-lock scope).

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Sergey Senozhatsky <[email protected]>
Cc: Minchan Kim <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent a5cd78a commit ef932cd
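
For readers skimming the diff below, here is a condensed, compilable userspace sketch of the reworked write path. Every identifier in it (pool_alloc, compress_one, store_uncompressed, HUGE_CLASS_SIZE) is a stand-in invented for illustration; only the control flow mirrors the patch, and the kernel-specific details (GFP flags, per-CPU streams) appear solely as comments.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096
#define HUGE_CLASS_SIZE 3264	/* stand-in for zram's runtime huge_class_size */

/* Stand-in allocator: 'fast' mimics the no-direct-reclaim attempt. */
static long pool_alloc(size_t len, bool fast)
{
	(void)len;
	return fast ? -1 : 42;	/* pretend the fast path failed once */
}

static size_t compress_one(const void *page)
{
	(void)page;
	return 2048;		/* pretend the page compressed to 2 KiB */
}

static int store_uncompressed(const void *page)
{
	(void)page;
	puts("ZRAM_HUGE path: copy page as-is, preemptible context");
	return 0;
}

static int write_page(const void *page)
{
	size_t comp_len = compress_one(page);

	/*
	 * Incompressible pages now branch off early into a dedicated
	 * function, so the slow path below no longer has to re-acquire
	 * the compression stream for the ZRAM_HUGE case.
	 */
	if (comp_len >= HUGE_CLASS_SIZE)
		return store_uncompressed(page);

	/* Fast path (kernel: __GFP_KSWAPD_RECLAIM, no direct reclaim). */
	long handle = pool_alloc(comp_len, true);
	if (handle < 0) {
		/*
		 * Slow path (kernel: GFP_NOIO). The real code jumps back
		 * to compress_again here, because the per-CPU stream was
		 * released before the sleepable allocation.
		 */
		handle = pool_alloc(comp_len, false);
		if (handle < 0)
			return -1;
	}
	printf("stored %zu bytes at handle %ld\n", comp_len, handle);
	return 0;
}

int main(void)
{
	char page[PAGE_SIZE] = { 0 };
	return write_page(page);
}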


drivers/block/zram/zram_drv.c

Lines changed: 83 additions & 53 deletions
@@ -132,6 +132,27 @@ static inline bool zram_allocated(struct zram *zram, u32 index)
 		zram_test_flag(zram, index, ZRAM_WB);
 }
 
+static inline void update_used_max(struct zram *zram, const unsigned long pages)
+{
+	unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
+
+	do {
+		if (cur_max >= pages)
+			return;
+	} while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
+					  &cur_max, pages));
+}
+
+static bool zram_can_store_page(struct zram *zram)
+{
+	unsigned long alloced_pages;
+
+	alloced_pages = zs_get_total_pages(zram->mem_pool);
+	update_used_max(zram, alloced_pages);
+
+	return !zram->limit_pages || alloced_pages <= zram->limit_pages;
+}
+
 #if PAGE_SIZE != 4096
 static inline bool is_partial_io(struct bio_vec *bvec)
 {
@@ -266,18 +287,6 @@ static struct zram_pp_slot *select_pp_slot(struct zram_pp_ctl *ctl)
 }
 #endif
 
-static inline void update_used_max(struct zram *zram,
-				   const unsigned long pages)
-{
-	unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages);
-
-	do {
-		if (cur_max >= pages)
-			return;
-	} while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages,
-					  &cur_max, pages));
-}
-
 static inline void zram_fill_page(void *ptr, unsigned long len,
 				  unsigned long value)
 {
@@ -1639,13 +1648,54 @@ static int write_same_filled_page(struct zram *zram, unsigned long fill,
 	return 0;
 }
 
+static int write_incompressible_page(struct zram *zram, struct page *page,
+				     u32 index)
+{
+	unsigned long handle;
+	void *src, *dst;
+
+	/*
+	 * This function is called from preemptible context so we don't need
+	 * to do optimistic and fallback to pessimistic handle allocation,
+	 * like we do for compressible pages.
+	 */
+	handle = zs_malloc(zram->mem_pool, PAGE_SIZE,
+			   GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);
+	if (IS_ERR_VALUE(handle))
+		return PTR_ERR((void *)handle);
+
+	if (!zram_can_store_page(zram)) {
+		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+		zs_free(zram->mem_pool, handle);
+		return -ENOMEM;
+	}
+
+	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
+	src = kmap_local_page(page);
+	memcpy(dst, src, PAGE_SIZE);
+	kunmap_local(src);
+	zs_unmap_object(zram->mem_pool, handle);
+
+	zram_slot_lock(zram, index);
+	zram_set_flag(zram, index, ZRAM_HUGE);
+	zram_set_handle(zram, index, handle);
+	zram_set_obj_size(zram, index, PAGE_SIZE);
+	zram_slot_unlock(zram, index);
+
+	atomic64_add(PAGE_SIZE, &zram->stats.compr_data_size);
+	atomic64_inc(&zram->stats.huge_pages);
+	atomic64_inc(&zram->stats.huge_pages_since);
+	atomic64_inc(&zram->stats.pages_stored);
+
+	return 0;
+}
+
 static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 {
 	int ret = 0;
-	unsigned long alloced_pages;
 	unsigned long handle = -ENOMEM;
 	unsigned int comp_len = 0;
-	void *src, *dst, *mem;
+	void *dst, *mem;
 	struct zcomp_strm *zstrm;
 	unsigned long element = 0;
 	bool same_filled;
@@ -1663,10 +1713,10 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 
 compress_again:
 	zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
-	src = kmap_local_page(page);
+	mem = kmap_local_page(page);
 	ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm,
-			     src, &comp_len);
-	kunmap_local(src);
+			     mem, &comp_len);
+	kunmap_local(mem);
 
 	if (unlikely(ret)) {
 		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
@@ -1675,8 +1725,11 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 		return ret;
 	}
 
-	if (comp_len >= huge_class_size)
-		comp_len = PAGE_SIZE;
+	if (comp_len >= huge_class_size) {
+		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
+		return write_incompressible_page(zram, page, index);
+	}
+
 	/*
 	 * handle allocation has 2 paths:
 	 * a) fast path is executed with preemption disabled (for
@@ -1692,66 +1745,43 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index)
 	 */
 	if (IS_ERR_VALUE(handle))
 		handle = zs_malloc(zram->mem_pool, comp_len,
-				__GFP_KSWAPD_RECLAIM |
-				__GFP_NOWARN |
-				__GFP_HIGHMEM |
-				__GFP_MOVABLE);
+				   __GFP_KSWAPD_RECLAIM |
+				   __GFP_NOWARN |
+				   __GFP_HIGHMEM |
+				   __GFP_MOVABLE);
 	if (IS_ERR_VALUE(handle)) {
 		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
 		atomic64_inc(&zram->stats.writestall);
 		handle = zs_malloc(zram->mem_pool, comp_len,
-				GFP_NOIO | __GFP_HIGHMEM |
-				__GFP_MOVABLE);
+				   GFP_NOIO | __GFP_HIGHMEM |
+				   __GFP_MOVABLE);
 		if (IS_ERR_VALUE(handle))
 			return PTR_ERR((void *)handle);
 
-		if (comp_len != PAGE_SIZE)
-			goto compress_again;
-		/*
-		 * If the page is not compressible, you need to acquire the
-		 * lock and execute the code below. The zcomp_stream_get()
-		 * call is needed to disable the cpu hotplug and grab the
-		 * zstrm buffer back. It is necessary that the dereferencing
-		 * of the zstrm variable below occurs correctly.
-		 */
-		zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]);
+		goto compress_again;
 	}
 
-	alloced_pages = zs_get_total_pages(zram->mem_pool);
-	update_used_max(zram, alloced_pages);
-
-	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
+	if (!zram_can_store_page(zram)) {
 		zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
 		zs_free(zram->mem_pool, handle);
 		return -ENOMEM;
 	}
 
 	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
 
-	src = zstrm->buffer;
-	if (comp_len == PAGE_SIZE)
-		src = kmap_local_page(page);
-	memcpy(dst, src, comp_len);
-	if (comp_len == PAGE_SIZE)
-		kunmap_local(src);
-
+	memcpy(dst, zstrm->buffer, comp_len);
 	zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]);
 	zs_unmap_object(zram->mem_pool, handle);
-	atomic64_add(comp_len, &zram->stats.compr_data_size);
 
 	zram_slot_lock(zram, index);
-	if (comp_len == PAGE_SIZE) {
-		zram_set_flag(zram, index, ZRAM_HUGE);
-		atomic64_inc(&zram->stats.huge_pages);
-		atomic64_inc(&zram->stats.huge_pages_since);
-	}
-
 	zram_set_handle(zram, index, handle);
 	zram_set_obj_size(zram, index, comp_len);
 	zram_slot_unlock(zram, index);
 
 	/* Update stats */
 	atomic64_inc(&zram->stats.pages_stored);
+	atomic64_add(comp_len, &zram->stats.compr_data_size);
+
 	return ret;
 }
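
The update_used_max() helper this patch relocates (and which zram_can_store_page() now wraps) is a standard lock-free "store maximum" loop. Below is a minimal userspace analogue, assuming C11 <stdatomic.h>'s atomic_compare_exchange_weak() in place of the kernel's atomic_long_try_cmpxchg(); both update the expected value on failure, so the loop re-checks without an extra load.

#include <stdatomic.h>
#include <stdio.h>

static _Atomic long max_used_pages;

/* Record a new maximum unless a concurrent writer already stored a
 * larger value; mirrors update_used_max() from the diff above. */
static void update_used_max(long pages)
{
	long cur_max = atomic_load(&max_used_pages);

	do {
		if (cur_max >= pages)
			return;
	} while (!atomic_compare_exchange_weak(&max_used_pages,
					       &cur_max, pages));
}

int main(void)
{
	update_used_max(10);
	update_used_max(7);	/* no effect: 10 is already recorded */
	update_used_max(25);
	printf("max_used_pages: %ld\n", atomic_load(&max_used_pages));	/* 25 */
	return 0;
}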

Comments (0)