
Commit 68ad474

davidhildenbrand authored and Alexander Gordeev committed
s390/uv: gmap_make_secure() cleanups for further changes
Let's factor out handling of LRU cache draining and convert the if-else
chain to a switch-case.

Reviewed-by: Claudio Imbrenda <[email protected]>
Signed-off-by: David Hildenbrand <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Heiko Carstens <[email protected]>
Signed-off-by: Alexander Gordeev <[email protected]>
1 parent 3f29f65 commit 68ad474


arch/s390/kernel/uv.c

Lines changed: 40 additions & 26 deletions
@@ -266,6 +266,36 @@ static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_str
 	return atomic_read(&mm->context.protected_count) > 1;
 }
 
+/*
+ * Drain LRU caches: the local one on first invocation and the ones of all
+ * CPUs on successive invocations. Returns "true" on the first invocation.
+ */
+static bool drain_lru(bool *drain_lru_called)
+{
+	/*
+	 * If we have tried a local drain and the folio refcount
+	 * still does not match our expected safe value, try with a
+	 * system wide drain. This is needed if the pagevecs holding
+	 * the page are on a different CPU.
+	 */
+	if (*drain_lru_called) {
+		lru_add_drain_all();
+		/* We give up here, don't retry immediately. */
+		return false;
+	}
+	/*
+	 * We are here if the folio refcount does not match the
+	 * expected safe value. The main culprits are usually
+	 * pagevecs. With lru_add_drain() we drain the pagevecs
+	 * on the local CPU so that hopefully the refcount will
+	 * reach the expected safe value.
+	 */
+	lru_add_drain();
+	*drain_lru_called = true;
+	/* The caller should try again immediately */
+	return true;
+}
+
 /*
  * Requests the Ultravisor to make a page accessible to a guest.
  * If it's brought in the first time, it will be cleared. If
@@ -275,7 +305,7 @@ static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_str
 int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
 {
 	struct vm_area_struct *vma;
-	bool local_drain = false;
+	bool drain_lru_called = false;
 	spinlock_t *ptelock;
 	unsigned long uaddr;
 	struct folio *folio;
@@ -331,37 +361,21 @@ int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
 out:
 	mmap_read_unlock(gmap->mm);
 
-	if (rc == -EAGAIN) {
+	switch (rc) {
+	case -EAGAIN:
 		/*
 		 * If we are here because the UVC returned busy or partial
 		 * completion, this is just a useless check, but it is safe.
 		 */
 		folio_wait_writeback(folio);
 		folio_put(folio);
-	} else if (rc == -EBUSY) {
-		/*
-		 * If we have tried a local drain and the folio refcount
-		 * still does not match our expected safe value, try with a
-		 * system wide drain. This is needed if the pagevecs holding
-		 * the page are on a different CPU.
-		 */
-		if (local_drain) {
-			lru_add_drain_all();
-			/* We give up here, and let the caller try again */
-			return -EAGAIN;
-		}
-		/*
-		 * We are here if the folio refcount does not match the
-		 * expected safe value. The main culprits are usually
-		 * pagevecs. With lru_add_drain() we drain the pagevecs
-		 * on the local CPU so that hopefully the refcount will
-		 * reach the expected safe value.
-		 */
-		lru_add_drain();
-		local_drain = true;
-		/* And now we try again immediately after draining */
-		goto again;
-	} else if (rc == -ENXIO) {
+		return -EAGAIN;
+	case -EBUSY:
+		/* Additional folio references. */
+		if (drain_lru(&drain_lru_called))
+			goto again;
+		return -EAGAIN;
+	case -ENXIO:
 		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
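
For illustration only, below is a minimal userspace sketch of the retry pattern the new helper enables: try once, drain the local cache and retry immediately, and after a system-wide drain give up and report -EAGAIN. The stubbed try_make_secure() and the drain counters are hypothetical stand-ins; only the drain_lru() control flow mirrors the diff above.

/* build: cc -Wall -o drain_demo drain_demo.c */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int local_drains, global_drains;

/* Same control flow as the new kernel helper: drain the local LRU cache on
 * the first call, the caches of all CPUs on the second, then give up. */
static bool drain_lru(bool *drain_lru_called)
{
	if (*drain_lru_called) {
		global_drains++;	/* stands in for lru_add_drain_all() */
		return false;		/* give up, don't retry immediately */
	}
	local_drains++;			/* stands in for lru_add_drain() */
	*drain_lru_called = true;
	return true;			/* caller should retry immediately */
}

/* Hypothetical stand-in for the UVC path: pretend extra folio references
 * keep the conversion busy until a local drain has happened. */
static int try_make_secure(void)
{
	return local_drains ? 0 : -EBUSY;
}

int main(void)
{
	bool drain_lru_called = false;
	int rc;

again:
	rc = try_make_secure();
	if (rc == -EBUSY && drain_lru(&drain_lru_called))
		goto again;	/* same retry shape as gmap_make_secure() */

	printf("rc=%d, local drains=%d, global drains=%d\n",
	       rc, local_drains, global_drains);
	return rc ? 1 : 0;
}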
