
Commit bc40d73

Nick Piggin authored and Linus Torvalds committed
splice: use get_user_pages_fast
Use get_user_pages_fast in splice. This reverts some mmap_sem batching
there; however, the biggest problem with mmap_sem tends to be hold times
blocking out other threads rather than cacheline bouncing.

Further: on architectures that implement get_user_pages_fast without
locks, mmap_sem can be avoided completely anyway.

Signed-off-by: Nick Piggin <[email protected]>
Cc: Dave Kleikamp <[email protected]>
Cc: Andy Whitcroft <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Andi Kleen <[email protected]>
Cc: Dave Kleikamp <[email protected]>
Cc: Badari Pulavarty <[email protected]>
Cc: Zach Brown <[email protected]>
Cc: Jens Axboe <[email protected]>
Reviewed-by: Peter Zijlstra <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent f5dd33c commit bc40d73
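For context, the mechanical shape of the change is condensed below (a sketch assembled from the diff that follows, using the 2008-era kernel APIs; not a standalone compilable unit):

	/* Before: the caller held mmap_sem across the whole pinning loop and
	 * used the eight-argument get_user_pages() API. */
	down_read(&current->mm->mmap_sem);
	error = get_user_pages(current, current->mm,
			(unsigned long) base, npages, 0 /* !write */, 0 /* !force */,
			&pages[buffers], NULL /* no vmas */);
	up_read(&current->mm->mmap_sem);

	/* After: get_user_pages_fast() takes no task/mm arguments and does
	 * its own locking, so the caller never touches mmap_sem. */
	error = get_user_pages_fast((unsigned long)base, npages,
			0 /* !write */, &pages[buffers]);

Dropping the semaphore from the caller is also what allows the copy_from_user_mmap_sem() helper to be deleted: with mmap_sem no longer held across the loop, a plain copy_from_user(), which may fault and sleep, is safe again.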


fs/splice.c

Lines changed: 3 additions & 38 deletions
@@ -1160,36 +1160,6 @@ static long do_splice(struct file *in, loff_t __user *off_in,
 		return -EINVAL;
 	}
 
-/*
- * Do a copy-from-user while holding the mmap_semaphore for reading, in a
- * manner safe from deadlocking with simultaneous mmap() (grabbing mmap_sem
- * for writing) and page faulting on the user memory pointed to by src.
- * This assumes that we will very rarely hit the partial != 0 path, or this
- * will not be a win.
- */
-static int copy_from_user_mmap_sem(void *dst, const void __user *src, size_t n)
-{
-	int partial;
-
-	if (!access_ok(VERIFY_READ, src, n))
-		return -EFAULT;
-
-	pagefault_disable();
-	partial = __copy_from_user_inatomic(dst, src, n);
-	pagefault_enable();
-
-	/*
-	 * Didn't copy everything, drop the mmap_sem and do a faulting copy
-	 */
-	if (unlikely(partial)) {
-		up_read(&current->mm->mmap_sem);
-		partial = copy_from_user(dst, src, n);
-		down_read(&current->mm->mmap_sem);
-	}
-
-	return partial;
-}
-
 /*
  * Map an iov into an array of pages and offset/length tupples. With the
  * partial_page structure, we can map several non-contiguous ranges into
@@ -1203,8 +1173,6 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 {
 	int buffers = 0, error = 0;
 
-	down_read(&current->mm->mmap_sem);
-
 	while (nr_vecs) {
 		unsigned long off, npages;
 		struct iovec entry;
@@ -1213,7 +1181,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 		int i;
 
 		error = -EFAULT;
-		if (copy_from_user_mmap_sem(&entry, iov, sizeof(entry)))
+		if (copy_from_user(&entry, iov, sizeof(entry)))
 			break;
 
 		base = entry.iov_base;
@@ -1247,9 +1215,8 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 		if (npages > PIPE_BUFFERS - buffers)
 			npages = PIPE_BUFFERS - buffers;
 
-		error = get_user_pages(current, current->mm,
-				(unsigned long) base, npages, 0, 0,
-				&pages[buffers], NULL);
+		error = get_user_pages_fast((unsigned long)base, npages,
+			0, &pages[buffers]);
 
 		if (unlikely(error <= 0))
 			break;
@@ -1288,8 +1255,6 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 		iov++;
 	}
 
-	up_read(&current->mm->mmap_sem);
-
 	if (buffers)
 		return buffers;
 
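For reference, the generic API being switched to was declared at the time as follows (quoted from the 2008-era include/linux/mm.h; treat the exact location as approximate):

	int get_user_pages_fast(unsigned long start, int nr_pages, int write,
				struct page **pages);

The default (weak) implementation simply takes mmap_sem for reading and calls get_user_pages(), so architectures without a fast path keep the old behaviour; architectures that provide one (x86 at the time) walk the page tables locklessly and avoid mmap_sem entirely, which is the win the commit message refers to.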