Skip to content

Commit f7c6a7b

Browse files
author
Roland Dreier
committed
IB/uverbs: Export ib_umem_get()/ib_umem_release() to modules
Export ib_umem_get()/ib_umem_release() and put low-level drivers in control of when to call ib_umem_get() to pin and DMA map userspace memory, rather than always calling it in ib_uverbs_reg_mr() before calling the low-level driver's reg_user_mr method.

Also move these functions to be in the ib_core module instead of ib_uverbs, so that driver modules using them do not depend on ib_uverbs.

This has a number of advantages:

- It is better design from the standpoint of making generic code a library that can be used or overridden by device-specific code as the details of specific devices dictate.

- Drivers that do not need to pin userspace memory regions do not need to take the performance hit of calling ib_umem_get(). For example, although I have not tried to implement it in this patch, the ipath driver should be able to avoid pinning memory and just use copy_{to,from}_user() to access userspace memory regions.

- Buffers that need special mapping treatment can be identified by the low-level driver. For example, it may be possible to solve some Altix-specific memory ordering issues with mthca CQs in userspace by mapping CQ buffers with extra flags.

- Drivers that need to pin and DMA map userspace memory for things other than memory regions can use ib_umem_get() directly, instead of hacks using extra parameters to their reg_phys_mr method. For example, the mlx4 driver that is pending being merged needs to pin and DMA map QP and CQ buffers, but it does not need to create a memory key for these buffers. So the cleanest solution is for mlx4 to call ib_umem_get() in the create_qp and create_cq methods.

Signed-off-by: Roland Dreier <[email protected]>
1 parent 36f021b commit f7c6a7b

File tree

20 files changed

+355
-202
lines changed

20 files changed

+355
-202
lines changed

drivers/infiniband/Kconfig

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,11 @@ config INFINIBAND_USER_ACCESS
2929
libibverbs, libibcm and a hardware driver library from
3030
<http://www.openib.org>.
3131

32+
config INFINIBAND_USER_MEM
33+
bool
34+
depends on INFINIBAND_USER_ACCESS != n
35+
default y
36+
3237
config INFINIBAND_ADDR_TRANS
3338
bool
3439
depends on INFINIBAND && INET

drivers/infiniband/core/Makefile

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
99

1010
ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
1111
device.o fmr_pool.o cache.o
12+
ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
1213

1314
ib_mad-y := mad.o smi.o agent.o mad_rmpp.o
1415

@@ -28,5 +29,4 @@ ib_umad-y := user_mad.o
2829

2930
ib_ucm-y := ucm.o
3031

31-
ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_mem.o \
32-
uverbs_marshall.o
32+
ib_uverbs-y := uverbs_main.o uverbs_cmd.o uverbs_marshall.o

drivers/infiniband/core/device.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -613,6 +613,8 @@ static void __exit ib_core_cleanup(void)
613613
{
614614
ib_cache_cleanup();
615615
ib_sysfs_cleanup();
616+
/* Make sure that any pending umem accounting work is done. */
617+
flush_scheduled_work();
616618
}
617619

618620
module_init(ib_core_init);

drivers/infiniband/core/uverbs_mem.c renamed to drivers/infiniband/core/umem.c

Lines changed: 92 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -64,35 +64,56 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
6464
}
6565
}
6666

67-
int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
68-
void *addr, size_t size, int write)
67+
/**
68+
* ib_umem_get - Pin and DMA map userspace memory.
69+
* @context: userspace context to pin memory for
70+
* @addr: userspace virtual address to start at
71+
* @size: length of region to pin
72+
* @access: IB_ACCESS_xxx flags for memory being pinned
73+
*/
74+
struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
75+
size_t size, int access)
6976
{
77+
struct ib_umem *umem;
7078
struct page **page_list;
7179
struct ib_umem_chunk *chunk;
7280
unsigned long locked;
7381
unsigned long lock_limit;
7482
unsigned long cur_base;
7583
unsigned long npages;
76-
int ret = 0;
84+
int ret;
7785
int off;
7886
int i;
7987

8088
if (!can_do_mlock())
81-
return -EPERM;
89+
return ERR_PTR(-EPERM);
8290

83-
page_list = (struct page **) __get_free_page(GFP_KERNEL);
84-
if (!page_list)
85-
return -ENOMEM;
91+
umem = kmalloc(sizeof *umem, GFP_KERNEL);
92+
if (!umem)
93+
return ERR_PTR(-ENOMEM);
8694

87-
mem->user_base = (unsigned long) addr;
88-
mem->length = size;
89-
mem->offset = (unsigned long) addr & ~PAGE_MASK;
90-
mem->page_size = PAGE_SIZE;
91-
mem->writable = write;
95+
umem->context = context;
96+
umem->length = size;
97+
umem->offset = addr & ~PAGE_MASK;
98+
umem->page_size = PAGE_SIZE;
99+
/*
100+
* We ask for writable memory if any access flags other than
101+
* "remote read" are set. "Local write" and "remote write"
102+
* obviously require write access. "Remote atomic" can do
103+
* things like fetch and add, which will modify memory, and
104+
* "MW bind" can change permissions by binding a window.
105+
*/
106+
umem->writable = !!(access & ~IB_ACCESS_REMOTE_READ);
92107

93-
INIT_LIST_HEAD(&mem->chunk_list);
108+
INIT_LIST_HEAD(&umem->chunk_list);
109+
110+
page_list = (struct page **) __get_free_page(GFP_KERNEL);
111+
if (!page_list) {
112+
kfree(umem);
113+
return ERR_PTR(-ENOMEM);
114+
}
94115

95-
npages = PAGE_ALIGN(size + mem->offset) >> PAGE_SHIFT;
116+
npages = PAGE_ALIGN(size + umem->offset) >> PAGE_SHIFT;
96117

97118
down_write(&current->mm->mmap_sem);
98119

@@ -104,13 +125,13 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
104125
goto out;
105126
}
106127

107-
cur_base = (unsigned long) addr & PAGE_MASK;
128+
cur_base = addr & PAGE_MASK;
108129

109130
while (npages) {
110131
ret = get_user_pages(current, current->mm, cur_base,
111132
min_t(int, npages,
112133
PAGE_SIZE / sizeof (struct page *)),
113-
1, !write, page_list, NULL);
134+
1, !umem->writable, page_list, NULL);
114135

115136
if (ret < 0)
116137
goto out;
@@ -136,7 +157,7 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
136157
chunk->page_list[i].length = PAGE_SIZE;
137158
}
138159

139-
chunk->nmap = ib_dma_map_sg(dev,
160+
chunk->nmap = ib_dma_map_sg(context->device,
140161
&chunk->page_list[0],
141162
chunk->nents,
142163
DMA_BIDIRECTIONAL);
@@ -151,33 +172,25 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
151172

152173
ret -= chunk->nents;
153174
off += chunk->nents;
154-
list_add_tail(&chunk->list, &mem->chunk_list);
175+
list_add_tail(&chunk->list, &umem->chunk_list);
155176
}
156177

157178
ret = 0;
158179
}
159180

160181
out:
161-
if (ret < 0)
162-
__ib_umem_release(dev, mem, 0);
163-
else
182+
if (ret < 0) {
183+
__ib_umem_release(context->device, umem, 0);
184+
kfree(umem);
185+
} else
164186
current->mm->locked_vm = locked;
165187

166188
up_write(&current->mm->mmap_sem);
167189
free_page((unsigned long) page_list);
168190

169-
return ret;
170-
}
171-
172-
void ib_umem_release(struct ib_device *dev, struct ib_umem *umem)
173-
{
174-
__ib_umem_release(dev, umem, 1);
175-
176-
down_write(&current->mm->mmap_sem);
177-
current->mm->locked_vm -=
178-
PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
179-
up_write(&current->mm->mmap_sem);
191+
return ret < 0 ? ERR_PTR(ret) : umem;
180192
}
193+
EXPORT_SYMBOL(ib_umem_get);
181194

182195
static void ib_umem_account(struct work_struct *_work)
183196
{
@@ -191,35 +204,70 @@ static void ib_umem_account(struct work_struct *_work)
191204
kfree(work);
192205
}
193206

194-
void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem)
207+
/**
208+
* ib_umem_release - release memory pinned with ib_umem_get
209+
* @umem: umem struct to release
210+
*/
211+
void ib_umem_release(struct ib_umem *umem)
195212
{
196213
struct ib_umem_account_work *work;
214+
struct ib_ucontext *context = umem->context;
197215
struct mm_struct *mm;
216+
unsigned long diff;
198217

199-
__ib_umem_release(dev, umem, 1);
218+
__ib_umem_release(umem->context->device, umem, 1);
200219

201220
mm = get_task_mm(current);
202221
if (!mm)
203222
return;
204223

224+
diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
225+
kfree(umem);
226+
205227
/*
206228
* We may be called with the mm's mmap_sem already held. This
207229
* can happen when a userspace munmap() is the call that drops
208230
* the last reference to our file and calls our release
209231
* method. If there are memory regions to destroy, we'll end
210-
* up here and not be able to take the mmap_sem. Therefore we
211-
* defer the vm_locked accounting to the system workqueue.
232+
* up here and not be able to take the mmap_sem. In that case
233+
* we defer the vm_locked accounting to the system workqueue.
212234
*/
235+
if (context->closing && !down_write_trylock(&mm->mmap_sem)) {
236+
work = kmalloc(sizeof *work, GFP_KERNEL);
237+
if (!work) {
238+
mmput(mm);
239+
return;
240+
}
213241

214-
work = kmalloc(sizeof *work, GFP_KERNEL);
215-
if (!work) {
216-
mmput(mm);
242+
INIT_WORK(&work->work, ib_umem_account);
243+
work->mm = mm;
244+
work->diff = diff;
245+
246+
schedule_work(&work->work);
217247
return;
218-
}
248+
} else
249+
down_write(&mm->mmap_sem);
250+
251+
current->mm->locked_vm -= diff;
252+
up_write(&mm->mmap_sem);
253+
mmput(mm);
254+
}
255+
EXPORT_SYMBOL(ib_umem_release);
256+
257+
int ib_umem_page_count(struct ib_umem *umem)
258+
{
259+
struct ib_umem_chunk *chunk;
260+
int shift;
261+
int i;
262+
int n;
263+
264+
shift = ilog2(umem->page_size);
219265

220-
INIT_WORK(&work->work, ib_umem_account);
221-
work->mm = mm;
222-
work->diff = PAGE_ALIGN(umem->length + umem->offset) >> PAGE_SHIFT;
266+
n = 0;
267+
list_for_each_entry(chunk, &umem->chunk_list, list)
268+
for (i = 0; i < chunk->nmap; ++i)
269+
n += sg_dma_len(&chunk->page_list[i]) >> shift;
223270

224-
schedule_work(&work->work);
271+
return n;
225272
}
273+
EXPORT_SYMBOL(ib_umem_page_count);

drivers/infiniband/core/uverbs.h

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,7 @@
4545
#include <linux/completion.h>
4646

4747
#include <rdma/ib_verbs.h>
48+
#include <rdma/ib_umem.h>
4849
#include <rdma/ib_user_verbs.h>
4950

5051
/*
@@ -163,11 +164,6 @@ void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr);
163164
void ib_uverbs_event_handler(struct ib_event_handler *handler,
164165
struct ib_event *event);
165166

166-
int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
167-
void *addr, size_t size, int write);
168-
void ib_umem_release(struct ib_device *dev, struct ib_umem *umem);
169-
void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem);
170-
171167
#define IB_UVERBS_DECLARE_CMD(name) \
172168
ssize_t ib_uverbs_##name(struct ib_uverbs_file *file, \
173169
const char __user *buf, int in_len, \

0 commit comments

Comments
 (0)