@@ -58,6 +58,7 @@ struct vmci_guest_device {
 
 	struct tasklet_struct datagram_tasklet;
 	struct tasklet_struct bm_tasklet;
+	struct wait_queue_head inout_wq;
 
 	void *data_buffer;
 	dma_addr_t data_buffer_base;
@@ -115,6 +116,36 @@ static void vmci_write_reg(struct vmci_guest_device *dev, u32 val, u32 reg)
 		iowrite32(val, dev->iobase + reg);
 }
 
+static void vmci_read_data(struct vmci_guest_device *vmci_dev,
+			   void *dest, size_t size)
+{
+	if (vmci_dev->mmio_base == NULL)
+		ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
+			    dest, size);
+	else {
+		/*
+		 * For DMA datagrams, the data_buffer will contain the header on the
+		 * first page, followed by the incoming datagram(s) on the following
+		 * pages. The header uses an S/G element immediately following the
+		 * header on the first page to point to the data area.
+		 */
+		struct vmci_data_in_out_header *buffer_header = vmci_dev->data_buffer;
+		struct vmci_sg_elem *sg_array = (struct vmci_sg_elem *)(buffer_header + 1);
+		size_t buffer_offset = dest - vmci_dev->data_buffer;
+
+		buffer_header->opcode = 1;
+		buffer_header->size = 1;
+		buffer_header->busy = 0;
+		sg_array[0].addr = vmci_dev->data_buffer_base + buffer_offset;
+		sg_array[0].size = size;
+
+		vmci_write_reg(vmci_dev, lower_32_bits(vmci_dev->data_buffer_base),
+			       VMCI_DATA_IN_LOW_ADDR);
+
+		wait_event(vmci_dev->inout_wq, buffer_header->busy == 1);
+	}
+}
+
 static int vmci_write_data(struct vmci_guest_device *dev,
 			   struct vmci_datagram *dg)
 {
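
The DMA path in vmci_read_data() drives the device through a small header at
the start of the first data_buffer page, immediately followed by an S/G array.
A minimal sketch of the layouts this code assumes is below; the real
definitions live in include/linux/vmw_vmci_defs.h, and the comments reflect
how the fields are used above rather than authoritative documentation:

struct vmci_data_in_out_header {
	u32 busy;	/* cleared by the guest; device sets it on completion */
	u32 opcode;	/* set to 1 above to request a datagram read */
	u32 size;	/* set to 1 above: one S/G element follows the header */
	u32 rsvd;
	u64 payload;
};

struct vmci_sg_elem {
	u64 addr;	/* guest-physical address of the data area */
	u64 size;	/* length of the data area in bytes */
};

Writing the buffer's base address to VMCI_DATA_IN_LOW_ADDR starts the
transfer; the caller then sleeps on inout_wq until the device flips busy to 1.
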
@@ -261,39 +292,59 @@ static int vmci_check_host_caps(struct pci_dev *pdev)
 }
 
 /*
- * Reads datagrams from the data in port and dispatches them. We
- * always start reading datagrams into only the first page of the
- * datagram buffer. If the datagrams don't fit into one page, we
- * use the maximum datagram buffer size for the remainder of the
- * invocation. This is a simple heuristic for not penalizing
- * small datagrams.
+ * Reads datagrams from the device and dispatches them. For IO port
+ * based access to the device, we always start reading datagrams into
+ * only the first page of the datagram buffer. If the datagrams don't
+ * fit into one page, we use the maximum datagram buffer size for the
+ * remainder of the invocation. This is a simple heuristic for not
+ * penalizing small datagrams. For DMA-based datagrams, we always
+ * use the maximum datagram buffer size, since there is no performance
+ * penalty for doing so.
  *
  * This function assumes that it has exclusive access to the data
- * in port for the duration of the call.
+ * in register(s) for the duration of the call.
  */
 static void vmci_dispatch_dgs(unsigned long data)
 {
 	struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
 	u8 *dg_in_buffer = vmci_dev->data_buffer;
 	struct vmci_datagram *dg;
 	size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
-	size_t current_dg_in_buffer_size = PAGE_SIZE;
+	size_t current_dg_in_buffer_size;
 	size_t remaining_bytes;
+	bool is_io_port = vmci_dev->mmio_base == NULL;
 
 	BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);
 
-	ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
-		    vmci_dev->data_buffer, current_dg_in_buffer_size);
+	if (!is_io_port) {
+		/* For mmio, the first page is used for the header. */
+		dg_in_buffer += PAGE_SIZE;
+
+		/*
+		 * For DMA-based datagram operations, there is no performance
+		 * penalty for reading the maximum buffer size.
+		 */
+		current_dg_in_buffer_size = VMCI_MAX_DG_SIZE;
+	} else {
+		current_dg_in_buffer_size = PAGE_SIZE;
+	}
+	vmci_read_data(vmci_dev, dg_in_buffer, current_dg_in_buffer_size);
 	dg = (struct vmci_datagram *)dg_in_buffer;
 	remaining_bytes = current_dg_in_buffer_size;
 
+	/*
+	 * Read through the buffer until an invalid datagram header is
+	 * encountered. The exit condition for datagrams read through
+	 * VMCI_DATA_IN_ADDR is a bit more complicated, since a datagram
+	 * can start on any page boundary in the buffer.
+	 */
 	while (dg->dst.resource != VMCI_INVALID_ID ||
-	       remaining_bytes > PAGE_SIZE) {
+	       (is_io_port && remaining_bytes > PAGE_SIZE)) {
 		unsigned dg_in_size;
 
 		/*
-		 * When the input buffer spans multiple pages, a datagram can
-		 * start on any page boundary in the buffer.
+		 * If using VMCI_DATA_IN_ADDR, skip to the next page
+		 * as a datagram can start on any page boundary.
 		 */
 		if (dg->dst.resource == VMCI_INVALID_ID) {
 			dg = (struct vmci_datagram *)roundup(
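
The exit-condition comment above is worth unpacking: in IO port mode a
datagram may begin on any page boundary, so an invalid header only ends the
scan once less than a full page remains. The skip that follows is truncated
at the hunk boundary; it rounds the cursor up to the next page, roughly as in
this sketch reconstructed from context (details may differ):

		if (dg->dst.resource == VMCI_INVALID_ID) {
			/* Skip to the next page boundary and rescan. */
			dg = (struct vmci_datagram *)roundup(
				(uintptr_t)dg + 1, PAGE_SIZE);
			remaining_bytes =
				(size_t)(dg_in_buffer +
					 current_dg_in_buffer_size -
					 (u8 *)dg);
			continue;
		}
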
@@ -343,11 +394,10 @@ static void vmci_dispatch_dgs(unsigned long data)
 				current_dg_in_buffer_size =
 					dg_in_buffer_size;
 
-			ioread8_rep(vmci_dev->iobase +
-				    VMCI_DATA_IN_ADDR,
-				    vmci_dev->data_buffer +
+			vmci_read_data(vmci_dev,
+				       dg_in_buffer +
 				       remaining_bytes,
-				    current_dg_in_buffer_size -
+				       current_dg_in_buffer_size -
 					remaining_bytes);
 		}
 
@@ -385,10 +435,8 @@ static void vmci_dispatch_dgs(unsigned long data)
 			current_dg_in_buffer_size = dg_in_buffer_size;
 
 		for (;;) {
-			ioread8_rep(vmci_dev->iobase +
-				    VMCI_DATA_IN_ADDR,
-				    vmci_dev->data_buffer,
-				    current_dg_in_buffer_size);
+			vmci_read_data(vmci_dev, dg_in_buffer,
+				       current_dg_in_buffer_size);
 			if (bytes_to_skip <= current_dg_in_buffer_size)
 				break;
 
@@ -405,8 +453,7 @@ static void vmci_dispatch_dgs(unsigned long data)
 		if (remaining_bytes < VMCI_DG_HEADERSIZE) {
 			/* Get the next batch of datagrams. */
 
-			ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
-				    vmci_dev->data_buffer,
+			vmci_read_data(vmci_dev, dg_in_buffer,
 				       current_dg_in_buffer_size);
 			dg = (struct vmci_datagram *)dg_in_buffer;
 			remaining_bytes = current_dg_in_buffer_size;
@@ -464,8 +511,11 @@ static irqreturn_t vmci_interrupt(int irq, void *_dev)
 		icr &= ~VMCI_ICR_NOTIFICATION;
 	}
 
-	if (icr & VMCI_ICR_DMA_DATAGRAM)
+
+	if (icr & VMCI_ICR_DMA_DATAGRAM) {
+		wake_up_all(&dev->inout_wq);
 		icr &= ~VMCI_ICR_DMA_DATAGRAM;
+	}
 
 	if (icr != 0)
 		dev_warn(dev->dev,
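
The wake-up added to vmci_interrupt() pairs with the wait_event() in
vmci_read_data(): the device sets the header's busy flag before raising
VMCI_ICR_DMA_DATAGRAM, and the handler wakes any sleeper so the condition is
re-checked. A minimal generic sketch of the same wait-queue pattern, using
hypothetical demo_* names that are not part of the driver:

#include <linux/interrupt.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static u32 demo_busy;	/* memory flag; the device writes 1 when done */

static void demo_transfer(void)
{
	demo_busy = 0;
	/* ...program the device; it sets demo_busy and fires the IRQ... */
	wait_event(demo_wq, demo_busy == 1);	/* sleep until flag is seen */
}

static irqreturn_t demo_irq(int irq, void *data)
{
	wake_up_all(&demo_wq);	/* wait_event re-evaluates its condition */
	return IRQ_HANDLED;
}
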
@@ -498,6 +548,10 @@ static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
  */
 static irqreturn_t vmci_interrupt_dma_datagram(int irq, void *_dev)
 {
+	struct vmci_guest_device *dev = _dev;
+
+	wake_up_all(&dev->inout_wq);
+
 	return IRQ_HANDLED;
 }
 
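
Note that there are deliberately two wake-up paths: with a dedicated MSI-X
vector for DMA datagrams, vmci_interrupt_dma_datagram() above fires; with a
shared vector, vmci_interrupt() sees VMCI_ICR_DMA_DATAGRAM in the ICR and
performs the same wake_up_all(). Either way the sleeper in vmci_read_data()
is woken.
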
@@ -584,6 +638,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
 		       vmci_dispatch_dgs, (unsigned long)vmci_dev);
 	tasklet_init(&vmci_dev->bm_tasklet,
 		     vmci_process_bitmap, (unsigned long)vmci_dev);
+	init_waitqueue_head(&vmci_dev->inout_wq);
 
 	if (mmio_base != NULL) {
 		vmci_dev->tx_buffer = dma_alloc_coherent(&pdev->dev, VMCI_DMA_DG_BUFFER_SIZE,