@@ -211,72 +211,6 @@ static void loop_set_size(struct loop_device *lo, loff_t size)
 	kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
 }
 
-static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
-{
-	struct iov_iter i;
-	ssize_t bw;
-
-	iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len);
-
-	bw = vfs_iter_write(file, &i, ppos, 0);
-
-	if (likely(bw == bvec->bv_len))
-		return 0;
-
-	printk_ratelimited(KERN_ERR
-		"loop: Write error at byte offset %llu, length %i.\n",
-		(unsigned long long)*ppos, bvec->bv_len);
-	if (bw >= 0)
-		bw = -EIO;
-	return bw;
-}
-
-static int lo_write_simple(struct loop_device *lo, struct request *rq,
-		loff_t pos)
-{
-	struct bio_vec bvec;
-	struct req_iterator iter;
-	int ret = 0;
-
-	rq_for_each_segment(bvec, rq, iter) {
-		ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
-		if (ret < 0)
-			break;
-		cond_resched();
-	}
-
-	return ret;
-}
-
-static int lo_read_simple(struct loop_device *lo, struct request *rq,
-		loff_t pos)
-{
-	struct bio_vec bvec;
-	struct req_iterator iter;
-	struct iov_iter i;
-	ssize_t len;
-
-	rq_for_each_segment(bvec, rq, iter) {
-		iov_iter_bvec(&i, ITER_DEST, &bvec, 1, bvec.bv_len);
-		len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
-		if (len < 0)
-			return len;
-
-		flush_dcache_page(bvec.bv_page);
-
-		if (len != bvec.bv_len) {
-			struct bio *bio;
-
-			__rq_for_each_bio(bio, rq)
-				zero_fill_bio(bio);
-			break;
-		}
-		cond_resched();
-	}
-
-	return 0;
-}
-
 static void loop_clear_limits(struct loop_device *lo, int mode)
 {
 	struct queue_limits lim = queue_limits_start_update(lo->lo_queue);
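
The three helpers removed above made up the synchronous buffered-I/O path, looping over the request one segment at a time. As a rough userspace analogue of what lo_write_bvec() did (hypothetical code for illustration, not kernel code): write one segment at *pos and map a short write to -EIO.

#include <errno.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

/* Userspace sketch of the removed lo_write_bvec(): write one segment
 * at *pos, advance the position on success, and treat a short write
 * as -EIO, mirroring the kernel helper's error policy. */
static int write_segment(int fd, void *buf, size_t len, off_t *pos)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	ssize_t bw = pwritev(fd, &iov, 1, *pos);

	if (bw == (ssize_t)len) {
		*pos += bw;
		return 0;
	}
	fprintf(stderr, "write error at byte offset %lld, length %zu\n",
		(long long)*pos, len);
	return bw < 0 ? -errno : -EIO;
}

lo_write_simple() then just called this per bio_vec segment, with cond_resched() between segments.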
@@ -342,7 +276,7 @@ static void lo_complete_rq(struct request *rq)
 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
 	blk_status_t ret = BLK_STS_OK;
 
-	if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
+	if (cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
 	    req_op(rq) != REQ_OP_READ) {
 		if (cmd->ret < 0)
 			ret = errno_to_blk_status(cmd->ret);
@@ -358,14 +292,13 @@ static void lo_complete_rq(struct request *rq)
 		cmd->ret = 0;
 		blk_mq_requeue_request(rq, true);
 	} else {
-		if (cmd->use_aio) {
-			struct bio *bio = rq->bio;
+		struct bio *bio = rq->bio;
 
-			while (bio) {
-				zero_fill_bio(bio);
-				bio = bio->bi_next;
-			}
+		while (bio) {
+			zero_fill_bio(bio);
+			bio = bio->bi_next;
 		}
+
 		ret = BLK_STS_IOERR;
 end_io:
 		blk_mq_end_request(rq, ret);
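
A note on the else branch above: when a short read will not be retried, the driver zero-fills every remaining bio so upper layers see zeros instead of stale memory; the hunk only drops the cmd->use_aio gate, since every request now goes through the iocb path. A hypothetical userspace analogue of the same zero-fill policy:

#include <string.h>
#include <unistd.h>

/* Read len bytes at offset off; on a short read, zero the unread tail
 * so the caller never observes uninitialized data -- the same policy
 * lo_complete_rq() applies by zero-filling the leftover bios. */
static ssize_t read_zero_fill(int fd, void *buf, size_t len, off_t off)
{
	ssize_t n = pread(fd, buf, len, off);

	if (n < 0)
		return n;
	if ((size_t)n < len)
		memset((char *)buf + n, 0, len - (size_t)n);
	return (ssize_t)len;
}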
@@ -445,9 +378,14 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
 
 	cmd->iocb.ki_pos = pos;
 	cmd->iocb.ki_filp = file;
-	cmd->iocb.ki_complete = lo_rw_aio_complete;
-	cmd->iocb.ki_flags = IOCB_DIRECT;
 	cmd->iocb.ki_ioprio = req_get_ioprio(rq);
+	if (cmd->use_aio) {
+		cmd->iocb.ki_complete = lo_rw_aio_complete;
+		cmd->iocb.ki_flags = IOCB_DIRECT;
+	} else {
+		cmd->iocb.ki_complete = NULL;
+		cmd->iocb.ki_flags = 0;
+	}
 
 	if (rw == ITER_SOURCE)
 		ret = file->f_op->write_iter(&cmd->iocb, &iter);
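
This hunk is the core of the change: lo_rw_aio() now serves both modes, picking direct I/O with an asynchronous completion callback when use_aio is set, and synchronous buffered I/O (ki_flags = 0, no callback) otherwise. The closest userspace equivalent of that switch is the O_DIRECT open flag; a minimal sketch, where the file name and the 4 KiB alignment are assumptions:

#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Open a backing file for direct or buffered reads.  With O_DIRECT the
 * buffer, offset and length must be block-aligned, hence the
 * posix_memalign() allocation. */
int main(int argc, char **argv)
{
	int flags = O_RDWR | (argc > 1 ? O_DIRECT : 0);	/* any arg => direct */
	int fd = open("backing.img", flags);		/* hypothetical image */
	void *buf;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (posix_memalign(&buf, 4096, 4096)) {		/* 4 KiB assumed */
		close(fd);
		return 1;
	}
	if (pread(fd, buf, 4096, 0) < 0)
		perror("pread");
	free(buf);
	close(fd);
	return 0;
}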
@@ -458,23 +396,14 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
 
 	if (ret != -EIOCBQUEUED)
 		lo_rw_aio_complete(&cmd->iocb, ret);
-	return 0;
+	return -EIOCBQUEUED;
 }
 
 static int do_req_filebacked(struct loop_device *lo, struct request *rq)
 {
 	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
 	loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;
 
-	/*
-	 * lo_write_simple and lo_read_simple should have been covered
-	 * by io submit style function like lo_rw_aio(), one blocker
-	 * is that lo_read_simple() need to call flush_dcache_page after
-	 * the page is written from kernel, and it isn't easy to handle
-	 * this in io submit style function which submits all segments
-	 * of the req at one time. And direct read IO doesn't need to
-	 * run flush_dcache_page().
-	 */
 	switch (req_op(rq)) {
 	case REQ_OP_FLUSH:
 		return lo_req_flush(lo, rq);
@@ -490,15 +419,9 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
 	case REQ_OP_DISCARD:
 		return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
 	case REQ_OP_WRITE:
-		if (cmd->use_aio)
-			return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
-		else
-			return lo_write_simple(lo, rq, pos);
+		return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
 	case REQ_OP_READ:
-		if (cmd->use_aio)
-			return lo_rw_aio(lo, cmd, pos, ITER_DEST);
-		else
-			return lo_read_simple(lo, rq, pos);
+		return lo_rw_aio(lo, cmd, pos, ITER_DEST);
 	default:
 		WARN_ON_ONCE(1);
 		return -EIO;
@@ -1922,7 +1845,6 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
 	struct loop_device *lo = rq->q->queuedata;
 	int ret = 0;
 	struct mem_cgroup *old_memcg = NULL;
-	const bool use_aio = cmd->use_aio;
 
 	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
 		ret = -EIO;
@@ -1952,7 +1874,7 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
 	}
 failed:
 	/* complete non-aio request */
-	if (!use_aio || ret) {
+	if (ret != -EIOCBQUEUED) {
 		if (ret == -EOPNOTSUPP)
 			cmd->ret = ret;
 		else
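
Since lo_rw_aio() now always returns -EIOCBQUEUED (see the earlier hunk), loop_handle_cmd() no longer needs the cached use_aio flag and can key inline completion off the return value alone. A simplified sketch of that convention, not the kernel code itself (EIOCBQUEUED's numeric value is kernel-internal):

/* Convention: a submit function returning -EIOCBQUEUED has handed the
 * request to an asynchronous completion callback; any other value means
 * the request finished inline and must be completed here. */
#define EIOCBQUEUED 529		/* kernel-internal errno, for illustration */

struct cmd {
	int ret;
	void (*complete)(struct cmd *cmd);
};

static void handle(struct cmd *cmd, int (*submit)(struct cmd *cmd))
{
	int ret = submit(cmd);

	if (ret != -EIOCBQUEUED) {
		cmd->ret = ret;		/* completed inline */
		cmd->complete(cmd);
	}
	/* else: the completion callback finishes the command later */
}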