@@ -762,7 +762,7 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
 	size_t len = iov_iter_count(iter);
 	unsigned int nbytes = 0;
 	struct page *page;
-	int i;
+	int i, ret;
 
 	if (cmd->tvc_data_direction == DMA_FROM_DEVICE) {
 		cmd->saved_iter_addr = dup_iter(&cmd->saved_iter, iter,
@@ -775,15 +775,18 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
 		page = alloc_page(GFP_KERNEL);
 		if (!page) {
 			i--;
+			ret = -ENOMEM;
 			goto err;
 		}
 
 		nbytes = min_t(unsigned int, PAGE_SIZE, len);
 		sg_set_page(&sg[i], page, nbytes, 0);
 
 		if (cmd->tvc_data_direction == DMA_TO_DEVICE &&
-		    copy_page_from_iter(page, 0, nbytes, iter) != nbytes)
+		    copy_page_from_iter(page, 0, nbytes, iter) != nbytes) {
+			ret = -EFAULT;
 			goto err;
+		}
 
 		len -= nbytes;
 	}
@@ -798,7 +801,7 @@ vhost_scsi_copy_iov_to_sgl(struct vhost_scsi_cmd *cmd, struct iov_iter *iter,
 	for (; i >= 0; i--)
 		__free_page(sg_page(&sg[i]));
 	kfree(cmd->saved_iter_addr);
-	return -ENOMEM;
+	return ret;
 }
 
 static int
@@ -1282,9 +1285,9 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
 
 		if (data_direction != DMA_NONE) {
-			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
-						      &prot_iter, exp_data_len,
-						      &data_iter))) {
+			ret = vhost_scsi_mapal(cmd, prot_bytes, &prot_iter,
+					       exp_data_len, &data_iter);
+			if (unlikely(ret)) {
 				vq_err(vq, "Failed to map iov to sgl\n");
 				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
 				goto err;
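The change above makes vhost_scsi_copy_iov_to_sgl() report the error that actually occurred: a failed alloc_page() still yields -ENOMEM, but a short copy_page_from_iter() now yields -EFAULT, and the shared err label returns whichever code was recorded instead of a hard-coded -ENOMEM. The caller in vhost_scsi_handle_vq() is adjusted to capture that return value before the unlikely() check. Below is a minimal, self-contained sketch of the same record-then-unwind pattern in userspace C; the helper names (grab_buffer, fill_buffer, release_buffer) and the simulated short copy are invented for illustration and are not part of the vhost-scsi code.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for alloc_page()/copy_page_from_iter(); names are hypothetical. */
static void *buffers[8];

static bool grab_buffer(int idx)
{
	buffers[idx] = malloc(64);
	return buffers[idx] != NULL;
}

static bool fill_buffer(int idx, int nbytes)
{
	(void)nbytes;
	/* Pretend the copy from the caller-supplied iterator short-reads here. */
	return idx < 4;
}

static void release_buffer(int idx)
{
	free(buffers[idx]);
}

/*
 * Same shape as the patched vhost_scsi_copy_iov_to_sgl(): each failure site
 * records its own error code in 'ret' before jumping to the shared cleanup
 * label, which returns that code instead of a fixed -ENOMEM.
 */
static int copy_into_buffers(int count, int nbytes)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		if (!grab_buffer(i)) {
			i--;		/* nothing to free at this index */
			ret = -ENOMEM;	/* allocation failed */
			goto err;
		}
		if (!fill_buffer(i, nbytes)) {
			ret = -EFAULT;	/* copy failed; buffer i is still owned */
			goto err;
		}
	}
	return 0;

err:
	for (; i >= 0; i--)		/* unwind everything acquired so far */
		release_buffer(i);
	return ret;			/* propagate the specific error code */
}

int main(void)
{
	/* With the simulated short copy, this prints -EFAULT (-14), not -ENOMEM. */
	printf("copy_into_buffers() = %d\n", copy_into_buffers(8, 64));
	return 0;
}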