@@ -32,17 +32,6 @@ static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
3232#define IORING_MAX_FIXED_FILES (1U << 20)
3333#define IORING_MAX_REG_BUFFERS (1U << 14)
3434
35- static const struct io_mapped_ubuf dummy_ubuf = {
36- /* set invalid range, so io_import_fixed() fails meeting it */
37- .ubuf = -1UL ,
38- .len = UINT_MAX ,
39- };
40-
41- const struct io_rsrc_node empty_node = {
42- .type = IORING_RSRC_BUFFER ,
43- .buf = (struct io_mapped_ubuf * ) & dummy_ubuf ,
44- };
45-
4635int __io_account_mem (struct user_struct * user , unsigned long nr_pages )
4736{
4837 unsigned long page_limit , cur_pages , new_pages ;
@@ -116,7 +105,7 @@ static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
116105{
117106 unsigned int i ;
118107
119- if (node -> buf != & dummy_ubuf ) {
108+ if (node -> buf ) {
120109 struct io_mapped_ubuf * imu = node -> buf ;
121110
122111 if (!refcount_dec_and_test (& imu -> refs ))
@@ -265,20 +254,21 @@ static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
265254 err = io_buffer_validate (iov );
266255 if (err )
267256 break ;
268- if (!iov -> iov_base && tag ) {
269- err = - EINVAL ;
270- break ;
271- }
272257 node = io_sqe_buffer_register (ctx , iov , & last_hpage );
273258 if (IS_ERR (node )) {
274259 err = PTR_ERR (node );
275260 break ;
276261 }
262+ if (tag ) {
263+ if (!node ) {
264+ err = - EINVAL ;
265+ break ;
266+ }
267+ node -> tag = tag ;
268+ }
277269 i = array_index_nospec (up -> offset + done , ctx -> buf_table .nr );
278270 io_reset_rsrc_node (& ctx -> buf_table , i );
279271 ctx -> buf_table .nodes [i ] = node ;
280- if (tag )
281- node -> tag = tag ;
282272 if (ctx -> compat )
283273 user_data += sizeof (struct compat_iovec );
284274 else
@@ -591,8 +581,11 @@ static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
591581 /* check previously registered pages */
592582 for (i = 0 ; i < ctx -> buf_table .nr ; i ++ ) {
593583 struct io_rsrc_node * node = ctx -> buf_table .nodes [i ];
594- struct io_mapped_ubuf * imu = node -> buf ;
584+ struct io_mapped_ubuf * imu ;
595585
586+ if (!node )
587+ continue ;
588+ imu = node -> buf ;
596589 for (j = 0 ; j < imu -> nr_bvecs ; j ++ ) {
597590 if (!PageCompound (imu -> bvec [j ].bv_page ))
598591 continue ;
@@ -742,7 +735,7 @@ static struct io_rsrc_node *io_sqe_buffer_register(struct io_ring_ctx *ctx,
742735 bool coalesced ;
743736
744737 if (!iov -> iov_base )
745- return rsrc_empty_node ;
738+ return NULL ;
746739
747740 node = io_rsrc_node_alloc (ctx , IORING_RSRC_BUFFER );
748741 if (!node )
@@ -850,19 +843,20 @@ int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
850843 ret = - EFAULT ;
851844 break ;
852845 }
853- if (tag && !iov -> iov_base ) {
854- ret = - EINVAL ;
855- break ;
856- }
857846 }
858847
859848 node = io_sqe_buffer_register (ctx , iov , & last_hpage );
860849 if (IS_ERR (node )) {
861850 ret = PTR_ERR (node );
862851 break ;
863852 }
864- if (tag )
853+ if (tag ) {
854+ if (!node ) {
855+ ret = - EINVAL ;
856+ break ;
857+ }
865858 node -> tag = tag ;
859+ }
866860 data .nodes [i ] = node ;
867861 }
868862
@@ -957,8 +951,8 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
957951 struct io_rsrc_node * dst_node , * src_node ;
958952
959953 src_node = io_rsrc_node_lookup (& src_ctx -> buf_table , i );
960- if (src_node == rsrc_empty_node ) {
961- dst_node = rsrc_empty_node ;
954+ if (! src_node ) {
955+ dst_node = NULL ;
962956 } else {
963957 dst_node = io_rsrc_node_alloc (ctx , IORING_RSRC_BUFFER );
964958 if (!dst_node ) {