@@ -73,9 +73,9 @@ static struct io_poll *io_poll_get_single(struct io_kiocb *req)
 
 static void io_poll_req_insert(struct io_kiocb *req)
 {
-	struct io_ring_ctx *ctx = req->ctx;
-	u32 index = hash_long(req->cqe.user_data, ctx->cancel_hash_bits);
-	struct io_hash_bucket *hb = &ctx->cancel_hash[index];
+	struct io_hash_table *table = &req->ctx->cancel_table;
+	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
+	struct io_hash_bucket *hb = &table->hbs[index];
 
 	spin_lock(&hb->lock);
 	hlist_add_head(&req->hash_node, &hb->list);
@@ -84,8 +84,9 @@ static void io_poll_req_insert(struct io_kiocb *req)
 
 static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
 {
-	u32 index = hash_long(req->cqe.user_data, ctx->cancel_hash_bits);
-	spinlock_t *lock = &ctx->cancel_hash[index].lock;
+	struct io_hash_table *table = &req->ctx->cancel_table;
+	u32 index = hash_long(req->cqe.user_data, table->hash_bits);
+	spinlock_t *lock = &table->hbs[index].lock;
 
 	spin_lock(lock);
 	hash_del(&req->hash_node);
@@ -539,13 +540,15 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
 __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
 			       bool cancel_all)
 {
+	struct io_hash_table *table = &ctx->cancel_table;
+	unsigned nr_buckets = 1U << table->hash_bits;
 	struct hlist_node *tmp;
 	struct io_kiocb *req;
 	bool found = false;
 	int i;
 
-	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
-		struct io_hash_bucket *hb = &ctx->cancel_hash[i];
+	for (i = 0; i < nr_buckets; i++) {
+		struct io_hash_bucket *hb = &table->hbs[i];
 
 		spin_lock(&hb->lock);
 		hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
@@ -562,12 +565,12 @@ __cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
 
 static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 				     struct io_cancel_data *cd,
-				     struct io_hash_bucket hash_table[],
+				     struct io_hash_table *table,
 				     struct io_hash_bucket **out_bucket)
 {
 	struct io_kiocb *req;
-	u32 index = hash_long(cd->data, ctx->cancel_hash_bits);
-	struct io_hash_bucket *hb = &hash_table[index];
+	u32 index = hash_long(cd->data, table->hash_bits);
+	struct io_hash_bucket *hb = &table->hbs[index];
 
 	*out_bucket = NULL;
 
@@ -591,16 +594,17 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
 
 static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
 					  struct io_cancel_data *cd,
-					  struct io_hash_bucket hash_table[],
+					  struct io_hash_table *table,
 					  struct io_hash_bucket **out_bucket)
 {
+	unsigned nr_buckets = 1U << table->hash_bits;
 	struct io_kiocb *req;
 	int i;
 
 	*out_bucket = NULL;
 
-	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
-		struct io_hash_bucket *hb = &hash_table[i];
+	for (i = 0; i < nr_buckets; i++) {
+		struct io_hash_bucket *hb = &table->hbs[i];
 
 		spin_lock(&hb->lock);
 		hlist_for_each_entry(req, &hb->list, hash_node) {
@@ -628,15 +632,15 @@ static bool io_poll_disarm(struct io_kiocb *req)
 }
 
 static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
-			    struct io_hash_bucket hash_table[])
+			    struct io_hash_table *table)
 {
 	struct io_hash_bucket *bucket;
 	struct io_kiocb *req;
 
 	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
-		req = io_poll_file_find(ctx, cd, ctx->cancel_hash, &bucket);
+		req = io_poll_file_find(ctx, cd, table, &bucket);
 	else
-		req = io_poll_find(ctx, false, cd, ctx->cancel_hash, &bucket);
+		req = io_poll_find(ctx, false, cd, table, &bucket);
 
 	if (req)
 		io_poll_cancel_req(req);
@@ -647,7 +651,7 @@ static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
 
 int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 {
-	return __io_poll_cancel(ctx, cd, ctx->cancel_hash);
+	return __io_poll_cancel(ctx, cd, &ctx->cancel_table);
 }
 
 static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
@@ -745,7 +749,7 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
 	int ret2, ret = 0;
 	bool locked;
 
-	preq = io_poll_find(ctx, true, &cd, ctx->cancel_hash, &bucket);
+	preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
 	if (preq)
 		ret2 = io_poll_disarm(preq);
 	if (bucket)