@@ -7,13 +7,12 @@
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
 
-void blk_mq_wait_for_tags(struct blk_mq_tags *tags, struct blk_mq_hw_ctx *hctx,
-			  bool reserved)
+void blk_mq_wait_for_tags(struct blk_mq_hw_ctx *hctx, bool reserved)
 {
 	int tag, zero = 0;
 
-	tag = blk_mq_get_tag(tags, hctx, &zero, __GFP_WAIT, reserved);
-	blk_mq_put_tag(tags, tag, &zero);
+	tag = blk_mq_get_tag(hctx, &zero, __GFP_WAIT, reserved);
+	blk_mq_put_tag(hctx, tag, &zero);
 }
 
 static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
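blk_mq_wait_for_tags() uses a get-then-put trick: allocating with __GFP_WAIT blocks until a tag frees up, and the tag is released immediately afterwards, so the function is purely a "wait until one is available" primitive. A minimal userspace sketch of the same pattern, with a POSIX semaphore standing in for the tag pool (the names tag_pool and wait_for_free_tag are illustrative, not kernel API):

#include <semaphore.h>
#include <stdio.h>

static sem_t tag_pool;			/* stand-in for the shared tag map */

/* Wait until at least one "tag" is free without keeping it: acquire
 * (sleeps while the pool is empty), then release immediately, mirroring
 * the blocking get-then-put in blk_mq_wait_for_tags(). */
static void wait_for_free_tag(void)
{
	sem_wait(&tag_pool);
	sem_post(&tag_pool);
}

int main(void)
{
	sem_init(&tag_pool, 0, 4);	/* pool starts with 4 free tags */
	wait_for_free_tag();		/* returns at once: tags are free */
	puts("a tag is available");
	return 0;
}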
@@ -40,6 +39,84 @@ bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
 	return bt_has_free_tags(&tags->bitmap_tags);
 }
 
+static inline void bt_index_inc(unsigned int *index)
+{
+	*index = (*index + 1) & (BT_WAIT_QUEUES - 1);
+}
+
+/*
+ * If a previously inactive queue goes active, bump the active user count.
+ */
+bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
+{
+	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
+	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+		atomic_inc(&hctx->tags->active_queues);
+
+	return true;
+}
+
+/*
+ * If a previously busy queue goes inactive, potential waiters could now
+ * be allowed to queue. Wake them up and check.
+ */
+void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
+{
+	struct blk_mq_tags *tags = hctx->tags;
+	struct blk_mq_bitmap_tags *bt;
+	int i, wake_index;
+
+	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+		return;
+
+	atomic_dec(&tags->active_queues);
+
+	/*
+	 * Will only throttle depth on non-reserved tags
+	 */
+	bt = &tags->bitmap_tags;
+	wake_index = bt->wake_index;
+	for (i = 0; i < BT_WAIT_QUEUES; i++) {
+		struct bt_wait_state *bs = &bt->bs[wake_index];
+
+		if (waitqueue_active(&bs->wait))
+			wake_up(&bs->wait);
+
+		bt_index_inc(&wake_index);
+	}
+}
+
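When a queue goes idle, the wake-up loop makes one full circular pass over all BT_WAIT_QUEUES wait queues starting at the current wake_index, so waiters parked on any of them get a chance to re-check the now-larger fair share. bt_index_inc() relies on BT_WAIT_QUEUES being a power of two, which makes the AND with BT_WAIT_QUEUES - 1 a cheap modulo. The traversal pattern in isolation (the queue names, starting index, and BT_WAIT_QUEUES = 8 are assumed here for illustration):

#include <assert.h>
#include <stdio.h>

#define BT_WAIT_QUEUES 8	/* assumed power-of-two example value */

int main(void)
{
	const char *bs[BT_WAIT_QUEUES] =
		{ "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7" };
	unsigned int i, wake_index = 5;	/* start mid-array */

	/* one full pass: visits q5..q7, then wraps around to q0..q4 */
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		printf("waking %s\n", bs[wake_index]);
		/* the masked increment wraps 7 -> 0, same as % 8 */
		wake_index = (wake_index + 1) & (BT_WAIT_QUEUES - 1);
		assert(wake_index < BT_WAIT_QUEUES);
	}
	return 0;
}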
+/*
+ * For shared tag users, we track the number of currently active users
+ * and attempt to provide a fair share of the tag depth for each of them.
+ */
+static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
+				  struct blk_mq_bitmap_tags *bt)
+{
+	unsigned int depth, users;
+
+	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
+		return true;
+	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+		return true;
+
+	/*
+	 * Don't try dividing an ant
+	 */
+	if (bt->depth == 1)
+		return true;
+
+	users = atomic_read(&hctx->tags->active_queues);
+	if (!users)
+		return true;
+
+	/*
+	 * Allow at least some tags
+	 */
+	depth = max((bt->depth + users - 1) / users, 4U);
+	return atomic_read(&hctx->nr_active) < depth;
+}
+
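hctx_may_queue() divides the tag depth evenly among active queues: (bt->depth + users - 1) / users is a ceiling division, and the result is clamped to at least four tags so a busy queue never starves entirely. The arithmetic as a standalone sketch (max() is open-coded here since the kernel macro isn't available in userspace):

#include <stdio.h>

/* Ceiling division with a floor of 4, as in hctx_may_queue(). */
static unsigned int fair_depth(unsigned int depth, unsigned int users)
{
	unsigned int share = (depth + users - 1) / users;

	return share > 4U ? share : 4U;	/* open-coded max(share, 4U) */
}

int main(void)
{
	/* 128 tags, 3 active queues: ceil(128 / 3) = 43 tags per queue */
	printf("%u\n", fair_depth(128, 3));
	/* 128 tags, 64 active queues: ceil gives 2, clamped up to 4 */
	printf("%u\n", fair_depth(128, 64));
	return 0;
}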
 static int __bt_get_word(struct blk_mq_bitmap *bm, unsigned int last_tag)
 {
 	int tag, org_last_tag, end;
@@ -78,11 +155,15 @@ static int __bt_get_word(struct blk_mq_bitmap *bm, unsigned int last_tag)
  * multiple users will tend to stick to different cachelines, at least
  * until the map is exhausted.
  */
-static int __bt_get(struct blk_mq_bitmap_tags *bt, unsigned int *tag_cache)
+static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
+		    unsigned int *tag_cache)
 {
 	unsigned int last_tag, org_last_tag;
 	int index, i, tag;
 
+	if (!hctx_may_queue(hctx, bt))
+		return -1;
+
 	last_tag = org_last_tag = *tag_cache;
 	index = TAG_TO_INDEX(bt, last_tag);
 
@@ -117,11 +198,6 @@ static int __bt_get(struct blk_mq_bitmap_tags *bt, unsigned int *tag_cache)
 	return tag;
 }
 
-static inline void bt_index_inc(unsigned int *index)
-{
-	*index = (*index + 1) & (BT_WAIT_QUEUES - 1);
-}
-
 static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
 					 struct blk_mq_hw_ctx *hctx)
 {
@@ -142,7 +218,7 @@ static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx,
 	DEFINE_WAIT(wait);
 	int tag;
 
-	tag = __bt_get(bt, last_tag);
+	tag = __bt_get(hctx, bt, last_tag);
 	if (tag != -1)
 		return tag;
 
@@ -156,7 +232,7 @@ static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx,
 		was_empty = list_empty(&wait.task_list);
 		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
 
-		tag = __bt_get(bt, last_tag);
+		tag = __bt_get(hctx, bt, last_tag);
 		if (tag != -1)
 			break;
 
@@ -200,14 +276,13 @@ static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
 	return tag;
 }
 
-unsigned int blk_mq_get_tag(struct blk_mq_tags *tags,
-			    struct blk_mq_hw_ctx *hctx, unsigned int *last_tag,
+unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag,
 			    gfp_t gfp, bool reserved)
 {
 	if (!reserved)
-		return __blk_mq_get_tag(tags, hctx, last_tag, gfp);
+		return __blk_mq_get_tag(hctx->tags, hctx, last_tag, gfp);
 
-	return __blk_mq_get_reserved_tag(tags, gfp);
+	return __blk_mq_get_reserved_tag(hctx->tags, gfp);
 }
 
 static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
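The narrowed blk_mq_get_tag() signature is worth spelling out from the caller's side: the tag map is always reachable as hctx->tags, so passing both let the two arguments disagree for no benefit. A sketch of the before/after call shape (variable names are illustrative):

/*
 *   old:  tag = blk_mq_get_tag(tags, hctx, &last_tag, gfp, reserved);
 *   new:  tag = blk_mq_get_tag(hctx, &last_tag, gfp, reserved);
 *
 * Every caller already holds an hctx, and hctx->tags is the only tag
 * map it may legally use, so the extra parameter bought nothing.
 */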
@@ -265,9 +340,11 @@ static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
 	bt_clear_tag(&tags->breserved_tags, tag);
 }
 
-void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag,
+void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
 		    unsigned int *last_tag)
 {
+	struct blk_mq_tags *tags = hctx->tags;
+
 	if (tag >= tags->nr_reserved_tags) {
 		const int real_tag = tag - tags->nr_reserved_tags;
 
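blk_mq_put_tag() keeps the split tag namespace: values below nr_reserved_tags index the reserved pool, and everything at or above it is offset down into the normal pool. A standalone illustration of the mapping (nr_reserved_tags = 2 is an assumed example value):

#include <stdio.h>

int main(void)
{
	const unsigned int nr_reserved_tags = 2;	/* assumed example */
	unsigned int tag;

	for (tag = 0; tag < 5; tag++) {
		if (tag >= nr_reserved_tags)	/* normal tag: remove offset */
			printf("tag %u -> normal pool slot %u\n",
			       tag, tag - nr_reserved_tags);
		else				/* reserved tag: used as-is */
			printf("tag %u -> reserved pool slot %u\n", tag, tag);
	}
	return 0;
}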
@@ -465,6 +542,7 @@ ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
 	res = bt_unused_tags(&tags->breserved_tags);
 
 	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
+	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));
 
 	return page - orig_page;
 }
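With the extra sprintf(), the sysfs tag listing gains an active_queues line. Based on the two format strings above, the tail of the output now looks like this (the numbers are illustrative):

nr_free=120, nr_reserved=2
active_queues=3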