@@ -291,70 +291,9 @@ static inline int blk_mq_get_rq_budget_token(struct request *rq)
 	return -1;
 }
 
-static inline void __blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
-		int val)
-{
-	if (blk_mq_is_shared_tags(hctx->flags))
-		atomic_add(val, &hctx->queue->nr_active_requests_shared_tags);
-	else
-		atomic_add(val, &hctx->nr_active);
-}
-
-static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
-{
-	__blk_mq_add_active_requests(hctx, 1);
-}
-
-static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
-		int val)
-{
-	if (blk_mq_is_shared_tags(hctx->flags))
-		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
-	else
-		atomic_sub(val, &hctx->nr_active);
-}
-
-static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
-{
-	__blk_mq_sub_active_requests(hctx, 1);
-}
-
-static inline void blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
-		int val)
-{
-	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
-		__blk_mq_add_active_requests(hctx, val);
-}
-
-static inline void blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
-{
-	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
-		__blk_mq_inc_active_requests(hctx);
-}
-
-static inline void blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
-		int val)
-{
-	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
-		__blk_mq_sub_active_requests(hctx, val);
-}
-
-static inline void blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
-{
-	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
-		__blk_mq_dec_active_requests(hctx);
-}
-
-static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
-{
-	if (blk_mq_is_shared_tags(hctx->flags))
-		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
-	return atomic_read(&hctx->nr_active);
-}
 static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
 		struct request *rq)
 {
-	blk_mq_dec_active_requests(hctx);
 	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
 	rq->tag = BLK_MQ_NO_TAG;
 }
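
With the first hunk applied, __blk_mq_put_driver_tag() only releases the driver tag; it no longer decrements an active-request counter. For readers unfamiliar with the helpers being deleted, the sketch below is a minimal userspace analogue of the accounting pattern they implemented: a single queue-wide counter when the tag set is shared, a per-hctx counter otherwise. It uses plain C11 atomics, and the struct, field, and function names are illustrative stand-ins only, not the kernel API.

/*
 * Minimal userspace sketch of the removed accounting pattern; the types,
 * fields, and function names here are illustrative stand-ins only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct queue {
	atomic_int nr_active_shared;	/* counter shared by all hctxs */
};

struct hctx {
	struct queue *q;
	bool shared_tags;		/* analogue of blk_mq_is_shared_tags() */
	atomic_int nr_active;		/* per-hctx counter */
};

static void add_active(struct hctx *h, int val)
{
	if (h->shared_tags)
		atomic_fetch_add(&h->q->nr_active_shared, val);
	else
		atomic_fetch_add(&h->nr_active, val);
}

static int active_requests(struct hctx *h)
{
	if (h->shared_tags)
		return atomic_load(&h->q->nr_active_shared);
	return atomic_load(&h->nr_active);
}

int main(void)
{
	struct queue q = { .nr_active_shared = 0 };
	struct hctx h = { .q = &q, .shared_tags = true, .nr_active = 0 };

	add_active(&h, 1);		/* request gets a driver tag */
	printf("active = %d\n", active_requests(&h));
	add_active(&h, -1);		/* tag released */
	printf("active = %d\n", active_requests(&h));
	return 0;
}
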
@@ -396,45 +335,6 @@ static inline void blk_mq_free_requests(struct list_head *list)
 	}
 }
 
-/*
- * For shared tag users, we track the number of currently active users
- * and attempt to provide a fair share of the tag depth for each of them.
- */
-static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
-		struct sbitmap_queue *bt)
-{
-	unsigned int depth, users;
-
-	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
-		return true;
-
-	/*
-	 * Don't try dividing an ant
-	 */
-	if (bt->sb.depth == 1)
-		return true;
-
-	if (blk_mq_is_shared_tags(hctx->flags)) {
-		struct request_queue *q = hctx->queue;
-
-		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
-			return true;
-	} else {
-		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
-			return true;
-	}
-
-	users = READ_ONCE(hctx->tags->active_queues);
-	if (!users)
-		return true;
-
-	/*
-	 * Allow at least some tags
-	 */
-	depth = max((bt->sb.depth + users - 1) / users, 4U);
-	return __blk_mq_active_requests(hctx) < depth;
-}
-
 /* run the code block in @dispatch_ops with rcu/srcu read lock held */
 #define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops)	\
 do {								\
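
The second hunk deletes hctx_may_queue(), and with it the fair-tag-sharing throttle: each of the `users` queues active on a shared tag set was limited to roughly ceil(depth / users) tags, but never fewer than 4, and that limit was compared against the counters removed in the first hunk. Below is a small standalone sketch of just that arithmetic, taken from the formula in the deleted code; the helper name is hypothetical.

#include <stdio.h>

/*
 * Fair-share limit as computed by the removed hctx_may_queue():
 * ceiling division of the sbitmap depth by the number of active users,
 * clamped to at least 4 tags ("allow at least some tags").
 */
static unsigned int fair_share_depth(unsigned int sb_depth, unsigned int users)
{
	unsigned int depth;

	if (sb_depth == 1 || users == 0)	/* nothing to divide up */
		return sb_depth;

	depth = (sb_depth + users - 1) / users;	/* ceil(sb_depth / users) */
	return depth < 4 ? 4 : depth;
}

int main(void)
{
	/* 256 shared tags across 8 active queues -> 32 tags each */
	printf("%u\n", fair_share_depth(256, 8));
	/* 16 shared tags across 8 active queues -> clamped to 4 */
	printf("%u\n", fair_share_depth(16, 8));
	return 0;
}

In the removed code, a hardware queue could allocate another tag only while __blk_mq_active_requests() stayed below this limit; dropping the limit is what makes the active-request counters removed in the first hunk unnecessary.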