Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 0 additions & 2 deletions block/blk-core.c
Original file line number Diff line number Diff line change
Expand Up @@ -421,8 +421,6 @@ struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)

q->node = node_id;

atomic_set(&q->nr_active_requests_shared_tags, 0);

timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
INIT_WORK(&q->timeout_work, blk_timeout_work);
INIT_LIST_HEAD(&q->icq_list);
Expand Down
22 changes: 21 additions & 1 deletion block/blk-mq-debugfs.c
Original file line number Diff line number Diff line change
Expand Up @@ -468,11 +468,31 @@ static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
return 0;
}

/* Context handed to hctx_count_active() through blk_mq_all_tag_iter(). */
struct count_active_params {
	struct blk_mq_hw_ctx *hctx;	/* hctx whose in-flight requests we count */
	int *active;			/* running tally, owned by the caller */
};

/*
 * Tag-iterator callback: bump the caller's tally for every request that
 * belongs to the target hctx.  Always returns true so iteration covers
 * the whole tag space.
 */
static bool hctx_count_active(struct request *rq, void *data)
{
	const struct count_active_params *p = data;

	if (rq->mq_hctx != p->hctx)
		return true;

	(*p->active)++;
	return true;
}

/*
 * debugfs "active" attribute: report how many requests are currently
 * in flight on this hctx.
 *
 * The count is derived by walking the tag space (sched tags when an
 * elevator is attached, driver tags otherwise) instead of the removed
 * __blk_mq_active_requests() accounting; keeping the stale seq_printf()
 * of that helper alongside the iterator-based count would emit a
 * duplicate line and reference a deleted symbol.
 */
static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	int active = 0;
	struct count_active_params params = { .hctx = hctx, .active = &active };

	blk_mq_all_tag_iter(hctx->sched_tags ?: hctx->tags, hctx_count_active,
			    &params);

	seq_printf(m, "%d\n", active);
	return 0;
}

Expand Down
4 changes: 0 additions & 4 deletions block/blk-mq-tag.c
Original file line number Diff line number Diff line change
Expand Up @@ -109,10 +109,6 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
struct sbitmap_queue *bt)
{
if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
!hctx_may_queue(data->hctx, bt))
return BLK_MQ_NO_TAG;

if (data->shallow_depth)
return sbitmap_queue_get_shallow(bt, data->shallow_depth);
else
Expand Down
17 changes: 1 addition & 16 deletions block/blk-mq.c
Original file line number Diff line number Diff line change
Expand Up @@ -489,8 +489,6 @@ __blk_mq_alloc_requests_batch(struct blk_mq_alloc_data *data)
}
} while (data->nr_tags > nr);

if (!(data->rq_flags & RQF_SCHED_TAGS))
blk_mq_add_active_requests(data->hctx, nr);
/* caller already holds a reference, add for remainder */
percpu_ref_get_many(&data->q->q_usage_counter, nr - 1);
data->nr_tags -= nr;
Expand Down Expand Up @@ -587,8 +585,6 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
goto retry;
}

if (!(data->rq_flags & RQF_SCHED_TAGS))
blk_mq_inc_active_requests(data->hctx);
rq = blk_mq_rq_ctx_init(data, blk_mq_tags_from_data(data), tag);
blk_mq_rq_time_init(rq, alloc_time_ns);
return rq;
Expand Down Expand Up @@ -763,8 +759,6 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
tag = blk_mq_get_tag(&data);
if (tag == BLK_MQ_NO_TAG)
goto out_queue_exit;
if (!(data.rq_flags & RQF_SCHED_TAGS))
blk_mq_inc_active_requests(data.hctx);
rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag);
blk_mq_rq_time_init(rq, alloc_time_ns);
rq->__data_len = 0;
Expand Down Expand Up @@ -807,10 +801,8 @@ static void __blk_mq_free_request(struct request *rq)
blk_pm_mark_last_busy(rq);
rq->mq_hctx = NULL;

if (rq->tag != BLK_MQ_NO_TAG) {
blk_mq_dec_active_requests(hctx);
if (rq->tag != BLK_MQ_NO_TAG)
blk_mq_put_tag(hctx->tags, ctx, rq->tag);
}
if (sched_tag != BLK_MQ_NO_TAG)
blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
blk_mq_sched_restart(hctx);
Expand Down Expand Up @@ -1188,8 +1180,6 @@ static inline void blk_mq_flush_tag_batch(struct blk_mq_hw_ctx *hctx,
{
struct request_queue *q = hctx->queue;

blk_mq_sub_active_requests(hctx, nr_tags);

blk_mq_put_tags(hctx->tags, tag_array, nr_tags);
percpu_ref_put_many(&q->q_usage_counter, nr_tags);
}
Expand Down Expand Up @@ -1875,17 +1865,13 @@ bool __blk_mq_alloc_driver_tag(struct request *rq)
if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
bt = &rq->mq_hctx->tags->breserved_tags;
tag_offset = 0;
} else {
if (!hctx_may_queue(rq->mq_hctx, bt))
return false;
}

tag = __sbitmap_queue_get(bt);
if (tag == BLK_MQ_NO_TAG)
return false;

rq->tag = tag + tag_offset;
blk_mq_inc_active_requests(rq->mq_hctx);
return true;
}

Expand Down Expand Up @@ -4058,7 +4044,6 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
if (!zalloc_cpumask_var_node(&hctx->cpumask, gfp, node))
goto free_hctx;

atomic_set(&hctx->nr_active, 0);
if (node == NUMA_NO_NODE)
node = set->numa_node;
hctx->numa_node = node;
Expand Down
100 changes: 0 additions & 100 deletions block/blk-mq.h
Original file line number Diff line number Diff line change
Expand Up @@ -291,70 +291,9 @@ static inline int blk_mq_get_rq_budget_token(struct request *rq)
return -1;
}

/*
 * Add @val to the active-request count.  Shared tag sets account at the
 * request_queue level; otherwise the counter is per-hctx.
 */
static inline void __blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
						int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_add(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_add(val, &hctx->nr_active);
}

/* Single-request increment variant of __blk_mq_add_active_requests(). */
static inline void __blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_add_active_requests(hctx, 1);
}

/*
 * Subtract @val from the active-request count; mirror image of
 * __blk_mq_add_active_requests().
 */
static inline void __blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
						int val)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		atomic_sub(val, &hctx->queue->nr_active_requests_shared_tags);
	else
		atomic_sub(val, &hctx->nr_active);
}

/* Single-request decrement variant of __blk_mq_sub_active_requests(). */
static inline void __blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	__blk_mq_sub_active_requests(hctx, 1);
}

/*
 * Accounting is only needed when the tag set is shared between request
 * queues (BLK_MQ_F_TAG_QUEUE_SHARED); skip the atomic otherwise.
 */
static inline void blk_mq_add_active_requests(struct blk_mq_hw_ctx *hctx,
					      int val)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_add_active_requests(hctx, val);
}

/* Increment the active-request count, shared-tag queues only. */
static inline void blk_mq_inc_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_inc_active_requests(hctx);
}

/* Subtract @val from the active-request count, shared-tag queues only. */
static inline void blk_mq_sub_active_requests(struct blk_mq_hw_ctx *hctx,
					      int val)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_sub_active_requests(hctx, val);
}

/* Decrement the active-request count, shared-tag queues only. */
static inline void blk_mq_dec_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		__blk_mq_dec_active_requests(hctx);
}

/*
 * Read the current active-request count: queue-wide for shared tag
 * sets, per-hctx otherwise.
 */
static inline int __blk_mq_active_requests(struct blk_mq_hw_ctx *hctx)
{
	if (blk_mq_is_shared_tags(hctx->flags))
		return atomic_read(&hctx->queue->nr_active_requests_shared_tags);
	return atomic_read(&hctx->nr_active);
}
/*
 * Release @rq's driver tag back to the hctx tag set and undo the
 * active-request accounting taken when the tag was allocated.
 */
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
					   struct request *rq)
{
	blk_mq_dec_active_requests(hctx);
	blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
	rq->tag = BLK_MQ_NO_TAG;	/* mark the driver tag as released */
}
Expand Down Expand Up @@ -396,45 +335,6 @@ static inline void blk_mq_free_requests(struct list_head *list)
}
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 *
 * Returns true when @hctx may take another tag from @bt, false when it
 * has reached its fair share and should back off.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	/* Fairness only applies when the tag set is shared between queues. */
	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		/* No contention recorded at queue level: no throttling. */
		if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return true;
	} else {
		/* Likewise for the per-hctx active marker. */
		if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return true;
	}

	users = READ_ONCE(hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return __blk_mq_active_requests(hctx) < depth;
}

/* run the code block in @dispatch_ops with rcu/srcu read lock held */
#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops) \
do { \
Expand Down
2 changes: 1 addition & 1 deletion drivers/scsi/scsi_error.c
Original file line number Diff line number Diff line change
Expand Up @@ -370,7 +370,7 @@ enum blk_eh_timer_return scsi_timeout(struct request *req)
*/
if (test_and_set_bit(SCMD_STATE_COMPLETE, &scmd->state))
return BLK_EH_DONE;
atomic_inc(&scmd->device->iodone_cnt);
percpu_counter_inc(&scmd->device->iodone_cnt);
if (scsi_abort_command(scmd) != SUCCESS) {
set_host_byte(scmd, DID_TIME_OUT);
scsi_eh_scmd_add(scmd);
Expand Down
8 changes: 4 additions & 4 deletions drivers/scsi/scsi_lib.c
Original file line number Diff line number Diff line change
Expand Up @@ -1554,7 +1554,7 @@ static void scsi_complete(struct request *rq)

INIT_LIST_HEAD(&cmd->eh_entry);

atomic_inc(&cmd->device->iodone_cnt);
percpu_counter_inc(&cmd->device->iodone_cnt);
if (cmd->result)
atomic_inc(&cmd->device->ioerr_cnt);

Expand Down Expand Up @@ -1592,7 +1592,7 @@ static enum scsi_qc_status scsi_dispatch_cmd(struct scsi_cmnd *cmd)
struct Scsi_Host *host = cmd->device->host;
int rtn = 0;

atomic_inc(&cmd->device->iorequest_cnt);
percpu_counter_inc(&cmd->device->iorequest_cnt);

/* check if the device is still usable */
if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
Expand All @@ -1614,7 +1614,7 @@ static enum scsi_qc_status scsi_dispatch_cmd(struct scsi_cmnd *cmd)
*/
SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
"queuecommand : device blocked\n"));
atomic_dec(&cmd->device->iorequest_cnt);
percpu_counter_dec(&cmd->device->iorequest_cnt);
return SCSI_MLQUEUE_DEVICE_BUSY;
}

Expand Down Expand Up @@ -1647,7 +1647,7 @@ static enum scsi_qc_status scsi_dispatch_cmd(struct scsi_cmnd *cmd)
trace_scsi_dispatch_cmd_start(cmd);
rtn = host->hostt->queuecommand(host, cmd);
if (rtn) {
atomic_dec(&cmd->device->iorequest_cnt);
percpu_counter_dec(&cmd->device->iorequest_cnt);
trace_scsi_dispatch_cmd_error(cmd, rtn);
if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
rtn != SCSI_MLQUEUE_TARGET_BUSY)
Expand Down
18 changes: 15 additions & 3 deletions drivers/scsi/scsi_scan.c
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/async.h>
#include <linux/topology.h>
#include <linux/slab.h>
#include <linux/unaligned.h>

Expand Down Expand Up @@ -286,9 +287,10 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
int display_failure_msg = 1, ret;
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct queue_limits lim;
int node = dev_to_node(shost->dma_dev);

sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
GFP_KERNEL);
sdev = kzalloc_node(sizeof(*sdev) + shost->transportt->device_size,
GFP_KERNEL, node);
if (!sdev)
goto out;

Expand Down Expand Up @@ -349,6 +351,15 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,

scsi_sysfs_device_initialize(sdev);

ret = percpu_counter_init(&sdev->iorequest_cnt, 0, GFP_KERNEL);
if (ret)
goto out_device_destroy;
ret = percpu_counter_init(&sdev->iodone_cnt, 0, GFP_KERNEL);
if (ret) {
percpu_counter_destroy(&sdev->iorequest_cnt);
goto out_device_destroy;
}

if (scsi_device_is_pseudo_dev(sdev))
return sdev;

Expand Down Expand Up @@ -501,8 +512,9 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
struct scsi_target *starget;
struct scsi_target *found_target;
int error, ref_got;
int node = dev_to_node(shost->dma_dev);

starget = kzalloc(size, GFP_KERNEL);
starget = kzalloc_node(size, GFP_KERNEL, node);
if (!starget) {
printk(KERN_ERR "%s: allocation failure\n", __func__);
return NULL;
Expand Down
27 changes: 23 additions & 4 deletions drivers/scsi/scsi_sysfs.c
Original file line number Diff line number Diff line change
Expand Up @@ -516,6 +516,10 @@ static void scsi_device_dev_release(struct device *dev)
if (vpd_pgb7)
kfree_rcu(vpd_pgb7, rcu);
kfree(sdev->inquiry);
if (percpu_counter_initialized(&sdev->iodone_cnt))
percpu_counter_destroy(&sdev->iodone_cnt);
if (percpu_counter_initialized(&sdev->iorequest_cnt))
percpu_counter_destroy(&sdev->iorequest_cnt);
kfree(sdev);

if (parent)
Expand Down Expand Up @@ -936,11 +940,26 @@ static ssize_t
show_iostat_counterbits(struct device *dev, struct device_attribute *attr,
char *buf)
{
return snprintf(buf, 20, "%d\n", (int)sizeof(atomic_t) * 8);
/*
* iorequest_cnt and iodone_cnt are per-CPU sums (s64); ioerr_cnt and
* iotmo_cnt remain atomic_t. Report the widest counter for tools.
*/
return snprintf(buf, 20, "%zu\n", sizeof(s64) * 8);
}

static DEVICE_ATTR(iocounterbits, S_IRUGO, show_iostat_counterbits, NULL);

/*
 * Generate a sysfs show routine plus DEVICE_ATTR for a per-CPU sdev
 * iostat counter (used for iorequest_cnt / iodone_cnt below).
 * percpu_counter_sum() folds the per-CPU deltas into a total; output
 * keeps the legacy "0x%llx" hex format of the atomic_t counters.
 */
#define show_sdev_iostat_percpu(field)					\
static ssize_t								\
show_iostat_##field(struct device *dev, struct device_attribute *attr,	\
		    char *buf)						\
{									\
	struct scsi_device *sdev = to_scsi_device(dev);			\
	unsigned long long count = percpu_counter_sum(&sdev->field);	\
	return snprintf(buf, 20, "0x%llx\n", count);			\
} \
static DEVICE_ATTR(field, 0444, show_iostat_##field, NULL)

#define show_sdev_iostat(field) \
static ssize_t \
show_iostat_##field(struct device *dev, struct device_attribute *attr, \
Expand All @@ -950,10 +969,10 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
unsigned long long count = atomic_read(&sdev->field); \
return snprintf(buf, 20, "0x%llx\n", count); \
} \
static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
static DEVICE_ATTR(field, 0444, show_iostat_##field, NULL)

show_sdev_iostat(iorequest_cnt);
show_sdev_iostat(iodone_cnt);
show_sdev_iostat_percpu(iorequest_cnt);
show_sdev_iostat_percpu(iodone_cnt);
show_sdev_iostat(ioerr_cnt);
show_sdev_iostat(iotmo_cnt);

Expand Down
Loading