From 1b62849dce1f854d865c598a03c2eea1f28f5840 Mon Sep 17 00:00:00 2001
From: Sultan Alsawaf
Date: Mon, 9 Mar 2020 15:52:17 -0700
Subject: [PATCH] nvme: add nitpicks for cq_cpulist module parameter

Signed-off-by: Sultan Alsawaf
---
 drivers/nvme/host/pci.c | 44 +++++++++++++++++++++++++----------------
 1 file changed, 27 insertions(+), 17 deletions(-)

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a57c13ec6518..24188b27e569 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -73,7 +73,6 @@ static const struct kernel_param_ops cq_cpulist_ops = {
 };
 
 static cpumask_var_t cq_cpumask;
-static bool cq_cpumask_present;
 module_param_cb(cq_cpulist, &cq_cpulist_ops, NULL, 0);
 MODULE_PARM_DESC(cq_cpulist, "Completion queue IRQ affinity cpu list. "
 		 "By default spread IRQs across all online CPUs.");
@@ -140,21 +139,28 @@ static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
 
 static int cq_cpulist_set(const char *val, const struct kernel_param *kp)
 {
-	int err;
+	int ret;
 
 	if (!alloc_cpumask_var(&cq_cpumask, GFP_KERNEL))
 		return -ENOMEM;
 
-	err = cpulist_parse(val, cq_cpumask);
-	if (err < 0 || cpumask_last(cq_cpumask) >= nr_cpu_ids) {
-		pr_warn("nvme: can't parse cq_cpulist, skipping\n");
-		free_cpumask_var(cq_cpumask);
-		return 0;
-	}
+	ret = cpulist_parse(val, cq_cpumask);
+	if (ret || cpumask_last(cq_cpumask) >= nr_cpu_ids)
+		goto free_cpumask;
 
-	cq_cpumask_present = true;
+	get_online_cpus();
+	cpumask_and(cq_cpumask, cq_cpumask, cpu_online_mask);
+	put_online_cpus();
+	if (cpumask_empty(cq_cpumask))
+		goto free_cpumask;
 
 	return 0;
+
+free_cpumask:
+	pr_warn("nvme: can't parse cq_cpulist, skipping\n");
+	free_cpumask_var(cq_cpumask);
+	cq_cpumask = NULL;
+	return -EINVAL;
 }
 
 static inline unsigned int sq_idx(unsigned int qid, u32 stride)
@@ -457,9 +463,14 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	const struct cpumask *mask;
 	unsigned int queue, cpu;
+	int irq;
 
 	for (queue = 0; queue < set->nr_hw_queues; queue++) {
-		mask = irq_get_affinity_mask(pci_irq_vector(pdev, queue));
+		irq = pci_irq_vector(pdev, queue);
+		if (irq < 0)
+			goto fallback;
+
+		mask = irq_get_affinity_mask(irq);
 		if (!mask)
 			goto fallback;
 
@@ -471,8 +482,7 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
 
 fallback:
 	WARN_ON_ONCE(set->nr_hw_queues > 1);
-	for_each_possible_cpu(cpu)
-		set->mq_map[cpu] = 0;
+	memset(set->mq_map, 0, sizeof(*set->mq_map) * nr_cpu_ids);
 	return 0;
 }
 
@@ -1972,7 +1982,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	int result, nr_io_queues;
 	unsigned long size;
 
-	if (cq_cpumask_present)
+	if (cq_cpumask)
 		nr_io_queues = cpumask_weight(cq_cpumask);
 	else
 		nr_io_queues = num_possible_cpus();
@@ -2012,7 +2022,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	pci_free_irq_vectors(pdev);
 	nr_io_queues = pci_alloc_irq_vectors(pdev, 1, nr_io_queues,
 			PCI_IRQ_ALL_TYPES |
-			(cq_cpumask_present ? 0 : PCI_IRQ_AFFINITY));
+			(cq_cpumask ? 0 : PCI_IRQ_AFFINITY));
 	if (nr_io_queues <= 0)
 		return -EIO;
 	dev->max_qid = nr_io_queues;
@@ -2034,7 +2044,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	if (result)
 		return result;
 
-	if (cq_cpumask_present) {
+	if (cq_cpumask) {
 		unsigned int cpu = cpumask_next(-1, cq_cpumask);
 		int cq_vector;
 
@@ -2046,7 +2056,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 		}
 	}
 
-	return result;
+	return 0;
 }
 
 static void nvme_del_queue_end(struct request *req, blk_status_t error)
@@ -2917,7 +2927,7 @@ static void __exit nvme_exit(void)
 	pci_unregister_driver(&nvme_driver);
 	flush_workqueue(nvme_wq);
 	_nvme_check_size();
-	if (cq_cpumask_present)
+	if (cq_cpumask)
 		free_cpumask_var(cq_cpumask);
 }
-- 
2.20.1
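
A usage sketch, not part of the patch itself: assuming the driver is built
as a module, the parameter takes a standard kernel cpu list (the syntax
accepted by cpulist_parse(), i.e. ranges and comma-separated groups) at
load time; for a built-in driver the same value goes on the kernel command
line with the nvme. prefix:

	# module load: pin completion-queue IRQs to CPUs 0-3
	modprobe nvme cq_cpulist=0-3

	# built-in driver: equivalent kernel command-line option
	nvme.cq_cpulist=0-3

Note that with this change an unparsable cpu list, or one containing no
online CPUs, makes the set callback return -EINVAL rather than being
silently skipped, so a bad value should now cause module load to fail.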