Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/acpi_watchdog.c | 2
-rw-r--r-- drivers/acpi/property.c | 24
-rw-r--r-- drivers/base/dma-coherent.c | 19
-rw-r--r-- drivers/base/power/main.c | 9
-rw-r--r-- drivers/base/power/opp/core.c | 7
-rw-r--r-- drivers/base/power/qos.c | 10
-rw-r--r-- drivers/char/tpm/tpm-interface.c | 10
-rw-r--r-- drivers/char/tpm/tpm.h | 9
-rw-r--r-- drivers/char/tpm/tpm2-cmd.c | 2
-rw-r--r-- drivers/char/tpm/tpm_crb.c | 2
-rw-r--r-- drivers/char/tpm/tpm_ibmvtpm.c | 98
-rw-r--r-- drivers/char/tpm/tpm_infineon.c | 6
-rw-r--r-- drivers/char/tpm/tpm_tis_core.c | 8
-rw-r--r-- drivers/clocksource/timer-integrator-ap.c | 4
-rw-r--r-- drivers/cpuidle/cpuidle-arm.c | 6
-rw-r--r-- drivers/crypto/caam/Kconfig | 5
-rw-r--r-- drivers/crypto/caam/ctrl.c | 19
-rw-r--r-- drivers/crypto/caam/regs.h | 59
-rw-r--r-- drivers/crypto/inside-secure/safexcel_cipher.c | 2
-rw-r--r-- drivers/crypto/inside-secure/safexcel_hash.c | 2
-rw-r--r-- drivers/crypto/talitos.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.c | 36
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.h | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 20
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fbdev.h | 10
-rw-r--r-- drivers/gpu/drm/exynos/exynos_hdmi.c | 14
-rw-r--r-- drivers/gpu/drm/i915/gvt/cfg_space.c | 113
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi.c | 11
-rw-r--r-- drivers/gpu/drm/i915/intel_panel.c | 4
-rw-r--r-- drivers/infiniband/core/verbs.c | 4
-rw-r--r-- drivers/infiniband/hw/bnxt_re/bnxt_re.h | 14
-rw-r--r-- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 107
-rw-r--r-- drivers/infiniband/hw/bnxt_re/main.c | 28
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 4
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 3
-rw-r--r-- drivers/infiniband/hw/cxgb4/cm.c | 9
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw.h | 1
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_cm.c | 154
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_cm.h | 5
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_main.c | 39
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_utils.c | 6
-rw-r--r-- drivers/infiniband/hw/i40iw/i40iw_verbs.c | 12
-rw-r--r-- drivers/infiniband/hw/mlx5/main.c | 6
-rw-r--r-- drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 3
-rw-r--r-- drivers/infiniband/hw/vmw_pvrdma/pvrdma.h | 31
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_cm.c | 16
-rw-r--r-- drivers/input/ff-core.c | 13
-rw-r--r-- drivers/input/misc/uinput.c | 57
-rw-r--r-- drivers/input/mouse/elan_i2c_i2c.c | 2
-rw-r--r-- drivers/iommu/Kconfig | 5
-rw-r--r-- drivers/iommu/dmar.c | 2
-rw-r--r-- drivers/iommu/of_iommu.c | 5
-rw-r--r-- drivers/irqchip/irq-gic-v3.c | 8
-rw-r--r-- drivers/irqchip/irq-gic-v4.c | 12
-rw-r--r-- drivers/irqchip/irq-mips-gic.c | 6
-rw-r--r-- drivers/isdn/i4l/isdn_ppp.c | 37
-rw-r--r-- drivers/md/raid5.c | 13
-rw-r--r-- drivers/mmc/core/queue.c | 7
-rw-r--r-- drivers/mmc/host/Kconfig | 2
-rw-r--r-- drivers/mmc/host/cavium-thunderx.c | 6
-rw-r--r-- drivers/mtd/nand/lpc32xx_mlc.c | 3
-rw-r--r-- drivers/mtd/nand/nand_base.c | 3
-rw-r--r-- drivers/mtd/spi-nor/spi-nor.c | 45
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 52
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/fec.h | 4
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 8
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.c | 43
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 15
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 12
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 183
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 41
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 23
-rw-r--r-- drivers/net/ethernet/ibm/emac/mal.c | 3
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac-ethtool.c | 30
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac-mac.c | 22
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac.c | 3
-rw-r--r-- drivers/net/ethernet/qualcomm/emac/emac.h | 3
-rw-r--r-- drivers/net/ethernet/realtek/8139too.c | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 1
-rw-r--r-- drivers/net/hyperv/hyperv_net.h | 2
-rw-r--r-- drivers/net/hyperv/netvsc.c | 7
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 8
-rw-r--r-- drivers/net/phy/Kconfig | 18
-rw-r--r-- drivers/net/phy/phy.c | 3
-rw-r--r-- drivers/net/phy/phy_device.c | 2
-rw-r--r-- drivers/net/phy/xilinx_gmii2rgmii.c | 2
-rw-r--r-- drivers/net/usb/lan78xx.c | 34
-rw-r--r-- drivers/nvdimm/namespace_devs.c | 9
-rw-r--r-- drivers/pci/endpoint/functions/pci-epf-test.c | 13
-rw-r--r-- drivers/perf/arm_pmu_acpi.c | 1
-rw-r--r-- drivers/s390/block/dasd.c | 12
-rw-r--r-- drivers/s390/block/scm_blk.c | 6
-rw-r--r-- drivers/s390/cio/device.c | 12
-rw-r--r-- drivers/s390/cio/device.h | 1
-rw-r--r-- drivers/s390/cio/device_fsm.c | 12
-rw-r--r-- drivers/s390/cio/io_sch.h | 2
-rw-r--r-- drivers/scsi/arm/acornscsi.c | 6
-rw-r--r-- drivers/scsi/scsi_transport_fc.c | 2
-rw-r--r-- drivers/scsi/sd.c | 2
-rw-r--r-- drivers/scsi/sg.c | 64
-rw-r--r-- drivers/xen/xenbus/xenbus_client.c | 130
109 files changed, 1226 insertions(+), 779 deletions(-)
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index bf22c29d2517..11b113f8e367 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -66,7 +66,7 @@ void __init acpi_watchdog_init(void)
for (i = 0; i < wdat->entries; i++) {
const struct acpi_generic_address *gas;
struct resource_entry *rentry;
- struct resource res;
+ struct resource res = {};
bool found;
gas = &entries[i].register_region;
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index c1c216163de3..3fb8ff513461 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -908,11 +908,12 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
{
const struct acpi_device *adev = to_acpi_device_node(fwnode);
- struct acpi_device *child_adev = NULL;
const struct list_head *head;
struct list_head *next;
if (!child || is_acpi_device_node(child)) {
+ struct acpi_device *child_adev;
+
if (adev)
head = &adev->children;
else
@@ -922,8 +923,8 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
goto nondev;
if (child) {
- child_adev = to_acpi_device_node(child);
- next = child_adev->node.next;
+ adev = to_acpi_device_node(child);
+ next = adev->node.next;
if (next == head) {
child = NULL;
goto nondev;
@@ -941,8 +942,8 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
const struct acpi_data_node *data = to_acpi_data_node(fwnode);
struct acpi_data_node *dn;
- if (child_adev)
- head = &child_adev->data.subnodes;
+ if (adev)
+ head = &adev->data.subnodes;
else if (data)
head = &data->data.subnodes;
else
@@ -1293,3 +1294,16 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
DECLARE_ACPI_FWNODE_OPS(acpi_device_fwnode_ops);
DECLARE_ACPI_FWNODE_OPS(acpi_data_fwnode_ops);
const struct fwnode_operations acpi_static_fwnode_ops;
+
+bool is_acpi_device_node(const struct fwnode_handle *fwnode)
+{
+ return !IS_ERR_OR_NULL(fwnode) &&
+ fwnode->ops == &acpi_device_fwnode_ops;
+}
+EXPORT_SYMBOL(is_acpi_device_node);
+
+bool is_acpi_data_node(const struct fwnode_handle *fwnode)
+{
+ return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &acpi_data_fwnode_ops;
+}
+EXPORT_SYMBOL(is_acpi_data_node);
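The two helpers added above classify a fwnode purely by comparing its ops pointer with a known ops table, so no type tag needs to be stored in the handle itself. A minimal stand-alone sketch of that identity-based discrimination (the struct and names below are illustrative, not the kernel API):

#include <stdio.h>

struct ops { const char *name; };
struct handle { const struct ops *ops; };

static const struct ops device_ops = { "device" };
static const struct ops data_ops = { "data" };

/* Same shape as is_acpi_device_node(): a handle is a device node
 * iff it points at the device ops table. */
static int is_device_handle(const struct handle *h)
{
	return h && h->ops == &device_ops;
}

int main(void)
{
	struct handle dev = { &device_ops }, dat = { &data_ops };

	printf("dev: %d, dat: %d\n",
	       is_device_handle(&dev), is_device_handle(&dat)); /* 1, 0 */
	return 0;
}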
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index a39b2166b145..744f64f43454 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -348,16 +348,15 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
struct dma_coherent_mem *mem = rmem->priv;
int ret;
- if (!mem)
- return -ENODEV;
-
- ret = dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
- DMA_MEMORY_EXCLUSIVE, &mem);
-
- if (ret) {
- pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
- &rmem->base, (unsigned long)rmem->size / SZ_1M);
- return ret;
+ if (!mem) {
+ ret = dma_init_coherent_memory(rmem->base, rmem->base,
+ rmem->size,
+ DMA_MEMORY_EXCLUSIVE, &mem);
+ if (ret) {
+ pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
+ &rmem->base, (unsigned long)rmem->size / SZ_1M);
+ return ret;
+ }
}
mem->use_dev_dma_pfn_offset = true;
rmem->priv = mem;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index ea1732ed7a9d..770b1539a083 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1860,10 +1860,13 @@ void device_pm_check_callbacks(struct device *dev)
{
spin_lock_irq(&dev->power.lock);
dev->power.no_pm_callbacks =
- (!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
- (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
+ (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
+ !dev->bus->suspend && !dev->bus->resume)) &&
+ (!dev->class || (pm_ops_is_empty(dev->class->pm) &&
+ !dev->class->suspend && !dev->class->resume)) &&
(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
- (!dev->driver || pm_ops_is_empty(dev->driver->pm));
+ (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
+ !dev->driver->suspend && !dev->driver->resume));
spin_unlock_irq(&dev->power.lock);
}
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index a8cc14fd8ae4..a6de32530693 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -1581,6 +1581,9 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
opp->available = availability_req;
+ dev_pm_opp_get(opp);
+ mutex_unlock(&opp_table->lock);
+
/* Notify the change of the OPP availability */
if (availability_req)
blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
@@ -1589,8 +1592,12 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
blocking_notifier_call_chain(&opp_table->head,
OPP_EVENT_DISABLE, opp);
+ dev_pm_opp_put(opp);
+ goto put_table;
+
unlock:
mutex_unlock(&opp_table->lock);
+put_table:
dev_pm_opp_put_opp_table(opp_table);
return r;
}
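The reordering above pins the OPP with dev_pm_opp_get() before dropping opp_table->lock, so the notifier chain runs unlocked while the object stays alive, and dev_pm_opp_put() releases the pin afterwards. A stand-alone sketch of that take-reference/unlock/notify/put shape, assuming a pthread mutex as the lock (all names hypothetical):

#include <pthread.h>
#include <stdio.h>

struct obj {
	pthread_mutex_t lock;
	int refcnt; /* protected by lock */
};

static void obj_get(struct obj *o) { o->refcnt++; } /* caller holds lock */

static void obj_put(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	if (--o->refcnt == 0)
		printf("last reference dropped\n");
	pthread_mutex_unlock(&o->lock);
}

static void notify(struct obj *o)
{
	(void)o; /* stand-in for blocking_notifier_call_chain() */
	printf("notifier runs without the lock held\n");
}

int main(void)
{
	struct obj o = { PTHREAD_MUTEX_INITIALIZER, 1 };

	pthread_mutex_lock(&o.lock);
	obj_get(&o);                 /* pin before unlocking */
	pthread_mutex_unlock(&o.lock);

	notify(&o);                  /* may sleep; no lock held */
	obj_put(&o);                 /* drop the pin */
	obj_put(&o);                 /* drop the original reference */
	return 0;
}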
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index f850daeffba4..277d43a83f53 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -277,11 +277,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
-static bool dev_pm_qos_invalid_request(struct device *dev,
- struct dev_pm_qos_request *req)
+static bool dev_pm_qos_invalid_req_type(struct device *dev,
+ enum dev_pm_qos_req_type type)
{
- return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
- && !dev->power.set_latency_tolerance);
+ return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
+ !dev->power.set_latency_tolerance;
}
static int __dev_pm_qos_add_request(struct device *dev,
@@ -290,7 +290,7 @@ static int __dev_pm_qos_add_request(struct device *dev,
{
int ret = 0;
- if (!dev || dev_pm_qos_invalid_request(dev, req))
+ if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
return -EINVAL;
if (WARN(dev_pm_qos_request_active(req),
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index fe597e6c55c4..1d6729be4cd6 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -455,7 +455,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
goto out;
}
- msleep(TPM_TIMEOUT); /* CHECK */
+ tpm_msleep(TPM_TIMEOUT);
rmb();
} while (time_before(jiffies, stop));
@@ -970,7 +970,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
dev_info(
&chip->dev, HW_ERR
"TPM command timed out during continue self test");
- msleep(delay_msec);
+ tpm_msleep(delay_msec);
continue;
}
@@ -985,7 +985,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
}
if (rc != TPM_WARN_DOING_SELFTEST)
return rc;
- msleep(delay_msec);
+ tpm_msleep(delay_msec);
} while (--loops > 0);
return rc;
@@ -1085,7 +1085,7 @@ again:
}
} else {
do {
- msleep(TPM_TIMEOUT);
+ tpm_msleep(TPM_TIMEOUT);
status = chip->ops->status(chip);
if ((status & mask) == mask)
return 0;
@@ -1150,7 +1150,7 @@ int tpm_pm_suspend(struct device *dev)
*/
if (rc != TPM_WARN_RETRY)
break;
- msleep(TPM_TIMEOUT_RETRY);
+ tpm_msleep(TPM_TIMEOUT_RETRY);
}
if (rc)
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 04fbff2edbf3..2d5466a72e40 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -50,7 +50,8 @@ enum tpm_const {
enum tpm_timeout {
TPM_TIMEOUT = 5, /* msecs */
- TPM_TIMEOUT_RETRY = 100 /* msecs */
+ TPM_TIMEOUT_RETRY = 100, /* msecs */
+ TPM_TIMEOUT_RANGE_US = 300 /* usecs */
};
/* TPM addresses */
@@ -527,6 +528,12 @@ int tpm_pm_resume(struct device *dev);
int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
wait_queue_head_t *queue, bool check_cancel);
+static inline void tpm_msleep(unsigned int delay_msec)
+{
+ usleep_range(delay_msec * 1000,
+ (delay_msec * 1000) + TPM_TIMEOUT_RANGE_US);
+};
+
struct tpm_chip *tpm_chip_find_get(int chip_num);
__must_check int tpm_try_get_ops(struct tpm_chip *chip);
void tpm_put_ops(struct tpm_chip *chip);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index f7f34b2aa981..e1a41b788f08 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -899,7 +899,7 @@ static int tpm2_do_selftest(struct tpm_chip *chip)
if (rc != TPM2_RC_TESTING)
break;
- msleep(delay_msec);
+ tpm_msleep(delay_msec);
}
return rc;
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index a4ac63a21d8a..8f0a98dea327 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -665,7 +665,7 @@ static const struct dev_pm_ops crb_pm = {
SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL)
};
-static struct acpi_device_id crb_device_ids[] = {
+static const struct acpi_device_id crb_device_ids[] = {
{"MSFT0101", 0},
{"", 0},
};
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index f01d083eced2..25f6e2665385 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -32,26 +32,70 @@
static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";
-static struct vio_device_id tpm_ibmvtpm_device_table[] = {
+static const struct vio_device_id tpm_ibmvtpm_device_table[] = {
{ "IBM,vtpm", "IBM,vtpm"},
{ "", "" }
};
MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
/**
+ *
+ * ibmvtpm_send_crq_word - Send a CRQ request
+ * @vdev: vio device struct
+ * @w1: pre-constructed first word of tpm crq (second word is reserved)
+ *
+ * Return:
+ * 0 - Success
+ * Non-zero - Failure
+ */
+static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1)
+{
+ return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0);
+}
+
+/**
+ *
* ibmvtpm_send_crq - Send a CRQ request
*
* @vdev: vio device struct
- * @w1: first word
- * @w2: second word
+ * @valid: Valid field
+ * @msg: Type field
+ * @len: Length field
+ * @data: Data field
+ *
+ * The ibmvtpm crq is defined as follows:
+ *
+ * Byte | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7
+ * -----------------------------------------------------------------------
+ * Word0 | Valid | Type | Length | Data
+ * -----------------------------------------------------------------------
+ * Word1 | Reserved
+ * -----------------------------------------------------------------------
+ *
+ * Which matches the following structure (on bigendian host):
+ *
+ * struct ibmvtpm_crq {
+ * u8 valid;
+ * u8 msg;
+ * __be16 len;
+ * __be32 data;
+ * __be64 reserved;
+ * } __attribute__((packed, aligned(8)));
+ *
+ * However, the value is passed in a register so just compute the numeric value
+ * to load into the register avoiding byteswap altogether. Endian only affects
+ * memory loads and stores - registers are internally represented the same.
*
* Return:
- * 0 -Sucess
+ * 0 (H_SUCCESS) - Success
* Non-zero - Failure
*/
-static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2)
+static int ibmvtpm_send_crq(struct vio_dev *vdev,
+ u8 valid, u8 msg, u16 len, u32 data)
{
- return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2);
+ u64 w1 = ((u64)valid << 56) | ((u64)msg << 48) | ((u64)len << 32) |
+ (u64)data;
+ return ibmvtpm_send_crq_word(vdev, w1);
}
/**
@@ -109,8 +153,6 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
- struct ibmvtpm_crq crq;
- __be64 *word = (__be64 *)&crq;
int rc, sig;
if (!ibmvtpm->rtce_buf) {
@@ -137,10 +179,6 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
spin_lock(&ibmvtpm->rtce_lock);
ibmvtpm->res_len = 0;
memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
- crq.valid = (u8)IBMVTPM_VALID_CMD;
- crq.msg = (u8)VTPM_TPM_COMMAND;
- crq.len = cpu_to_be16(count);
- crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
/*
* set the processing flag before the Hcall, since we may get the
@@ -148,8 +186,9 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
*/
ibmvtpm->tpm_processing_cmd = true;
- rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
- be64_to_cpu(word[1]));
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+ IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
+ count, ibmvtpm->rtce_dma_handle);
if (rc != H_SUCCESS) {
dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
rc = 0;
@@ -182,15 +221,10 @@ static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
*/
static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
{
- struct ibmvtpm_crq crq;
- u64 *buf = (u64 *) &crq;
int rc;
- crq.valid = (u8)IBMVTPM_VALID_CMD;
- crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;
-
- rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
- cpu_to_be64(buf[1]));
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+ IBMVTPM_VALID_CMD, VTPM_GET_RTCE_BUFFER_SIZE, 0, 0);
if (rc != H_SUCCESS)
dev_err(ibmvtpm->dev,
"ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);
@@ -210,15 +244,10 @@ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
*/
static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
{
- struct ibmvtpm_crq crq;
- u64 *buf = (u64 *) &crq;
int rc;
- crq.valid = (u8)IBMVTPM_VALID_CMD;
- crq.msg = (u8)VTPM_GET_VERSION;
-
- rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
- cpu_to_be64(buf[1]));
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+ IBMVTPM_VALID_CMD, VTPM_GET_VERSION, 0, 0);
if (rc != H_SUCCESS)
dev_err(ibmvtpm->dev,
"ibmvtpm_crq_get_version failed rc=%d\n", rc);
@@ -238,7 +267,7 @@ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
{
int rc;
- rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0);
+ rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_COMP_CMD);
if (rc != H_SUCCESS)
dev_err(ibmvtpm->dev,
"ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);
@@ -258,7 +287,7 @@ static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
{
int rc;
- rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0);
+ rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
if (rc != H_SUCCESS)
dev_err(ibmvtpm->dev,
"ibmvtpm_crq_send_init failed rc=%d\n", rc);
@@ -340,15 +369,10 @@ static int tpm_ibmvtpm_suspend(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
- struct ibmvtpm_crq crq;
- u64 *buf = (u64 *) &crq;
int rc = 0;
- crq.valid = (u8)IBMVTPM_VALID_CMD;
- crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND;
-
- rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
- cpu_to_be64(buf[1]));
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+ IBMVTPM_VALID_CMD, VTPM_PREPARE_TO_SUSPEND, 0, 0);
if (rc != H_SUCCESS)
dev_err(ibmvtpm->dev,
"tpm_ibmvtpm_suspend failed rc=%d\n", rc);
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 3b1b9f9322d5..d8f10047fbba 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -191,7 +191,7 @@ static int wait(struct tpm_chip *chip, int wait_for_bit)
/* check the status-register if wait_for_bit is set */
if (status & 1 << wait_for_bit)
break;
- msleep(TPM_MSLEEP_TIME);
+ tpm_msleep(TPM_MSLEEP_TIME);
}
if (i == TPM_MAX_TRIES) { /* timeout occurs */
if (wait_for_bit == STAT_XFE)
@@ -226,7 +226,7 @@ static void tpm_wtx(struct tpm_chip *chip)
wait_and_send(chip, TPM_CTRL_WTX);
wait_and_send(chip, 0x00);
wait_and_send(chip, 0x00);
- msleep(TPM_WTX_MSLEEP_TIME);
+ tpm_msleep(TPM_WTX_MSLEEP_TIME);
}
static void tpm_wtx_abort(struct tpm_chip *chip)
@@ -237,7 +237,7 @@ static void tpm_wtx_abort(struct tpm_chip *chip)
wait_and_send(chip, 0x00);
wait_and_send(chip, 0x00);
number_of_wtx = 0;
- msleep(TPM_WTX_MSLEEP_TIME);
+ tpm_msleep(TPM_WTX_MSLEEP_TIME);
}
static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count)
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index b617b2eeb080..63bc6c3b949e 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -51,7 +51,7 @@ static int wait_startup(struct tpm_chip *chip, int l)
if (access & TPM_ACCESS_VALID)
return 0;
- msleep(TPM_TIMEOUT);
+ tpm_msleep(TPM_TIMEOUT);
} while (time_before(jiffies, stop));
return -1;
}
@@ -117,7 +117,7 @@ again:
do {
if (check_locality(chip, l))
return l;
- msleep(TPM_TIMEOUT);
+ tpm_msleep(TPM_TIMEOUT);
} while (time_before(jiffies, stop));
}
return -1;
@@ -164,7 +164,7 @@ static int get_burstcount(struct tpm_chip *chip)
burstcnt = (value >> 8) & 0xFFFF;
if (burstcnt)
return burstcnt;
- msleep(TPM_TIMEOUT);
+ tpm_msleep(TPM_TIMEOUT);
} while (time_before(jiffies, stop));
return -EBUSY;
}
@@ -396,7 +396,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
priv->irq = irq;
chip->flags |= TPM_CHIP_FLAG_IRQ;
if (!priv->irq_tested)
- msleep(1);
+ tpm_msleep(1);
if (!priv->irq_tested)
disable_interrupts(chip);
priv->irq_tested = true;
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
index 2ff64d9d4fb3..62d24690ba02 100644
--- a/drivers/clocksource/timer-integrator-ap.c
+++ b/drivers/clocksource/timer-integrator-ap.c
@@ -36,8 +36,8 @@ static u64 notrace integrator_read_sched_clock(void)
return -readl(sched_clk_base + TIMER_VALUE);
}
-static int integrator_clocksource_init(unsigned long inrate,
- void __iomem *base)
+static int __init integrator_clocksource_init(unsigned long inrate,
+ void __iomem *base)
{
u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
unsigned long rate = inrate;
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 7080c384ad5d..52a75053ee03 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -104,13 +104,13 @@ static int __init arm_idle_init(void)
ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
if (ret <= 0) {
ret = ret ? : -ENODEV;
- goto out_fail;
+ goto init_fail;
}
ret = cpuidle_register_driver(drv);
if (ret) {
pr_err("Failed to register cpuidle driver\n");
- goto out_fail;
+ goto init_fail;
}
/*
@@ -149,6 +149,8 @@ static int __init arm_idle_init(void)
}
return 0;
+init_fail:
+ kfree(drv);
out_fail:
while (--cpu >= 0) {
dev = per_cpu(cpuidle_devices, cpu);
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index e36aeacd7635..1eb852765469 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -1,6 +1,7 @@
config CRYPTO_DEV_FSL_CAAM
tristate "Freescale CAAM-Multicore driver backend"
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
+ select SOC_BUS
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -141,10 +142,6 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
To compile this as a module, choose M here: the module
will be called caamrng.
-config CRYPTO_DEV_FSL_CAAM_IMX
- def_bool SOC_IMX6 || SOC_IMX7D
- depends on CRYPTO_DEV_FSL_CAAM
-
config CRYPTO_DEV_FSL_CAAM_DEBUG
bool "Enable debug output in CAAM driver"
depends on CRYPTO_DEV_FSL_CAAM
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index dacb53fb690e..027e121c6f70 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -7,6 +7,7 @@
#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/sys_soc.h>
#include "compat.h"
#include "regs.h"
@@ -19,6 +20,8 @@ bool caam_little_end;
EXPORT_SYMBOL(caam_little_end);
bool caam_dpaa2;
EXPORT_SYMBOL(caam_dpaa2);
+bool caam_imx;
+EXPORT_SYMBOL(caam_imx);
#ifdef CONFIG_CAAM_QI
#include "qi.h"
@@ -28,19 +31,11 @@ EXPORT_SYMBOL(caam_dpaa2);
* i.MX targets tend to have clock control subsystems that can
* enable/disable clocking to our device.
*/
-#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
static inline struct clk *caam_drv_identify_clk(struct device *dev,
char *clk_name)
{
- return devm_clk_get(dev, clk_name);
+ return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
}
-#else
-static inline struct clk *caam_drv_identify_clk(struct device *dev,
- char *clk_name)
-{
- return NULL;
-}
-#endif
/*
* Descriptor to instantiate RNG State Handle 0 in normal mode and
@@ -430,6 +425,10 @@ static int caam_probe(struct platform_device *pdev)
{
int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
u64 caam_id;
+ static const struct soc_device_attribute imx_soc[] = {
+ {.family = "Freescale i.MX"},
+ {},
+ };
struct device *dev;
struct device_node *nprop, *np;
struct caam_ctrl __iomem *ctrl;
@@ -451,6 +450,8 @@ static int caam_probe(struct platform_device *pdev)
dev_set_drvdata(dev, ctrlpriv);
nprop = pdev->dev.of_node;
+ caam_imx = (bool)soc_device_match(imx_soc);
+
/* Enable clocking */
clk = caam_drv_identify_clk(&pdev->dev, "ipg");
if (IS_ERR(clk)) {
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 2b5efff9ec3c..17cfd23a38fa 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -67,6 +67,7 @@
*/
extern bool caam_little_end;
+extern bool caam_imx;
#define caam_to_cpu(len) \
static inline u##len caam##len ## _to_cpu(u##len val) \
@@ -154,13 +155,10 @@ static inline u64 rd_reg64(void __iomem *reg)
#else /* CONFIG_64BIT */
static inline void wr_reg64(void __iomem *reg, u64 data)
{
-#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
- if (caam_little_end) {
+ if (!caam_imx && caam_little_end) {
wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
wr_reg32((u32 __iomem *)(reg), data);
- } else
-#endif
- {
+ } else {
wr_reg32((u32 __iomem *)(reg), data >> 32);
wr_reg32((u32 __iomem *)(reg) + 1, data);
}
@@ -168,41 +166,40 @@ static inline void wr_reg64(void __iomem *reg, u64 data)
static inline u64 rd_reg64(void __iomem *reg)
{
-#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
- if (caam_little_end)
+ if (!caam_imx && caam_little_end)
return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
(u64)rd_reg32((u32 __iomem *)(reg)));
- else
-#endif
- return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
- (u64)rd_reg32((u32 __iomem *)(reg) + 1));
+
+ return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
+ (u64)rd_reg32((u32 __iomem *)(reg) + 1));
}
#endif /* CONFIG_64BIT */
+static inline u64 cpu_to_caam_dma64(dma_addr_t value)
+{
+ if (caam_imx)
+ return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) |
+ (u64)cpu_to_caam32(upper_32_bits(value)));
+
+ return cpu_to_caam64(value);
+}
+
+static inline u64 caam_dma64_to_cpu(u64 value)
+{
+ if (caam_imx)
+ return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) |
+ (u64)caam32_to_cpu(upper_32_bits(value)));
+
+ return caam64_to_cpu(value);
+}
+
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-#ifdef CONFIG_SOC_IMX7D
-#define cpu_to_caam_dma(value) \
- (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
- (u64)cpu_to_caam32(upper_32_bits(value)))
-#define caam_dma_to_cpu(value) \
- (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \
- (u64)caam32_to_cpu(upper_32_bits(value)))
-#else
-#define cpu_to_caam_dma(value) cpu_to_caam64(value)
-#define caam_dma_to_cpu(value) caam64_to_cpu(value)
-#endif /* CONFIG_SOC_IMX7D */
+#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value)
+#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value)
#else
#define cpu_to_caam_dma(value) cpu_to_caam32(value)
#define caam_dma_to_cpu(value) caam32_to_cpu(value)
-#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
-
-#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
-#define cpu_to_caam_dma64(value) \
- (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
- (u64)cpu_to_caam32(upper_32_bits(value)))
-#else
-#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
-#endif
+#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
/*
* jr_outentry
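On i.MX the CAAM expects the two 32-bit halves of a 64-bit DMA address in swapped word order, so cpu_to_caam_dma64() above performs the swap at run time based on caam_imx instead of under a compile-time #ifdef. A stand-alone sketch of the half-swap (caam_imx is hard-coded here, and the per-half endian fixup done by cpu_to_caam32() is omitted):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool caam_imx = true; /* normally set via soc_device_match() */

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

/* Swap the 32-bit halves on i.MX, pass the value through otherwise. */
static uint64_t dma64_sketch(uint64_t value)
{
	if (caam_imx)
		return ((uint64_t)lower_32_bits(value) << 32) |
		       upper_32_bits(value);
	return value;
}

int main(void)
{
	printf("0x%016llx\n",
	       (unsigned long long)dma64_sketch(0x1122334455667788ULL));
	/* prints 0x5566778811223344 while caam_imx is set */
	return 0;
}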
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index d2207ac5ba19..5438552bc6d7 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -386,7 +386,7 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
struct skcipher_request req;
- struct safexcel_inv_result result = { 0 };
+ struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
memset(&req, 0, sizeof(struct skcipher_request));
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 3f819399cd95..3980f946874f 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -419,7 +419,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->priv;
struct ahash_request req;
- struct safexcel_inv_result result = { 0 };
+ struct safexcel_inv_result result = {};
int ring = ctx->base.ring;
memset(&req, 0, sizeof(struct ahash_request));
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 79791c690858..dff88838dce7 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1756,9 +1756,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
req_ctx->swinit = 0;
} else {
desc->ptr[1] = zero_entry;
- /* Indicate next op is not the first. */
- req_ctx->first = 0;
}
+ /* Indicate next op is not the first. */
+ req_ctx->first = 0;
/* HMAC key */
if (ctx->keylen)
@@ -1769,7 +1769,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
sg_count = edesc->src_nents ?: 1;
if (is_sec1 && sg_count > 1)
- sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length);
+ sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
else
sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
DMA_TO_DEVICE);
@@ -3057,7 +3057,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
t_alg->algt.alg.hash.final = ahash_final;
t_alg->algt.alg.hash.finup = ahash_finup;
t_alg->algt.alg.hash.digest = ahash_digest;
- t_alg->algt.alg.hash.setkey = ahash_setkey;
+ if (!strncmp(alg->cra_name, "hmac", 4))
+ t_alg->algt.alg.hash.setkey = ahash_setkey;
t_alg->algt.alg.hash.import = ahash_import;
t_alg->algt.alg.hash.export = ahash_export;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 681b639f5133..0649dd43e780 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -183,7 +183,7 @@ static void uninitialize(struct kernel_queue *kq)
{
if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
kq->mqd->destroy_mqd(kq->mqd,
- NULL,
+ kq->queue->mqd,
false,
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
kq->queue->pipe,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 1cae95e2b13a..03bec765b03d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -143,7 +143,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
int num_queues = 0;
struct queue *cur;
- memset(&q_properties, 0, sizeof(struct queue_properties));
memcpy(&q_properties, properties, sizeof(struct queue_properties));
q = NULL;
kq = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 730b8d9db187..6be5b53c3b27 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/iopoll.h>
+#include <linux/irq.h>
#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index b1f7299600f0..e651a58c18cf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -168,23 +168,19 @@ static struct drm_driver exynos_drm_driver = {
static int exynos_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
+ struct exynos_drm_private *private = drm_dev->dev_private;
if (pm_runtime_suspended(dev) || !drm_dev)
return 0;
- drm_connector_list_iter_begin(drm_dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- int old_dpms = connector->dpms;
-
- if (connector->funcs->dpms)
- connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
-
- /* Set the old mode back to the connector for resume */
- connector->dpms = old_dpms;
+ drm_kms_helper_poll_disable(drm_dev);
+ exynos_drm_fbdev_suspend(drm_dev);
+ private->suspend_state = drm_atomic_helper_suspend(drm_dev);
+ if (IS_ERR(private->suspend_state)) {
+ exynos_drm_fbdev_resume(drm_dev);
+ drm_kms_helper_poll_enable(drm_dev);
+ return PTR_ERR(private->suspend_state);
}
- drm_connector_list_iter_end(&conn_iter);
return 0;
}
@@ -192,22 +188,14 @@ static int exynos_drm_suspend(struct device *dev)
static int exynos_drm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
+ struct exynos_drm_private *private = drm_dev->dev_private;
if (pm_runtime_suspended(dev) || !drm_dev)
return 0;
- drm_connector_list_iter_begin(drm_dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->funcs->dpms) {
- int dpms = connector->dpms;
-
- connector->dpms = DRM_MODE_DPMS_OFF;
- connector->funcs->dpms(connector, dpms);
- }
- }
- drm_connector_list_iter_end(&conn_iter);
+ drm_atomic_helper_resume(drm_dev, private->suspend_state);
+ exynos_drm_fbdev_resume(drm_dev);
+ drm_kms_helper_poll_enable(drm_dev);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index cf131c2aa23e..f8bae4cb4823 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -202,6 +202,7 @@ struct drm_exynos_file_private {
*/
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
+ struct drm_atomic_state *suspend_state;
struct device *dma_dev;
void *mapping;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index c3a068409b48..dfb66ecf417b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -18,6 +18,8 @@
#include <drm/drm_crtc_helper.h>
#include <drm/exynos_drm.h>
+#include <linux/console.h>
+
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_fbdev.h"
@@ -285,3 +287,21 @@ void exynos_drm_output_poll_changed(struct drm_device *dev)
drm_fb_helper_hotplug_event(fb_helper);
}
+
+void exynos_drm_fbdev_suspend(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+
+ console_lock();
+ drm_fb_helper_set_suspend(private->fb_helper, 1);
+ console_unlock();
+}
+
+void exynos_drm_fbdev_resume(struct drm_device *dev)
+{
+ struct exynos_drm_private *private = dev->dev_private;
+
+ console_lock();
+ drm_fb_helper_set_suspend(private->fb_helper, 0);
+ console_unlock();
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index 330eef87f718..645d1bb7f665 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -21,6 +21,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev);
void exynos_drm_fbdev_fini(struct drm_device *dev);
void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
void exynos_drm_output_poll_changed(struct drm_device *dev);
+void exynos_drm_fbdev_suspend(struct drm_device *drm);
+void exynos_drm_fbdev_resume(struct drm_device *drm);
#else
@@ -39,6 +41,14 @@ static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
#define exynos_drm_output_poll_changed (NULL)
+static inline void exynos_drm_fbdev_suspend(struct drm_device *drm)
+{
+}
+
+static inline void exynos_drm_fbdev_resume(struct drm_device *drm)
+{
+}
+
#endif
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 214fa5e51963..0109ff40b1db 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -944,22 +944,27 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
struct drm_display_mode *m;
+ struct drm_connector_list_iter conn_iter;
int mode_ok;
drm_mode_set_crtcinfo(adjusted_mode, 0);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->encoder == encoder)
break;
}
+ if (connector)
+ drm_connector_get(connector);
+ drm_connector_list_iter_end(&conn_iter);
- if (connector->encoder != encoder)
+ if (!connector)
return true;
mode_ok = hdmi_mode_valid(connector, adjusted_mode);
if (mode_ok == MODE_OK)
- return true;
+ goto cleanup;
/*
* Find the most suitable mode and copy it to adjusted_mode.
@@ -979,6 +984,9 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder,
}
}
+cleanup:
+ drm_connector_put(connector);
+
return true;
}
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 40af17ec6312..ff3154fe6588 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -197,78 +197,65 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- unsigned int bar_index =
- (rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8;
u32 new = *(u32 *)(p_data);
bool lo = IS_ALIGNED(offset, 8);
u64 size;
int ret = 0;
bool mmio_enabled =
vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
+ struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar;
- if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX))
- return -EINVAL;
-
+ /*
+ * Power-up software can determine how much address
+ * space the device requires by writing a value of
+ * all 1's to the register and then reading the value
+ * back. The device will return 0's in all don't-care
+ * address bits.
+ */
if (new == 0xffffffff) {
- /*
- * Power-up software can determine how much address
- * space the device requires by writing a value of
- * all 1's to the register and then reading the value
- * back. The device will return 0's in all don't-care
- * address bits.
- */
- size = vgpu->cfg_space.bar[bar_index].size;
- if (lo) {
- new = rounddown(new, size);
- } else {
- u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)];
- /* for 32bit mode bar it returns all-0 in upper 32
- * bit, for 64bit mode bar it will calculate the
- * size with lower 32bit and return the corresponding
- * value
+ switch (offset) {
+ case PCI_BASE_ADDRESS_0:
+ case PCI_BASE_ADDRESS_1:
+ size = ~(bars[INTEL_GVT_PCI_BAR_GTTMMIO].size -1);
+ intel_vgpu_write_pci_bar(vgpu, offset,
+ size >> (lo ? 0 : 32), lo);
+ /*
+ * Untrap the BAR, since guest hasn't configured a
+ * valid GPA
*/
- if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
- new &= (~(size-1)) >> 32;
- else
- new = 0;
- }
- /*
- * Unmapp & untrap the BAR, since guest hasn't configured a
- * valid GPA
- */
- switch (bar_index) {
- case INTEL_GVT_PCI_BAR_GTTMMIO:
ret = trap_gttmmio(vgpu, false);
break;
- case INTEL_GVT_PCI_BAR_APERTURE:
+ case PCI_BASE_ADDRESS_2:
+ case PCI_BASE_ADDRESS_3:
+ size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1);
+ intel_vgpu_write_pci_bar(vgpu, offset,
+ size >> (lo ? 0 : 32), lo);
ret = map_aperture(vgpu, false);
break;
+ default:
+ /* Unimplemented BARs */
+ intel_vgpu_write_pci_bar(vgpu, offset, 0x0, false);
}
- intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
} else {
- /*
- * Unmapp & untrap the old BAR first, since guest has
- * re-configured the BAR
- */
- switch (bar_index) {
- case INTEL_GVT_PCI_BAR_GTTMMIO:
- ret = trap_gttmmio(vgpu, false);
+ switch (offset) {
+ case PCI_BASE_ADDRESS_0:
+ case PCI_BASE_ADDRESS_1:
+ /*
+ * Untrap the old BAR first, since guest has
+ * re-configured the BAR
+ */
+ trap_gttmmio(vgpu, false);
+ intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+ ret = trap_gttmmio(vgpu, mmio_enabled);
break;
- case INTEL_GVT_PCI_BAR_APERTURE:
- ret = map_aperture(vgpu, false);
+ case PCI_BASE_ADDRESS_2:
+ case PCI_BASE_ADDRESS_3:
+ map_aperture(vgpu, false);
+ intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
+ ret = map_aperture(vgpu, mmio_enabled);
break;
- }
- intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
- /* Track the new BAR */
- if (mmio_enabled) {
- switch (bar_index) {
- case INTEL_GVT_PCI_BAR_GTTMMIO:
- ret = trap_gttmmio(vgpu, true);
- break;
- case INTEL_GVT_PCI_BAR_APERTURE:
- ret = map_aperture(vgpu, true);
- break;
- }
+ default:
+ intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
}
}
return ret;
@@ -299,10 +286,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
}
switch (rounddown(offset, 4)) {
- case PCI_BASE_ADDRESS_0:
- case PCI_BASE_ADDRESS_1:
- case PCI_BASE_ADDRESS_2:
- case PCI_BASE_ADDRESS_3:
+ case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
if (WARN_ON(!IS_ALIGNED(offset, 4)))
return -EINVAL;
return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
@@ -344,7 +328,6 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
u16 *gmch_ctl;
- int i;
memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
info->cfg_space_size);
@@ -371,13 +354,13 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
*/
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
+ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_4, 0, 8);
memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
- for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
- vgpu->cfg_space.bar[i].size = pci_resource_len(
- gvt->dev_priv->drm.pdev, i * 2);
- vgpu->cfg_space.bar[i].tracked = false;
- }
+ vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
+ pci_resource_len(gvt->dev_priv->drm.pdev, 0);
+ vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
+ pci_resource_len(gvt->dev_priv->drm.pdev, 2);
}
/**
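The comment hoisted above the branch restates the standard PCI BAR sizing handshake: software writes all 1s to the BAR, the device returns 0s in the don't-care address bits (i.e. ~(size - 1)), and the size is recovered by masking the flag bits, inverting, and adding one. A stand-alone sketch of both sides of that exchange for a 32-bit memory BAR (sizes assumed to be powers of two):

#include <stdint.h>
#include <stdio.h>

/* Device side: value read back after software writes 0xffffffff,
 * as emulate_pci_bar_write() now computes per BAR. */
static uint32_t readback_after_all_ones(uint32_t bar_size)
{
	return ~(bar_size - 1);
}

int main(void)
{
	uint32_t size = 16 * 1024 * 1024; /* 16 MiB BAR */
	uint32_t val = readback_after_all_ones(size);

	/* Software side: clear the low flag bits, invert, add one. */
	uint32_t decoded = ~(val & ~0xfu) + 1;

	printf("readback 0x%08x -> size 0x%08x\n", val, decoded);
	return 0;
}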
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e21ce9c18b6e..b63893eeca73 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -839,7 +839,6 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
pipe);
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
- bool in_vbl = true;
unsigned long irqflags;
if (WARN_ON(!mode->crtc_clock)) {
@@ -922,8 +921,6 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
- in_vbl = position >= vbl_start && position < vbl_end;
-
/*
* While in vblank, position will be negative
* counting up towards 0 at vbl_end. And outside
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f17275519484..00cd17c76fdc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -14030,7 +14030,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
DRM_DEBUG_KMS("bad plane %d handle\n", i);
- return -EINVAL;
+ goto err;
}
stride_alignment = intel_fb_stride_alignment(fb, i);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index f0c11aec5ea5..7442891762be 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -892,8 +892,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
@@ -903,15 +901,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder,
intel_panel_disable_backlight(old_conn_state);
/*
- * Disable Device ready before the port shutdown in order
- * to avoid split screen
- */
- if (IS_BROXTON(dev_priv)) {
- for_each_dsi_port(port, intel_dsi->ports)
- I915_WRITE(MIPI_DEVICE_READY(port), 0);
- }
-
- /*
* According to the spec we should send SHUTDOWN before
* MIPI_SEQ_DISPLAY_OFF only for v3+ VBTs, but field testing
* has shown that the v3 sequence works for v2 VBTs too
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a17b1de7d7e0..3b1c5d783ee7 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -1699,6 +1699,8 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = bxt_get_backlight(connector);
val = intel_panel_compute_brightness(connector, val);
panel->backlight.level = clamp(val, panel->backlight.min,
@@ -1735,6 +1737,8 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
if (!panel->backlight.max)
return -ENODEV;
+ panel->backlight.min = get_backlight_min_vbt(connector);
+
val = bxt_get_backlight(connector);
val = intel_panel_compute_brightness(connector, val);
panel->backlight.level = clamp(val, panel->backlight.min,
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index ee9e27dc799b..de57d6c11a25 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1646,7 +1646,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
*/
if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
if (attr.qp_state >= IB_QPS_INIT) {
- if (qp->device->get_link_layer(qp->device, attr.port_num) !=
+ if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
IB_LINK_LAYER_INFINIBAND)
return true;
goto lid_check;
@@ -1655,7 +1655,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
/* Can't get a quick answer, iterate over all ports */
for (port = 0; port < qp->device->phys_port_cnt; port++)
- if (qp->device->get_link_layer(qp->device, port) !=
+ if (rdma_port_get_link_layer(qp->device, port) !=
IB_LINK_LAYER_INFINIBAND)
num_eth_ports++;
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index b3ad37fec578..ecbac91b2e14 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -93,11 +93,13 @@ struct bnxt_re_dev {
struct ib_device ibdev;
struct list_head list;
unsigned long flags;
-#define BNXT_RE_FLAG_NETDEV_REGISTERED 0
-#define BNXT_RE_FLAG_IBDEV_REGISTERED 1
-#define BNXT_RE_FLAG_GOT_MSIX 2
-#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 8
-#define BNXT_RE_FLAG_QOS_WORK_REG 16
+#define BNXT_RE_FLAG_NETDEV_REGISTERED 0
+#define BNXT_RE_FLAG_IBDEV_REGISTERED 1
+#define BNXT_RE_FLAG_GOT_MSIX 2
+#define BNXT_RE_FLAG_HAVE_L2_REF 3
+#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
+#define BNXT_RE_FLAG_QOS_WORK_REG 5
+#define BNXT_RE_FLAG_TASK_IN_PROG 6
struct net_device *netdev;
unsigned int version, major, minor;
struct bnxt_en_dev *en_dev;
@@ -108,6 +110,8 @@ struct bnxt_re_dev {
struct delayed_work worker;
u8 cur_prio_map;
+ u8 active_speed;
+ u8 active_width;
/* FP Notification Queue (CQ & SRQ) */
struct tasklet_struct nq_task;
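The flag renumbering above matters because rdev->flags is manipulated with set_bit()/test_bit(), which take a bit number rather than a mask: the old values 8 and 16 silently addressed bits 8 and 16 instead of the intended third and fourth flags. A stand-alone illustration of the difference (user-space re-implementations of the two helpers):

#include <stdio.h>

/* test_bit()-style helpers: 'nr' is a bit number, not a mask. */
static void set_bit(unsigned int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

static int test_bit(unsigned int nr, const unsigned long *addr)
{
	return !!(*addr & (1UL << nr));
}

int main(void)
{
	unsigned long flags = 0;

	set_bit(16, &flags); /* old BNXT_RE_FLAG_QOS_WORK_REG value */
	printf("bit 4: %d, bit 16: %d\n",
	       test_bit(4, &flags), test_bit(16, &flags)); /* 0, 1 */

	set_bit(5, &flags);  /* new value: a genuine bit number */
	printf("bit 5: %d\n", test_bit(5, &flags));          /* 1 */
	return 0;
}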
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 01eee15bbd65..0d89621d9fe8 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -259,14 +259,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
port_attr->sm_sl = 0;
port_attr->subnet_timeout = 0;
port_attr->init_type_reply = 0;
- /* call the underlying netdev's ethtool hooks to query speed settings
- * for which we acquire rtnl_lock _only_ if it's registered with
- * IB stack to avoid race in the NETDEV_UNREG path
- */
- if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
- if (ib_get_eth_speed(ibdev, port_num, &port_attr->active_speed,
- &port_attr->active_width))
- return -EINVAL;
+ port_attr->active_speed = rdev->active_speed;
+ port_attr->active_width = rdev->active_width;
+
return 0;
}
@@ -319,6 +314,7 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
+ struct bnxt_qplib_gid *gid_to_del;
/* Delete the entry from the hardware */
ctx = *context;
@@ -328,11 +324,25 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
if (sgid_tbl && sgid_tbl->active) {
if (ctx->idx >= sgid_tbl->max)
return -EINVAL;
+ gid_to_del = &sgid_tbl->tbl[ctx->idx];
+ /* DEL_GID is called in WQ context(netdevice_event_work_handler)
+ * or via the ib_unregister_device path. In the former case QP1
+ * may not be destroyed yet, in which case just return as FW
+ * needs that entry to be present and will fail it's deletion.
+ * We could get invoked again after QP1 is destroyed OR get an
+ * ADD_GID call with a different GID value for the same index
+ * where we issue MODIFY_GID cmd to update the GID entry -- TBD
+ */
+ if (ctx->idx == 0 &&
+ rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
+ ctx->refcnt == 1 && rdev->qp1_sqp) {
+ dev_dbg(rdev_to_dev(rdev),
+ "Trying to delete GID0 while QP1 is alive\n");
+ return -EFAULT;
+ }
ctx->refcnt--;
if (!ctx->refcnt) {
- rc = bnxt_qplib_del_sgid(sgid_tbl,
- &sgid_tbl->tbl[ctx->idx],
- true);
+ rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
if (rc) {
dev_err(rdev_to_dev(rdev),
"Failed to remove GID: %#x", rc);
@@ -816,6 +826,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
kfree(rdev->sqp_ah);
kfree(rdev->qp1_sqp);
+ rdev->qp1_sqp = NULL;
+ rdev->sqp_ah = NULL;
}
if (!IS_ERR_OR_NULL(qp->rumem))
@@ -1436,11 +1448,14 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
+ qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
} else if (qp_attr->qp_state == IB_QPS_RTR) {
qp->qplib_qp.modify_flags |=
CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
qp->qplib_qp.path_mtu =
__from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
+ qp->qplib_qp.mtu =
+ ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
}
if (qp_attr_mask & IB_QP_TIMEOUT) {
@@ -1551,43 +1566,46 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
{
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_re_dev *rdev = qp->rdev;
- struct bnxt_qplib_qp qplib_qp;
+ struct bnxt_qplib_qp *qplib_qp;
int rc;
- memset(&qplib_qp, 0, sizeof(struct bnxt_qplib_qp));
- qplib_qp.id = qp->qplib_qp.id;
- qplib_qp.ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
+ qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
+ if (!qplib_qp)
+ return -ENOMEM;
+
+ qplib_qp->id = qp->qplib_qp.id;
+ qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
- rc = bnxt_qplib_query_qp(&rdev->qplib_res, &qplib_qp);
+ rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
- return rc;
+ goto out;
}
- qp_attr->qp_state = __to_ib_qp_state(qplib_qp.state);
- qp_attr->en_sqd_async_notify = qplib_qp.en_sqd_async_notify ? 1 : 0;
- qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access);
- qp_attr->pkey_index = qplib_qp.pkey_index;
- qp_attr->qkey = qplib_qp.qkey;
+ qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
+ qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
+ qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
+ qp_attr->pkey_index = qplib_qp->pkey_index;
+ qp_attr->qkey = qplib_qp->qkey;
qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
- rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp.ah.flow_label,
- qplib_qp.ah.host_sgid_index,
- qplib_qp.ah.hop_limit,
- qplib_qp.ah.traffic_class);
- rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp.ah.dgid.data);
- rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp.ah.sl);
- ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp.ah.dmac);
- qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu);
- qp_attr->timeout = qplib_qp.timeout;
- qp_attr->retry_cnt = qplib_qp.retry_cnt;
- qp_attr->rnr_retry = qplib_qp.rnr_retry;
- qp_attr->min_rnr_timer = qplib_qp.min_rnr_timer;
- qp_attr->rq_psn = qplib_qp.rq.psn;
- qp_attr->max_rd_atomic = qplib_qp.max_rd_atomic;
- qp_attr->sq_psn = qplib_qp.sq.psn;
- qp_attr->max_dest_rd_atomic = qplib_qp.max_dest_rd_atomic;
- qp_init_attr->sq_sig_type = qplib_qp.sig_type ? IB_SIGNAL_ALL_WR :
- IB_SIGNAL_REQ_WR;
- qp_attr->dest_qp_num = qplib_qp.dest_qpn;
+ rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
+ qplib_qp->ah.host_sgid_index,
+ qplib_qp->ah.hop_limit,
+ qplib_qp->ah.traffic_class);
+ rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
+ rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
+ ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
+ qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
+ qp_attr->timeout = qplib_qp->timeout;
+ qp_attr->retry_cnt = qplib_qp->retry_cnt;
+ qp_attr->rnr_retry = qplib_qp->rnr_retry;
+ qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
+ qp_attr->rq_psn = qplib_qp->rq.psn;
+ qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
+ qp_attr->sq_psn = qplib_qp->sq.psn;
+ qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
+ qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
+ IB_SIGNAL_REQ_WR;
+ qp_attr->dest_qp_num = qplib_qp->dest_qpn;
qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
@@ -1596,7 +1614,9 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
qp_init_attr->cap = qp_attr->cap;
- return 0;
+out:
+ kfree(qplib_qp);
+ return rc;
}
/* Routine for sending QP1 packets for RoCE V1 an V2
@@ -1908,6 +1928,7 @@ static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
switch (wr->opcode) {
case IB_WR_ATOMIC_CMP_AND_SWP:
wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
+ wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
wqe->atomic.swap_data = atomic_wr(wr)->swap;
break;
case IB_WR_ATOMIC_FETCH_AND_ADD:
@@ -3062,7 +3083,7 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
return rc;
}
- if (mr->npages && mr->pages) {
+ if (mr->pages) {
rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
&mr->qplib_frpl);
kfree(mr->pages);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 82d1cbc27aee..e7450ea92aa9 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -1161,6 +1161,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
}
}
set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
+ ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
+ &rdev->active_width);
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);
@@ -1255,10 +1257,14 @@ static void bnxt_re_task(struct work_struct *work)
else if (netif_carrier_ok(rdev->netdev))
bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
IB_EVENT_PORT_ACTIVE);
+ ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
+ &rdev->active_width);
break;
default:
break;
}
+ smp_mb__before_atomic();
+ clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
kfree(re_work);
}
@@ -1317,6 +1323,11 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
break;
case NETDEV_UNREGISTER:
+ /* netdev notifier will call NETDEV_UNREGISTER again later since
+ * we are still holding the reference to the netdev
+ */
+ if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags))
+ goto exit;
bnxt_re_ib_unreg(rdev, false);
bnxt_re_remove_one(rdev);
bnxt_re_dev_unreg(rdev);
@@ -1335,6 +1346,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
re_work->vlan_dev = (real_dev == netdev ?
NULL : netdev);
INIT_WORK(&re_work->work, bnxt_re_task);
+ set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
queue_work(bnxt_re_wq, &re_work->work);
}
}
@@ -1375,6 +1387,22 @@ err_netdev:
static void __exit bnxt_re_mod_exit(void)
{
+ struct bnxt_re_dev *rdev;
+ LIST_HEAD(to_be_deleted);
+
+ mutex_lock(&bnxt_re_dev_lock);
+ /* Free all adapter allocated resources */
+ if (!list_empty(&bnxt_re_dev_list))
+ list_splice_init(&bnxt_re_dev_list, &to_be_deleted);
+ mutex_unlock(&bnxt_re_dev_lock);
+
+ list_for_each_entry(rdev, &to_be_deleted, list) {
+ dev_info(rdev_to_dev(rdev), "Unregistering Device");
+ bnxt_re_dev_stop(rdev);
+ bnxt_re_ib_unreg(rdev, true);
+ bnxt_re_remove_one(rdev);
+ bnxt_re_dev_unreg(rdev);
+ }
unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
if (bnxt_re_wq)
destroy_workqueue(bnxt_re_wq);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 391bb7006e8f..2bdb1562bd21 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -107,6 +107,9 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
return -EINVAL;
}
+ if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags))
+ return -ETIMEDOUT;
+
/* Cmdq entries are in 16-byte units; each request can consume one or
* more cmdqe
*/
@@ -226,6 +229,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
/* timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
+ set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
return rc;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 0ed312f17c8d..85b16da287f9 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -162,8 +162,9 @@ struct bnxt_qplib_rcfw {
unsigned long *cmdq_bitmap;
u32 bmap_size;
unsigned long flags;
-#define FIRMWARE_INITIALIZED_FLAG 1
+#define FIRMWARE_INITIALIZED_FLAG BIT(0)
#define FIRMWARE_FIRST_FLAG BIT(31)
+#define FIRMWARE_TIMED_OUT BIT(3)
wait_queue_head_t waitq;
int (*aeq_handler)(struct bnxt_qplib_rcfw *,
struct creq_func_event *);
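For reference, the fail-fast pattern the rcfw changes introduce looks roughly like the sketch below. Names and bit numbers here are illustrative only, not the driver's; note that set_bit()/test_bit() take a bit number, not a mask.

#include <linux/bitops.h>
#include <linux/errno.h>

/* Illustrative bit numbers for a driver-private flags word. */
#define FW_INITIALIZED	0	/* firmware handshake completed */
#define FW_TIMED_OUT	1	/* a command timed out; fail fast */

struct fw_chan {
	unsigned long flags;
};

static int fw_send_cmd(struct fw_chan *ch)
{
	/* Once one command has timed out, refuse new ones immediately
	 * instead of letting every caller block for the full timeout.
	 */
	if (test_bit(FW_TIMED_OUT, &ch->flags))
		return -ETIMEDOUT;

	/* ... ring the doorbell and wait for a completion ... */
	return 0;
}

static void fw_cmd_timed_out(struct fw_chan *ch)
{
	set_bit(FW_TIMED_OUT, &ch->flags);	/* sticky until reset */
}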
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index ceaa2fa54d32..daf7a56e5d7e 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2333,9 +2333,14 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned int stid = GET_TID(rpl);
struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
+ if (!ep) {
+ pr_debug("%s stid %d lookup failure!\n", __func__, stid);
+ goto out;
+ }
pr_debug("%s ep %p\n", __func__, ep);
c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
c4iw_put_ep(&ep->com);
+out:
return 0;
}
@@ -2594,9 +2599,9 @@ fail:
c4iw_put_ep(&child_ep->com);
reject:
reject_cr(dev, hwtid, skb);
+out:
if (parent_ep)
c4iw_put_ep(&parent_ep->com);
-out:
return 0;
}
@@ -3457,7 +3462,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->provider_data = ep;
goto out;
}
-
+ remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
ep->com.local_addr.ss_family);
fail2:
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 9b1566468744..a65e4cbdce2f 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -201,7 +201,6 @@ enum init_completion_state {
CEQ_CREATED,
ILQ_CREATED,
IEQ_CREATED,
- INET_NOTIFIER,
IP_ADDR_REGISTERED,
RDMA_DEV_REGISTERED
};
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 14f36ba4e5be..5230dd3c938c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1504,23 +1504,40 @@ static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,
}
/**
- * listen_port_in_use - determine if port is in use
- * @port: Listen port number
+ * i40iw_port_in_use - determine if port is in use
+ * @port: port number
+ * @active_side: flag for listener side vs active side
*/
-static bool i40iw_listen_port_in_use(struct i40iw_cm_core *cm_core, u16 port)
+static bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port, bool active_side)
{
struct i40iw_cm_listener *listen_node;
+ struct i40iw_cm_node *cm_node;
unsigned long flags;
bool ret = false;
- spin_lock_irqsave(&cm_core->listen_list_lock, flags);
- list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
- if (listen_node->loc_port == port) {
- ret = true;
- break;
+ if (active_side) {
+ /* search connected node list */
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ list_for_each_entry(cm_node, &cm_core->connected_nodes, list) {
+ if (cm_node->loc_port == port) {
+ ret = true;
+ break;
+ }
+ }
+ if (!ret)
+ clear_bit(port, cm_core->active_side_ports);
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ } else {
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
+ if (listen_node->loc_port == port) {
+ ret = true;
+ break;
+ }
}
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
}
- spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
return ret;
}
@@ -1868,7 +1885,7 @@ static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
if (listener->iwdev) {
- if (apbvt_del && !i40iw_listen_port_in_use(cm_core, listener->loc_port))
+ if (apbvt_del && !i40iw_port_in_use(cm_core, listener->loc_port, false))
i40iw_manage_apbvt(listener->iwdev,
listener->loc_port,
I40IW_MANAGE_APBVT_DEL);
@@ -2247,21 +2264,21 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
if (cm_node->listener) {
i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
} else {
- if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) &&
- cm_node->apbvt_set) {
+ if (!i40iw_port_in_use(cm_core, cm_node->loc_port, true) && cm_node->apbvt_set) {
i40iw_manage_apbvt(cm_node->iwdev,
cm_node->loc_port,
I40IW_MANAGE_APBVT_DEL);
- i40iw_get_addr_info(cm_node, &nfo);
- if (cm_node->qhash_set) {
- i40iw_manage_qhash(cm_node->iwdev,
- &nfo,
- I40IW_QHASH_TYPE_TCP_ESTABLISHED,
- I40IW_QHASH_MANAGE_TYPE_DELETE,
- NULL,
- false);
- cm_node->qhash_set = 0;
- }
+ cm_node->apbvt_set = 0;
+ }
+ i40iw_get_addr_info(cm_node, &nfo);
+ if (cm_node->qhash_set) {
+ i40iw_manage_qhash(cm_node->iwdev,
+ &nfo,
+ I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+ I40IW_QHASH_MANAGE_TYPE_DELETE,
+ NULL,
+ false);
+ cm_node->qhash_set = 0;
}
}
@@ -3255,7 +3272,8 @@ static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));
if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
tcp_info->insert_vlan_tag = true;
- tcp_info->vlan_tag = cpu_to_le16(cm_node->vlan_id);
+ tcp_info->vlan_tag = cpu_to_le16(((u16)cm_node->user_pri << I40IW_VLAN_PRIO_SHIFT) |
+ cm_node->vlan_id);
}
if (cm_node->ipv4) {
tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
@@ -3737,10 +3755,8 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct sockaddr_in *raddr;
struct sockaddr_in6 *laddr6;
struct sockaddr_in6 *raddr6;
- bool qhash_set = false;
- int apbvt_set = 0;
- int err = 0;
- enum i40iw_status_code status;
+ int ret = 0;
+ unsigned long flags;
ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
if (!ibqp)
@@ -3789,32 +3805,6 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_info.user_pri = rt_tos2priority(cm_id->tos);
i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n",
__func__, cm_id->tos, cm_info.user_pri);
- if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
- (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
- raddr6->sin6_addr.in6_u.u6_addr32,
- sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
- status = i40iw_manage_qhash(iwdev,
- &cm_info,
- I40IW_QHASH_TYPE_TCP_ESTABLISHED,
- I40IW_QHASH_MANAGE_TYPE_ADD,
- NULL,
- true);
- if (status)
- return -EINVAL;
- qhash_set = true;
- }
- status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD);
- if (status) {
- i40iw_manage_qhash(iwdev,
- &cm_info,
- I40IW_QHASH_TYPE_TCP_ESTABLISHED,
- I40IW_QHASH_MANAGE_TYPE_DELETE,
- NULL,
- false);
- return -EINVAL;
- }
-
- apbvt_set = 1;
cm_id->add_ref(cm_id);
cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,
conn_param->private_data_len,
@@ -3822,17 +3812,40 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
&cm_info);
if (IS_ERR(cm_node)) {
- err = PTR_ERR(cm_node);
- goto err_out;
+ ret = PTR_ERR(cm_node);
+ cm_id->rem_ref(cm_id);
+ return ret;
+ }
+
+ if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
+ (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
+ raddr6->sin6_addr.in6_u.u6_addr32,
+ sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
+ if (i40iw_manage_qhash(iwdev, &cm_info, I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+ I40IW_QHASH_MANAGE_TYPE_ADD, NULL, true)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ cm_node->qhash_set = true;
}
+ spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
+ if (!test_and_set_bit(cm_info.loc_port, iwdev->cm_core.active_side_ports)) {
+ spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+ if (i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ } else {
+ spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
+ }
+
+ cm_node->apbvt_set = true;
i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
!cm_node->ord_size)
cm_node->ord_size = 1;
- cm_node->apbvt_set = apbvt_set;
- cm_node->qhash_set = qhash_set;
iwqp->cm_node = cm_node;
cm_node->iwqp = iwqp;
iwqp->cm_id = cm_id;
@@ -3840,11 +3853,9 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
cm_node->state = I40IW_CM_STATE_SYN_SENT;
- err = i40iw_send_syn(cm_node, 0);
- if (err) {
- i40iw_rem_ref_cm_node(cm_node);
- goto err_out;
- }
+ ret = i40iw_send_syn(cm_node, 0);
+ if (ret)
+ goto err;
}
i40iw_debug(cm_node->dev,
@@ -3853,9 +3864,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
cm_node->rem_port,
cm_node,
cm_node->cm_id);
+
return 0;
-err_out:
+err:
if (cm_info.ipv4)
i40iw_debug(&iwdev->sc_dev,
I40IW_DEBUG_CM,
@@ -3867,22 +3879,10 @@ err_out:
"Api - connect() FAILED: dest addr=%pI6",
cm_info.rem_addr);
- if (qhash_set)
- i40iw_manage_qhash(iwdev,
- &cm_info,
- I40IW_QHASH_TYPE_TCP_ESTABLISHED,
- I40IW_QHASH_MANAGE_TYPE_DELETE,
- NULL,
- false);
-
- if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core,
- cm_info.loc_port))
- i40iw_manage_apbvt(iwdev,
- cm_info.loc_port,
- I40IW_MANAGE_APBVT_DEL);
+ i40iw_rem_ref_cm_node(cm_node);
cm_id->rem_ref(cm_id);
iwdev->cm_core.stats_connect_errs++;
- return err;
+ return ret;
}
/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
index 2e52e38ffcf3..45abef76295b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -71,6 +71,9 @@
#define I40IW_HW_IRD_SETTING_32 32
#define I40IW_HW_IRD_SETTING_64 64
+#define MAX_PORTS 65536
+#define I40IW_VLAN_PRIO_SHIFT 13
+
enum ietf_mpa_flags {
IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
IETF_MPA_FLAGS_CRC = 0x40, /* receive CRC */
@@ -411,6 +414,8 @@ struct i40iw_cm_core {
spinlock_t ht_lock; /* manage hash table */
spinlock_t listen_list_lock; /* listen list */
+ unsigned long active_side_ports[BITS_TO_LONGS(MAX_PORTS)];
+
u64 stats_nodes_created;
u64 stats_nodes_destroyed;
u64 stats_listen_created;
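The active_side_ports bitmap added above enables a claim-once pattern for the APBVT entries; a minimal sketch of that pattern follows (structure and function names are hypothetical):

#include <linux/bitmap.h>
#include <linux/spinlock.h>

#define NUM_PORTS 65536	/* one bit per 16-bit TCP port */

struct cm_core_sketch {
	spinlock_t lock;
	DECLARE_BITMAP(active_ports, NUM_PORTS);
};

/* Returns true if this caller is the first user of @port and the
 * (hypothetical) hardware port entry therefore needs to be added.
 * The lock mirrors the patch's use of ht_lock to serialize against
 * the connected-node scan that clears the bit on last use.
 */
static bool cm_claim_port(struct cm_core_sketch *cc, u16 port)
{
	unsigned long flags;
	bool first;

	spin_lock_irqsave(&cc->lock, flags);
	first = !test_and_set_bit(port, cc->active_ports);
	spin_unlock_irqrestore(&cc->lock, flags);

	return first;
}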
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index cc742c3132c6..27590ae21881 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -99,8 +99,6 @@ static struct notifier_block i40iw_net_notifier = {
.notifier_call = i40iw_net_event
};
-static atomic_t i40iw_notifiers_registered;
-
/**
* i40iw_find_i40e_handler - find a handler given a client info
* @ldev: pointer to a client info
@@ -1376,11 +1374,20 @@ error:
*/
static void i40iw_register_notifiers(void)
{
- if (atomic_inc_return(&i40iw_notifiers_registered) == 1) {
- register_inetaddr_notifier(&i40iw_inetaddr_notifier);
- register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
- register_netevent_notifier(&i40iw_net_notifier);
- }
+ register_inetaddr_notifier(&i40iw_inetaddr_notifier);
+ register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
+ register_netevent_notifier(&i40iw_net_notifier);
+}
+
+/**
+ * i40iw_unregister_notifiers - unregister tcp ip notifiers
+ */
+
+static void i40iw_unregister_notifiers(void)
+{
+ unregister_netevent_notifier(&i40iw_net_notifier);
+ unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
+ unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
}
/**
@@ -1400,6 +1407,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
u32 i;
u32 size;
+ if (!ldev->msix_count) {
+ i40iw_pr_err("No MSI-X vectors\n");
+ return I40IW_ERR_CONFIG;
+ }
+
iwdev->msix_count = ldev->msix_count;
size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
@@ -1462,12 +1474,6 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev)
if (!iwdev->reset)
i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
/* fallthrough */
- case INET_NOTIFIER:
- if (!atomic_dec_return(&i40iw_notifiers_registered)) {
- unregister_netevent_notifier(&i40iw_net_notifier);
- unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
- unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
- }
/* fallthrough */
case PBLE_CHUNK_MEM:
i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
@@ -1550,7 +1556,7 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
status = i40iw_save_msix_info(iwdev, ldev);
if (status)
- goto exit;
+ return status;
iwdev->hw.dev_context = (void *)ldev->pcidev;
iwdev->hw.hw_addr = ldev->hw_addr;
status = i40iw_allocate_dma_mem(&iwdev->hw,
@@ -1667,8 +1673,6 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
break;
iwdev->init_state = PBLE_CHUNK_MEM;
iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
- i40iw_register_notifiers();
- iwdev->init_state = INET_NOTIFIER;
status = i40iw_add_mac_ip(iwdev);
if (status)
break;
@@ -2018,6 +2022,8 @@ static int __init i40iw_init_module(void)
i40iw_client.type = I40E_CLIENT_IWARP;
spin_lock_init(&i40iw_handler_lock);
ret = i40e_register_client(&i40iw_client);
+ i40iw_register_notifiers();
+
return ret;
}
@@ -2029,6 +2035,7 @@ static int __init i40iw_init_module(void)
*/
static void __exit i40iw_exit_module(void)
{
+ i40iw_unregister_notifiers();
i40e_unregister_client(&i40iw_client);
}
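Moving the notifier calls into module init/exit replaces the per-device refcount with the usual module-scope pattern; a minimal sketch, assuming a single notifier:

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>

static int sketch_inetaddr_event(struct notifier_block *nb,
				 unsigned long event, void *ptr)
{
	/* Per-device state must be validated here, since the notifier
	 * now outlives any single device instance.
	 */
	return NOTIFY_DONE;
}

static struct notifier_block sketch_inetaddr_nb = {
	.notifier_call = sketch_inetaddr_event,
};

static int __init sketch_init(void)
{
	return register_inetaddr_notifier(&sketch_inetaddr_nb);
}

static void __exit sketch_exit(void)
{
	unregister_inetaddr_notifier(&sketch_inetaddr_nb);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");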
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 62f1f45b8737..e52dbbb4165e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -160,7 +160,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
return NOTIFY_DONE;
iwdev = &hdl->device;
- if (iwdev->init_state < INET_NOTIFIER)
+ if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
return NOTIFY_DONE;
netdev = iwdev->ldev->netdev;
@@ -217,7 +217,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
return NOTIFY_DONE;
iwdev = &hdl->device;
- if (iwdev->init_state < INET_NOTIFIER)
+ if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
return NOTIFY_DONE;
netdev = iwdev->ldev->netdev;
@@ -266,7 +266,7 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *
if (!iwhdl)
return NOTIFY_DONE;
iwdev = &iwhdl->device;
- if (iwdev->init_state < INET_NOTIFIER)
+ if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
return NOTIFY_DONE;
p = (__be32 *)neigh->primary_key;
i40iw_copy_ip_ntohl(local_ipaddr, p);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 1aa411034a27..28b3d02d511b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1027,7 +1027,19 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
iwqp->last_aeq = I40IW_AE_RESET_SENT;
spin_unlock_irqrestore(&iwqp->lock, flags);
+ i40iw_cm_disconn(iwqp);
}
+ } else {
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->cm_id) {
+ if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
+ iwqp->cm_id->add_ref(iwqp->cm_id);
+ i40iw_schedule_cm_timer(iwqp->cm_node,
+ (struct i40iw_puda_buf *)iwqp,
+ I40IW_TIMER_TYPE_CLOSE, 1, 0);
+ }
+ }
+ spin_unlock_irqrestore(&iwqp->lock, flags);
}
}
return 0;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index ab3c562d5ba7..05fb4bdff6a0 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3837,11 +3837,13 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
if (!dbg)
return -ENOMEM;
+ dev->delay_drop.dbg = dbg;
+
dbg->dir_debugfs =
debugfs_create_dir("delay_drop",
dev->mdev->priv.dbg_root);
if (!dbg->dir_debugfs)
- return -ENOMEM;
+ goto out_debugfs;
dbg->events_cnt_debugfs =
debugfs_create_atomic_t("num_timeout_events", 0400,
@@ -3865,8 +3867,6 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
if (!dbg->timeout_debugfs)
goto out_debugfs;
- dev->delay_drop.dbg = dbg;
-
return 0;
out_debugfs:
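The fix publishes dev->delay_drop.dbg before any debugfs file is created, so the shared error path can free partially created state. Roughly this shape, with all names hypothetical:

#include <linux/debugfs.h>
#include <linux/slab.h>

struct dbg_state {
	struct dentry *dir;
};

struct some_dev {
	struct dbg_state *dbg;
};

/* Safe to call with any subset of the files created. */
static void dbg_cleanup_sketch(struct some_dev *dev)
{
	if (!dev->dbg)
		return;
	debugfs_remove_recursive(dev->dbg->dir);
	kfree(dev->dbg);
	dev->dbg = NULL;
}

static int dbg_init_sketch(struct some_dev *dev)
{
	struct dbg_state *dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);

	if (!dbg)
		return -ENOMEM;

	dev->dbg = dbg;		/* visible to the cleanup path from now on */

	dbg->dir = debugfs_create_dir("sketch", NULL);
	if (!dbg->dir)
		goto out_cleanup;

	return 0;

out_cleanup:
	dbg_cleanup_sketch(dev);
	return -ENOMEM;
}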
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index dcb5942f9fb5..65b166cc7437 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -252,7 +252,10 @@ static int ocrdma_get_mbx_errno(u32 status)
case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES:
err_num = -EAGAIN;
break;
+ default:
+ err_num = -EFAULT;
}
+ break;
default:
err_num = -EFAULT;
}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 663a0c301c43..984aa3484928 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -416,9 +416,34 @@ static inline enum ib_wc_status pvrdma_wc_status_to_ib(
return (enum ib_wc_status)status;
}
-static inline int pvrdma_wc_opcode_to_ib(int opcode)
-{
- return opcode;
+static inline int pvrdma_wc_opcode_to_ib(unsigned int opcode)
+{
+ switch (opcode) {
+ case PVRDMA_WC_SEND:
+ return IB_WC_SEND;
+ case PVRDMA_WC_RDMA_WRITE:
+ return IB_WC_RDMA_WRITE;
+ case PVRDMA_WC_RDMA_READ:
+ return IB_WC_RDMA_READ;
+ case PVRDMA_WC_COMP_SWAP:
+ return IB_WC_COMP_SWAP;
+ case PVRDMA_WC_FETCH_ADD:
+ return IB_WC_FETCH_ADD;
+ case PVRDMA_WC_LOCAL_INV:
+ return IB_WC_LOCAL_INV;
+ case PVRDMA_WC_FAST_REG_MR:
+ return IB_WC_REG_MR;
+ case PVRDMA_WC_MASKED_COMP_SWAP:
+ return IB_WC_MASKED_COMP_SWAP;
+ case PVRDMA_WC_MASKED_FETCH_ADD:
+ return IB_WC_MASKED_FETCH_ADD;
+ case PVRDMA_WC_RECV:
+ return IB_WC_RECV;
+ case PVRDMA_WC_RECV_RDMA_WITH_IMM:
+ return IB_WC_RECV_RDMA_WITH_IMM;
+ default:
+ return IB_WC_SEND;
+ }
}
static inline int pvrdma_wc_flags_to_ib(int flags)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 14b62f7472b4..7774654c2ccb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -823,12 +823,18 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
wc->status != IB_WC_WR_FLUSH_ERR) {
struct ipoib_neigh *neigh;
- if (wc->status != IB_WC_RNR_RETRY_EXC_ERR)
- ipoib_warn(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n",
- wc->status, wr_id, wc->vendor_err);
+ /* IB_WC_RETRY_EXC_ERR and IB_WC_RNR_RETRY_EXC_ERR are part of the
+ * normal connection life cycle, so log them at debug level only.
+ */
+ if (wc->status == IB_WC_RNR_RETRY_EXC_ERR ||
+ wc->status == IB_WC_RETRY_EXC_ERR)
+ ipoib_dbg(priv,
+ "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
+ __func__, wc->status, wr_id, wc->vendor_err);
else
- ipoib_dbg(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n",
- wc->status, wr_id, wc->vendor_err);
+ ipoib_warn(priv,
+ "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
+ __func__, wc->status, wr_id, wc->vendor_err);
spin_lock_irqsave(&priv->lock, flags);
neigh = tx->neigh;
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index 8f2042432c85..66a46c84e28f 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -237,9 +237,15 @@ int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file)
EXPORT_SYMBOL_GPL(input_ff_erase);
/*
- * flush_effects - erase all effects owned by a file handle
+ * input_ff_flush - erase all effects owned by a file handle
+ * @dev: input device to erase effect from
+ * @file: purported owner of the effects
+ *
+ * This function erases all force-feedback effects associated with
+ * the given owner from the specified device. Note that @file may be %NULL,
+ * in which case all effects will be erased.
*/
-static int flush_effects(struct input_dev *dev, struct file *file)
+int input_ff_flush(struct input_dev *dev, struct file *file)
{
struct ff_device *ff = dev->ff;
int i;
@@ -255,6 +261,7 @@ static int flush_effects(struct input_dev *dev, struct file *file)
return 0;
}
+EXPORT_SYMBOL_GPL(input_ff_flush);
/**
* input_ff_event() - generic handler for force-feedback events
@@ -343,7 +350,7 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects)
mutex_init(&ff->mutex);
dev->ff = ff;
- dev->flush = flush_effects;
+ dev->flush = input_ff_flush;
dev->event = input_ff_event;
__set_bit(EV_FF, dev->evbit);
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 022be0e22eba..443151de90c6 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -98,14 +98,15 @@ static int uinput_request_reserve_slot(struct uinput_device *udev,
uinput_request_alloc_id(udev, request));
}
-static void uinput_request_done(struct uinput_device *udev,
- struct uinput_request *request)
+static void uinput_request_release_slot(struct uinput_device *udev,
+ unsigned int id)
{
/* Mark slot as available */
- udev->requests[request->id] = NULL;
- wake_up(&udev->requests_waitq);
+ spin_lock(&udev->requests_lock);
+ udev->requests[id] = NULL;
+ spin_unlock(&udev->requests_lock);
- complete(&request->done);
+ wake_up(&udev->requests_waitq);
}
static int uinput_request_send(struct uinput_device *udev,
@@ -138,20 +139,22 @@ static int uinput_request_send(struct uinput_device *udev,
static int uinput_request_submit(struct uinput_device *udev,
struct uinput_request *request)
{
- int error;
+ int retval;
- error = uinput_request_reserve_slot(udev, request);
- if (error)
- return error;
+ retval = uinput_request_reserve_slot(udev, request);
+ if (retval)
+ return retval;
- error = uinput_request_send(udev, request);
- if (error) {
- uinput_request_done(udev, request);
- return error;
- }
+ retval = uinput_request_send(udev, request);
+ if (retval)
+ goto out;
wait_for_completion(&request->done);
- return request->retval;
+ retval = request->retval;
+
+ out:
+ uinput_request_release_slot(udev, request->id);
+ return retval;
}
/*
@@ -169,7 +172,7 @@ static void uinput_flush_requests(struct uinput_device *udev)
request = udev->requests[i];
if (request) {
request->retval = -ENODEV;
- uinput_request_done(udev, request);
+ complete(&request->done);
}
}
@@ -230,6 +233,18 @@ static int uinput_dev_erase_effect(struct input_dev *dev, int effect_id)
return uinput_request_submit(udev, &request);
}
+static int uinput_dev_flush(struct input_dev *dev, struct file *file)
+{
+ /*
+ * If we are called with file == NULL that means we are tearing
+ * down the device, and therefore we can not handle FF erase
+ * requests: either we are handling UI_DEV_DESTROY (and holding
+ * the udev->mutex), or the file descriptor is closed and there is
+ * nobody on the other side anymore.
+ */
+ return file ? input_ff_flush(dev, file) : 0;
+}
+
static void uinput_destroy_device(struct uinput_device *udev)
{
const char *name, *phys;
@@ -297,6 +312,12 @@ static int uinput_create_device(struct uinput_device *udev)
dev->ff->playback = uinput_dev_playback;
dev->ff->set_gain = uinput_dev_set_gain;
dev->ff->set_autocenter = uinput_dev_set_autocenter;
+ /*
+ * The standard input_ff_flush() implementation does
+ * not quite work for uinput as we can't reasonably
+ * handle FF requests during device teardown.
+ */
+ dev->flush = uinput_dev_flush;
}
error = input_register_device(udev->dev);
@@ -939,7 +960,7 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
}
req->retval = ff_up.retval;
- uinput_request_done(udev, req);
+ complete(&req->done);
goto out;
case UI_END_FF_ERASE:
@@ -955,7 +976,7 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
}
req->retval = ff_erase.retval;
- uinput_request_done(udev, req);
+ complete(&req->done);
goto out;
}
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index 15b1330606c1..e19eb60b3d2f 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -598,7 +598,7 @@ static int elan_i2c_write_fw_block(struct i2c_client *client,
}
/* Wait for F/W to update one page ROM data. */
- msleep(20);
+ msleep(35);
error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val);
if (error) {
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 49bd2ab8c507..f3a21343e636 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -278,7 +278,7 @@ config EXYNOS_IOMMU_DEBUG
config IPMMU_VMSA
bool "Renesas VMSA-compatible IPMMU"
depends on ARM || IOMMU_DMA
- depends on ARCH_RENESAS || COMPILE_TEST
+ depends on ARCH_RENESAS || (COMPILE_TEST && !GENERIC_ATOMIC64)
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU
@@ -373,7 +373,8 @@ config MTK_IOMMU_V1
config QCOM_IOMMU
# Note: iommu drivers cannot (yet?) be built as modules
bool "Qualcomm IOMMU Support"
- depends on ARCH_QCOM || COMPILE_TEST
+ depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64)
+ depends on HAS_DMA
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index ca5ebaeafd6a..57c920c1372d 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -497,7 +497,7 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
#define dmar_parse_one_rhsa dmar_res_noop
#endif
-static void __init
+static void
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
struct acpi_dmar_hardware_unit *drhd;
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index e60e3dba85a0..50947ebb6d17 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -157,10 +157,7 @@ static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
err = of_iommu_xlate(info->dev, &iommu_spec);
of_node_put(iommu_spec.np);
- if (err)
- return err;
-
- return info->np == pdev->bus->dev.of_node;
+ return err;
}
const struct iommu_ops *of_iommu_configure(struct device *dev,
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 519149ec9053..b5df99c6f680 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1042,7 +1042,7 @@ static int get_cpu_number(struct device_node *dn)
{
const __be32 *cell;
u64 hwid;
- int i;
+ int cpu;
cell = of_get_property(dn, "reg", NULL);
if (!cell)
@@ -1056,9 +1056,9 @@ static int get_cpu_number(struct device_node *dn)
if (hwid & ~MPIDR_HWID_BITMASK)
return -1;
- for (i = 0; i < num_possible_cpus(); i++)
- if (cpu_logical_map(i) == hwid)
- return i;
+ for_each_possible_cpu(cpu)
+ if (cpu_logical_map(cpu) == hwid)
+ return cpu;
return -1;
}
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index 2370e6d9e603..cd0bcc3b7e33 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -173,7 +173,9 @@ int its_map_vlpi(int irq, struct its_vlpi_map *map)
{
struct its_cmd_info info = {
.cmd_type = MAP_VLPI,
- .map = map,
+ {
+ .map = map,
+ },
};
/*
@@ -189,7 +191,9 @@ int its_get_vlpi(int irq, struct its_vlpi_map *map)
{
struct its_cmd_info info = {
.cmd_type = GET_VLPI,
- .map = map,
+ {
+ .map = map,
+ },
};
return irq_set_vcpu_affinity(irq, &info);
@@ -205,7 +209,9 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv)
{
struct its_cmd_info info = {
.cmd_type = inv ? PROP_UPDATE_AND_INV_VLPI : PROP_UPDATE_VLPI,
- .config = config,
+ {
+ .config = config,
+ },
};
return irq_set_vcpu_affinity(irq, &info);
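The extra braces are needed because .map and .config live in an anonymous union inside struct its_cmd_info; below is a stand-alone C sketch of the initializer form the patch switches to (struct layout assumed and simplified):

/* 'map' and 'config' sit in a C11 anonymous union, so the initializer
 * gets its own brace level; some compilers reject designated
 * initializers that name members of anonymous aggregates directly.
 */
struct cmd_info_sketch {
	int cmd_type;
	union {
		void *map;
		unsigned char config;
	};
};

static struct cmd_info_sketch make_map_cmd(void *map)
{
	struct cmd_info_sketch info = {
		.cmd_type = 1,
		{
			.map = map,
		},
	};

	return info;
}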
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 6e52a88bbd9e..40159ac12ac8 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -169,7 +169,7 @@ static void gic_mask_irq(struct irq_data *d)
{
unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
- write_gic_rmask(BIT(intr));
+ write_gic_rmask(intr);
gic_clear_pcpu_masks(intr);
}
@@ -179,7 +179,7 @@ static void gic_unmask_irq(struct irq_data *d)
unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
unsigned int cpu;
- write_gic_smask(BIT(intr));
+ write_gic_smask(intr);
gic_clear_pcpu_masks(intr);
cpu = cpumask_first_and(affinity, cpu_online_mask);
@@ -767,7 +767,7 @@ static int __init gic_of_init(struct device_node *node,
for (i = 0; i < gic_shared_intrs; i++) {
change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
change_gic_trig(i, GIC_TRIG_LEVEL);
- write_gic_rmask(BIT(i));
+ write_gic_rmask(i);
}
for (i = 0; i < gic_vpes; i++) {
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 6c44609fd83a..cd2b3c69771a 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -825,7 +825,6 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
isdn_net_local *lp;
struct ippp_struct *is;
int proto;
- unsigned char protobuf[4];
is = file->private_data;
@@ -839,24 +838,28 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
if (!lp)
printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n");
else {
- /*
- * Don't reset huptimer for
- * LCP packets. (Echo requests).
- */
- if (copy_from_user(protobuf, buf, 4))
- return -EFAULT;
- proto = PPP_PROTOCOL(protobuf);
- if (proto != PPP_LCP)
- lp->huptimer = 0;
+ if (lp->isdn_device < 0 || lp->isdn_channel < 0) {
+ unsigned char protobuf[4];
+ /*
+ * Don't reset huptimer for
+ * LCP packets. (Echo requests).
+ */
+ if (copy_from_user(protobuf, buf, 4))
+ return -EFAULT;
+
+ proto = PPP_PROTOCOL(protobuf);
+ if (proto != PPP_LCP)
+ lp->huptimer = 0;
- if (lp->isdn_device < 0 || lp->isdn_channel < 0)
return 0;
+ }
if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) &&
lp->dialstate == 0 &&
(lp->flags & ISDN_NET_CONNECTED)) {
unsigned short hl;
struct sk_buff *skb;
+ unsigned char *cpy_buf;
/*
* we need to reserve enough space in front of
* sk_buff. old call to dev_alloc_skb only reserved
@@ -869,11 +872,21 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
return count;
}
skb_reserve(skb, hl);
- if (copy_from_user(skb_put(skb, count), buf, count))
+ cpy_buf = skb_put(skb, count);
+ if (copy_from_user(cpy_buf, buf, count))
{
kfree_skb(skb);
return -EFAULT;
}
+
+ /*
+ * Don't reset huptimer for
+ * LCP packets. (Echo requests).
+ */
+ proto = PPP_PROTOCOL(cpy_buf);
+ if (proto != PPP_LCP)
+ lp->huptimer = 0;
+
if (is->debug & 0x40) {
printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len);
isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
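The isdn_ppp_write() rework parses the protocol field from the kernel-side copy already placed in the skb rather than doing a second copy_from_user() of the first bytes, closing a double-fetch window. The general shape, as a sketch (assumes the skb was allocated with enough tailroom for @count bytes):

#include <linux/skbuff.h>
#include <linux/uaccess.h>

/* Copy once, then make all decisions on the kernel-side copy. A
 * separate copy_from_user() of the header would let userspace feed
 * two different views of the same buffer (a double-fetch race).
 */
static int xmit_sketch(struct sk_buff *skb, const char __user *buf,
		       int count)
{
	unsigned char *data = skb_put(skb, count);

	if (copy_from_user(data, buf, count))
		return -EFAULT;

	/* protocol checks etc. operate on 'data' only from here on */
	return 0;
}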
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4188a4881148..076409455b60 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -811,6 +811,14 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
spin_unlock(&head->batch_head->batch_lock);
goto unlock_out;
}
+ /*
+ * We must assign batch_head of this stripe within the
+ * batch_lock, otherwise clear_batch_ready() of the batch head
+ * stripe could clear the BATCH_READY bit of this stripe before
+ * this stripe->batch_head is assigned, which would confuse
+ * clear_batch_ready() for this stripe
+ */
+ sh->batch_head = head->batch_head;
/*
* at this point, head's BATCH_READY could be cleared, but we
@@ -818,8 +826,6 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
*/
list_add(&sh->batch_list, &head->batch_list);
spin_unlock(&head->batch_head->batch_lock);
-
- sh->batch_head = head->batch_head;
} else {
head->batch_head = head;
sh->batch_head = head->batch_head;
@@ -4599,7 +4605,8 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
(1 << STRIPE_PREREAD_ACTIVE) |
- (1 << STRIPE_DEGRADED)),
+ (1 << STRIPE_DEGRADED) |
+ (1 << STRIPE_ON_UNPLUG_LIST)),
head_sh->state & (1 << STRIPE_INSYNC));
sh->check_state = head_sh->check_state;
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index affa7370ba82..74c663b1c0a7 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -242,6 +242,12 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
+ /*
+ * mmc_init_request() depends on card->bouncesz so it must be calculated
+ * before blk_init_allocated_queue() starts allocating requests.
+ */
+ card->bouncesz = mmc_queue_calc_bouncesz(host);
+
mq->card = card;
mq->queue = blk_alloc_queue(GFP_KERNEL);
if (!mq->queue)
@@ -265,7 +271,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (mmc_can_erase(card))
mmc_queue_setup_discard(mq->queue, card);
- card->bouncesz = mmc_queue_calc_bouncesz(host);
if (card->bouncesz) {
blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
blk_queue_max_segments(mq->queue, card->bouncesz / 512);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 02179ed2a40d..8c15637178ff 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -5,7 +5,7 @@
comment "MMC/SD/SDIO Host Controller Drivers"
config MMC_DEBUG
- bool "MMC host drivers debugginG"
+ bool "MMC host drivers debugging"
depends on MMC != n
help
This is an option for use by developers; most people should
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c
index b9cc95998799..eee08d81b242 100644
--- a/drivers/mmc/host/cavium-thunderx.c
+++ b/drivers/mmc/host/cavium-thunderx.c
@@ -7,6 +7,7 @@
*
* Copyright (C) 2016 Cavium Inc.
*/
+#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
@@ -149,8 +150,11 @@ error:
for (i = 0; i < CAVIUM_MAX_MMC; i++) {
if (host->slot[i])
cvm_mmc_of_slot_remove(host->slot[i]);
- if (host->slot_pdev[i])
+ if (host->slot_pdev[i]) {
+ get_device(&host->slot_pdev[i]->dev);
of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
+ put_device(&host->slot_pdev[i]->dev);
+ }
}
clk_disable_unprepare(host->clk);
return ret;
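The get_device()/put_device() pair keeps the struct device alive across of_platform_device_destroy(), which may drop the last reference. The generic pattern, sketched with a hypothetical destroy callback:

#include <linux/device.h>

/* Hold a temporary reference while tearing the device down, so the
 * struct device cannot be freed out from under a caller that still
 * holds a pointer to it (sketch; destroy_fn is hypothetical).
 */
static void teardown_sketch(struct device *dev,
			    void (*destroy_fn)(struct device *dev))
{
	get_device(dev);
	destroy_fn(dev);	/* may drop what was the last reference */
	put_device(dev);	/* drop ours; freeing happens here if last */
}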
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index c3bb358ef01e..5796468db653 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -707,7 +707,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
}
res = clk_prepare_enable(host->clk);
if (res)
- goto err_exit1;
+ goto err_put_clk;
nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
nand_chip->dev_ready = lpc32xx_nand_device_ready;
@@ -814,6 +814,7 @@ err_exit3:
dma_release_channel(host->dma_chan);
err_exit2:
clk_disable_unprepare(host->clk);
+err_put_clk:
clk_put(host->clk);
err_exit1:
lpc32xx_wp_enable(host);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index bcc8cef1c615..12edaae17d81 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2668,7 +2668,7 @@ static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
struct mtd_oob_ops *ops)
{
- int chipnr, realpage, page, blockmask, column;
+ int chipnr, realpage, page, column;
struct nand_chip *chip = mtd_to_nand(mtd);
uint32_t writelen = ops->len;
@@ -2704,7 +2704,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
realpage = (int)(to >> chip->page_shift);
page = realpage & chip->pagemask;
- blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
/* Invalidate the page cache, when we write to the cached page */
if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index cf1d4a15e10a..19c000722cbc 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -1784,7 +1784,7 @@ spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
* @nor: pointer to a 'struct spi_nor'
* @addr: offset in the SFDP area to start reading data from
* @len: number of bytes to read
- * @buf: buffer where the SFDP data are copied into
+ * @buf: buffer where the SFDP data are copied into (dma-safe memory)
*
* Whatever the actual numbers of bytes for address and dummy cycles are
* for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
@@ -1829,6 +1829,36 @@ read_err:
return ret;
}
+/**
+ * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
+ * @nor: pointer to a 'struct spi_nor'
+ * @addr: offset in the SFDP area to start reading data from
+ * @len: number of bytes to read
+ * @buf: buffer where the SFDP data are copied into
+ *
+ * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer as @buf is now not
+ * guaranteed to be dma-safe.
+ *
+ * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
+ * otherwise.
+ */
+static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
+ size_t len, void *buf)
+{
+ void *dma_safe_buf;
+ int ret;
+
+ dma_safe_buf = kmalloc(len, GFP_KERNEL);
+ if (!dma_safe_buf)
+ return -ENOMEM;
+
+ ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
+ memcpy(buf, dma_safe_buf, len);
+ kfree(dma_safe_buf);
+
+ return ret;
+}
+
struct sfdp_parameter_header {
u8 id_lsb;
u8 minor;
@@ -2101,7 +2131,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
bfpt_header->length * sizeof(u32));
addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
memset(&bfpt, 0, sizeof(bfpt));
- err = spi_nor_read_sfdp(nor, addr, len, &bfpt);
+ err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt);
if (err < 0)
return err;
@@ -2127,6 +2157,15 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
params->size = bfpt.dwords[BFPT_DWORD(2)];
if (params->size & BIT(31)) {
params->size &= ~BIT(31);
+
+ /*
+ * Prevent overflows on params->size. A NOR flash of 2^64
+ * bits is unlikely to exist, so such a value almost certainly
+ * means the BFPT we are reading is corrupted.
+ */
+ if (params->size > 63)
+ return -EINVAL;
+
params->size = 1ULL << params->size;
} else {
params->size++;
@@ -2243,7 +2282,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
int i, err;
/* Get the SFDP header. */
- err = spi_nor_read_sfdp(nor, 0, sizeof(header), &header);
+ err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
if (err < 0)
return err;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index c3c53f6cd9e6..83eec9a8c275 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -432,6 +432,27 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}
+static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
+ u64 *tx_bytes, u64 *tx_packets)
+{
+ struct bcm_sysport_tx_ring *ring;
+ u64 bytes = 0, packets = 0;
+ unsigned int start;
+ unsigned int q;
+
+ for (q = 0; q < priv->netdev->num_tx_queues; q++) {
+ ring = &priv->tx_rings[q];
+ do {
+ start = u64_stats_fetch_begin_irq(&priv->syncp);
+ bytes = ring->bytes;
+ packets = ring->packets;
+ } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
+
+ *tx_bytes += bytes;
+ *tx_packets += packets;
+ }
+}
+
static void bcm_sysport_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -439,11 +460,16 @@ static void bcm_sysport_get_stats(struct net_device *dev,
struct bcm_sysport_stats64 *stats64 = &priv->stats64;
struct u64_stats_sync *syncp = &priv->syncp;
struct bcm_sysport_tx_ring *ring;
+ u64 tx_bytes = 0, tx_packets = 0;
unsigned int start;
int i, j;
- if (netif_running(dev))
+ if (netif_running(dev)) {
bcm_sysport_update_mib_counters(priv);
+ bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
+ stats64->tx_bytes = tx_bytes;
+ stats64->tx_packets = tx_packets;
+ }
for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
const struct bcm_sysport_stats *s;
@@ -461,12 +487,13 @@ static void bcm_sysport_get_stats(struct net_device *dev,
continue;
p += s->stat_offset;
- if (s->stat_sizeof == sizeof(u64))
+ if (s->stat_sizeof == sizeof(u64) &&
+ s->type == BCM_SYSPORT_STAT_NETDEV64) {
do {
start = u64_stats_fetch_begin_irq(syncp);
data[i] = *(u64 *)p;
} while (u64_stats_fetch_retry_irq(syncp, start));
- else
+ } else
data[i] = *(u32 *)p;
j++;
}
@@ -1716,27 +1743,12 @@ static void bcm_sysport_get_stats64(struct net_device *dev,
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
struct bcm_sysport_stats64 *stats64 = &priv->stats64;
- struct bcm_sysport_tx_ring *ring;
- u64 tx_packets = 0, tx_bytes = 0;
unsigned int start;
- unsigned int q;
netdev_stats_to_stats64(stats, &dev->stats);
- for (q = 0; q < dev->num_tx_queues; q++) {
- ring = &priv->tx_rings[q];
- do {
- start = u64_stats_fetch_begin_irq(&priv->syncp);
- tx_bytes = ring->bytes;
- tx_packets = ring->packets;
- } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
-
- stats->tx_bytes += tx_bytes;
- stats->tx_packets += tx_packets;
- }
-
- stats64->tx_bytes = stats->tx_bytes;
- stats64->tx_packets = stats->tx_packets;
+ bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
+ &stats->tx_packets);
do {
start = u64_stats_fetch_begin_irq(&priv->syncp);
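Both callers of the new bcm_sysport_update_tx_stats() helper rely on the standard u64_stats retry loop; here is a self-contained sketch of that reader pattern (struct name is illustrative):

#include <linux/u64_stats_sync.h>

struct ring_stats_sketch {
	struct u64_stats_sync syncp;
	u64 bytes;
	u64 packets;
};

static void read_ring_stats(struct ring_stats_sketch *rs,
			    u64 *bytes, u64 *packets)
{
	unsigned int start;

	/* Retry until no writer updated the counters mid-read; on
	 * 64-bit builds this compiles down to plain loads.
	 */
	do {
		start = u64_stats_fetch_begin_irq(&rs->syncp);
		*bytes = rs->bytes;
		*packets = rs->packets;
	} while (u64_stats_fetch_retry_irq(&rs->syncp, start));
}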
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index ccd699fb2d70..7dd3d131043a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -750,6 +750,10 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
{
int rc = 0;
+ if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
+ cls_flower->common.chain_index)
+ return -EOPNOTSUPP;
+
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 38c7b21e5d63..ede1876a9a19 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -374,8 +374,8 @@ struct bufdesc_ex {
#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
#define FEC_ENET_TS_TIMER ((uint)0x00008000)
-#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER)
-#define FEC_NAPI_IMASK (FEC_ENET_MII | FEC_ENET_TS_TIMER)
+#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
+#define FEC_NAPI_IMASK FEC_ENET_MII
#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
/* ENET interrupt coalescing macro define */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 56f56d6ada9c..3dc2d771a222 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1559,14 +1559,14 @@ fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
if (int_events == 0)
return false;
- if (int_events & FEC_ENET_RXF)
+ if (int_events & FEC_ENET_RXF_0)
fep->work_rx |= (1 << 2);
if (int_events & FEC_ENET_RXF_1)
fep->work_rx |= (1 << 0);
if (int_events & FEC_ENET_RXF_2)
fep->work_rx |= (1 << 1);
- if (int_events & FEC_ENET_TXF)
+ if (int_events & FEC_ENET_TXF_0)
fep->work_tx |= (1 << 2);
if (int_events & FEC_ENET_TXF_1)
fep->work_tx |= (1 << 0);
@@ -1604,8 +1604,8 @@ fec_enet_interrupt(int irq, void *dev_id)
}
if (fep->ptp_clock)
- fec_ptp_check_pps_event(fep);
-
+ if (fec_ptp_check_pps_event(fep))
+ ret = IRQ_HANDLED;
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 59efbd605416..5bcb2238acb2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -37,20 +37,15 @@ static bool hnae3_client_match(enum hnae3_client_type client_type,
}
static int hnae3_match_n_instantiate(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev,
- bool is_reg, bool *matched)
+ struct hnae3_ae_dev *ae_dev, bool is_reg)
{
int ret;
- *matched = false;
-
/* check if this client matches the type of ae_dev */
if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
return 0;
}
- /* there is a match of client and dev */
- *matched = true;
/* now, (un-)instantiate client by calling lower layer */
if (is_reg) {
@@ -69,7 +64,6 @@ int hnae3_register_client(struct hnae3_client *client)
{
struct hnae3_client *client_tmp;
struct hnae3_ae_dev *ae_dev;
- bool matched;
int ret = 0;
mutex_lock(&hnae3_common_lock);
@@ -86,7 +80,7 @@ int hnae3_register_client(struct hnae3_client *client)
/* if the client could not be initialized on the current port, for
* any reason, move on to the next available port
*/
- ret = hnae3_match_n_instantiate(client, ae_dev, true, &matched);
+ ret = hnae3_match_n_instantiate(client, ae_dev, true);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed for port\n");
@@ -102,12 +96,11 @@ EXPORT_SYMBOL(hnae3_register_client);
void hnae3_unregister_client(struct hnae3_client *client)
{
struct hnae3_ae_dev *ae_dev;
- bool matched;
mutex_lock(&hnae3_common_lock);
/* un-initialize the client on every matched port */
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
- hnae3_match_n_instantiate(client, ae_dev, false, &matched);
+ hnae3_match_n_instantiate(client, ae_dev, false);
}
list_del(&client->node);
@@ -124,7 +117,6 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
const struct pci_device_id *id;
struct hnae3_ae_dev *ae_dev;
struct hnae3_client *client;
- bool matched;
int ret = 0;
mutex_lock(&hnae3_common_lock);
@@ -151,13 +143,10 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
* initialize the matching client instance
*/
list_for_each_entry(client, &hnae3_client_list, node) {
- ret = hnae3_match_n_instantiate(client, ae_dev, true,
- &matched);
+ ret = hnae3_match_n_instantiate(client, ae_dev, true);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed\n");
- if (matched)
- break;
}
}
@@ -175,7 +164,6 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
const struct pci_device_id *id;
struct hnae3_ae_dev *ae_dev;
struct hnae3_client *client;
- bool matched;
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_dev */
@@ -187,12 +175,8 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
/* check the client list for a match with this ae_dev type and
* un-initialize the matching client instance
*/
- list_for_each_entry(client, &hnae3_client_list, node) {
- hnae3_match_n_instantiate(client, ae_dev, false,
- &matched);
- if (matched)
- break;
- }
+ list_for_each_entry(client, &hnae3_client_list, node)
+ hnae3_match_n_instantiate(client, ae_dev, false);
ae_algo->ops->uninit_ae_dev(ae_dev);
hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
@@ -212,7 +196,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo;
struct hnae3_client *client;
- bool matched;
int ret = 0;
mutex_lock(&hnae3_common_lock);
@@ -246,13 +229,10 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
* initialize the matching client instance
*/
list_for_each_entry(client, &hnae3_client_list, node) {
- ret = hnae3_match_n_instantiate(client, ae_dev, true,
- &matched);
+ ret = hnae3_match_n_instantiate(client, ae_dev, true);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed\n");
- if (matched)
- break;
}
out_err:
@@ -270,7 +250,6 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
const struct pci_device_id *id;
struct hnae3_ae_algo *ae_algo;
struct hnae3_client *client;
- bool matched;
mutex_lock(&hnae3_common_lock);
/* Check if there are matched ae_algo */
@@ -279,12 +258,8 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
if (!id)
continue;
- list_for_each_entry(client, &hnae3_client_list, node) {
- hnae3_match_n_instantiate(client, ae_dev, false,
- &matched);
- if (matched)
- break;
- }
+ list_for_each_entry(client, &hnae3_client_list, node)
+ hnae3_match_n_instantiate(client, ae_dev, false);
ae_algo->ops->uninit_ae_dev(ae_dev);
hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index b2f28ae81273..1a01cadfe5f3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -49,7 +49,17 @@
#define HNAE3_CLASS_NAME_SIZE 16
#define HNAE3_DEV_INITED_B 0x0
-#define HNAE_DEV_SUPPORT_ROCE_B 0x1
+#define HNAE3_DEV_SUPPORT_ROCE_B 0x1
+#define HNAE3_DEV_SUPPORT_DCB_B 0x2
+
+#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
+ BIT(HNAE3_DEV_SUPPORT_ROCE_B))
+
+#define hnae3_dev_roce_supported(hdev) \
+ hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
+
+#define hnae3_dev_dcb_supported(hdev) \
+ hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
@@ -366,12 +376,12 @@ struct hnae3_ae_algo {
struct hnae3_tc_info {
u16 tqp_offset; /* TQP offset from base TQP */
u16 tqp_count; /* Total TQPs */
- u8 up; /* user priority */
u8 tc; /* TC index */
bool enable; /* If this TC is enable or not */
};
#define HNAE3_MAX_TC 8
+#define HNAE3_MAX_USER_PRIO 8
struct hnae3_knic_private_info {
struct net_device *netdev; /* Set by KNIC client when init instance */
u16 rss_size; /* Allocated RSS queues */
@@ -379,6 +389,7 @@ struct hnae3_knic_private_info {
u16 num_desc;
u8 num_tc; /* Total number of enabled TCs */
+ u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */
u16 num_tqps; /* total number of TQPs in this handle */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 91ae0135ee50..758cf3948131 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -238,7 +238,7 @@ struct hclge_tqp_map {
u8 rsv[18];
};
-#define HCLGE_VECTOR_ELEMENTS_PER_CMD 11
+#define HCLGE_VECTOR_ELEMENTS_PER_CMD 10
enum hclge_int_type {
HCLGE_INT_TX,
@@ -252,8 +252,12 @@ struct hclge_ctrl_vector_chain {
#define HCLGE_INT_TYPE_S 0
#define HCLGE_INT_TYPE_M 0x3
#define HCLGE_TQP_ID_S 2
-#define HCLGE_TQP_ID_M (0x3fff << HCLGE_TQP_ID_S)
+#define HCLGE_TQP_ID_M (0x7ff << HCLGE_TQP_ID_S)
+#define HCLGE_INT_GL_IDX_S 13
+#define HCLGE_INT_GL_IDX_M (0x3 << HCLGE_INT_GL_IDX_S)
__le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD];
+ u8 vfid;
+ u8 rsv;
};
#define HCLGE_TC_NUM 8
@@ -266,7 +270,8 @@ struct hclge_tx_buff_alloc {
struct hclge_rx_priv_buff {
__le16 buf_num[HCLGE_TC_NUM];
- u8 rsv[8];
+ __le16 shared_buf;
+ u8 rsv[6];
};
struct hclge_query_version {
@@ -684,6 +689,7 @@ struct hclge_reset_tqp_queue {
#define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */
#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */
+#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */
#define HCLGE_TYPE_CRQ 0
#define HCLGE_TYPE_CSQ 1
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index bb45365fb817..e0685e630afe 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -46,17 +46,7 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
- /* Required last entry */
- {0, }
-};
-
-static const struct pci_device_id roce_pci_tbl[] = {
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
- /* Required last entry */
+ /* required last entry */
{0, }
};
@@ -894,7 +884,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
hdev->num_tqps = __le16_to_cpu(req->tqp_num);
hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
- if (hnae_get_bit(hdev->ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B)) {
+ if (hnae3_dev_roce_supported(hdev)) {
hdev->num_roce_msix =
hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
@@ -1063,9 +1053,9 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->base_tqp_pid = 0;
hdev->rss_size_max = 1;
hdev->rx_buf_len = cfg.rx_buf_len;
- for (i = 0; i < ETH_ALEN; i++)
- hdev->hw.mac.mac_addr[i] = cfg.mac_addr[i];
+ ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
hdev->hw.mac.media_type = cfg.media_type;
+ hdev->hw.mac.phy_addr = cfg.phy_addr;
hdev->num_desc = cfg.tqp_desc_num;
hdev->tm_info.num_pg = 1;
hdev->tm_info.num_tc = cfg.tc_num;
@@ -1454,7 +1444,11 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all)
tc_num = hclge_get_tc_num(hdev);
pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
- shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
+ if (hnae3_dev_dcb_supported(hdev))
+ shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
+ else
+ shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
+
shared_buf_tc = pfc_enable_num * hdev->mps +
(tc_num - pfc_enable_num) * hdev->mps / 2 +
hdev->mps;
@@ -1495,6 +1489,16 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
struct hclge_priv_buf *priv;
int i;
+ /* When DCB is not supported, rx private
+ * buffer is not allocated.
+ */
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ if (!hclge_is_rx_buf_ok(hdev, rx_all))
+ return -ENOMEM;
+
+ return 0;
+ }
+
/* step 1, try to alloc private buffer for all enabled tc */
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
priv = &hdev->priv_buf[i];
@@ -1510,6 +1514,11 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
priv->wl.high = 2 * hdev->mps;
priv->buf_size = priv->wl.high;
}
+ } else {
+ priv->enable = 0;
+ priv->wl.low = 0;
+ priv->wl.high = 0;
+ priv->buf_size = 0;
}
}
@@ -1522,8 +1531,15 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
priv = &hdev->priv_buf[i];
- if (hdev->hw_tc_map & BIT(i))
- priv->enable = 1;
+ priv->enable = 0;
+ priv->wl.low = 0;
+ priv->wl.high = 0;
+ priv->buf_size = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ priv->enable = 1;
if (hdev->tm_info.hw_pfc_map & BIT(i)) {
priv->wl.low = 128;
@@ -1616,6 +1632,10 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev)
cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B);
}
+ req->shared_buf =
+ cpu_to_le16((hdev->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
+ (1 << HCLGE_TC0_PRI_BUF_EN_B));
+
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -1782,18 +1802,22 @@ int hclge_buffer_alloc(struct hclge_dev *hdev)
return ret;
}
- ret = hclge_rx_priv_wl_config(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "could not configure rx private waterline %d\n", ret);
- return ret;
- }
+ if (hnae3_dev_dcb_supported(hdev)) {
+ ret = hclge_rx_priv_wl_config(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "could not configure rx private waterline %d\n",
+ ret);
+ return ret;
+ }
- ret = hclge_common_thrd_config(hdev);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "could not configure common threshold %d\n", ret);
- return ret;
+ ret = hclge_common_thrd_config(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "could not configure common threshold %d\n",
+ ret);
+ return ret;
+ }
}
ret = hclge_common_wl_config(hdev);
@@ -2582,6 +2606,7 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
u16 tc_valid[HCLGE_MAX_TC_NUM];
u16 tc_size[HCLGE_MAX_TC_NUM];
u32 *rss_indir = NULL;
+ u16 rss_size = 0, roundup_size;
const u8 *key;
int i, ret, j;
@@ -2596,7 +2621,13 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
vport[j].rss_indirection_tbl[i] =
- i % hdev->rss_size_max;
+ i % vport[j].alloc_rss_size;
+
+ /* vport 0 is for PF */
+ if (j != 0)
+ continue;
+
+ rss_size = vport[j].alloc_rss_size;
rss_indir[i] = vport[j].rss_indirection_tbl[i];
}
}
@@ -2613,42 +2644,31 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
if (ret)
goto err;
+ /* Each TC has the same queue size, and the tc_size set to hardware is
+ * the log2 of the roundup power of two of rss_size; the actual queue
+ * size is limited by the indirection table.
+ */
+ if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
+ dev_err(&hdev->pdev->dev,
+ "Configure rss tc size failed, invalid TC_SIZE = %d\n",
+ rss_size);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ roundup_size = roundup_pow_of_two(rss_size);
+ roundup_size = ilog2(roundup_size);
+
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- if (hdev->hw_tc_map & BIT(i))
- tc_valid[i] = 1;
- else
- tc_valid[i] = 0;
+ tc_valid[i] = 0;
- switch (hdev->rss_size_max) {
- case HCLGE_RSS_TC_SIZE_0:
- tc_size[i] = 0;
- break;
- case HCLGE_RSS_TC_SIZE_1:
- tc_size[i] = 1;
- break;
- case HCLGE_RSS_TC_SIZE_2:
- tc_size[i] = 2;
- break;
- case HCLGE_RSS_TC_SIZE_3:
- tc_size[i] = 3;
- break;
- case HCLGE_RSS_TC_SIZE_4:
- tc_size[i] = 4;
- break;
- case HCLGE_RSS_TC_SIZE_5:
- tc_size[i] = 5;
- break;
- case HCLGE_RSS_TC_SIZE_6:
- tc_size[i] = 6;
- break;
- case HCLGE_RSS_TC_SIZE_7:
- tc_size[i] = 7;
- break;
- default:
- break;
- }
- tc_offset[i] = hdev->rss_size_max * i;
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = rss_size * i;
}
+
ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
err:
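The tc_size programmed above is an exponent, not a queue count. A minimal sketch of the derivation, with a worked value for illustration (rss_tc_size() is a hypothetical helper, not part of the patch; roundup_pow_of_two() and ilog2() are the <linux/log2.h> helpers the patch itself uses):

#include <linux/log2.h>
#include <linux/types.h>

/* e.g. rss_size = 40: roundup_pow_of_two(40) = 64, ilog2(64) = 6, so the
 * hardware is told 2^6 slots, while the indirection table written above
 * (i % alloc_rss_size) keeps lookups inside the 40 real queues.
 */
static u16 rss_tc_size(u16 rss_size)
{
	return ilog2(roundup_pow_of_two(rss_size));
}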
@@ -2679,7 +2699,11 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
HCLGE_TQP_ID_S, node->tqp_index);
+ hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
+ req->vfid = vport->vport_id;
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
@@ -2763,8 +2787,12 @@ static int hclge_unmap_ring_from_vector(
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
HCLGE_TQP_ID_S, node->tqp_index);
+ hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
+ req->vfid = vport->vport_id;
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
@@ -2778,7 +2806,7 @@ static int hclge_unmap_ring_from_vector(
}
i = 0;
hclge_cmd_setup_basic_desc(&desc,
- HCLGE_OPC_ADD_RING_TO_VECTOR,
+ HCLGE_OPC_DEL_RING_TO_VECTOR,
false);
req->int_vector_id = vector_id;
}
@@ -3665,6 +3693,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
{
#define HCLGE_VLAN_TYPE_VF_TABLE 0
#define HCLGE_VLAN_TYPE_PORT_TABLE 1
+ struct hnae3_handle *handle;
int ret;
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
@@ -3674,8 +3703,11 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
true);
+ if (ret)
+ return ret;
- return ret;
+ handle = &hdev->vport[0].nic;
+ return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
@@ -3920,8 +3952,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
goto err;
if (hdev->roce_client &&
- hnae_get_bit(hdev->ae_dev->flag,
- HNAE_DEV_SUPPORT_ROCE_B)) {
+ hnae3_dev_roce_supported(hdev)) {
struct hnae3_client *rc = hdev->roce_client;
ret = hclge_init_roce_base_info(vport);
@@ -3944,8 +3975,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
break;
case HNAE3_CLIENT_ROCE:
- if (hnae_get_bit(hdev->ae_dev->flag,
- HNAE_DEV_SUPPORT_ROCE_B)) {
+ if (hnae3_dev_roce_supported(hdev)) {
hdev->roce_client = client;
vport->roce.client = client;
}
@@ -4057,7 +4087,6 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
- const struct pci_device_id *id;
struct hclge_dev *hdev;
int ret;
@@ -4072,10 +4101,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->ae_dev = ae_dev;
ae_dev->priv = hdev;
- id = pci_match_id(roce_pci_tbl, ae_dev->pdev);
- if (id)
- hnae_set_bit(ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B, 1);
-
ret = hclge_pci_init(hdev);
if (ret) {
dev_err(&pdev->dev, "PCI init failed\n");
@@ -4138,12 +4163,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
- ret = hclge_rss_init_hw(hdev);
- if (ret) {
- dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
- return ret;
- }
-
ret = hclge_init_vlan_config(hdev);
if (ret) {
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -4156,6 +4175,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = hclge_rss_init_hw(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
+ return ret;
+ }
+
setup_timer(&hdev->service_timer, hclge_service_timer,
(unsigned long)hdev);
INIT_WORK(&hdev->service_task, hclge_service_task);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index edb10ad075eb..9fcfd9395424 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -176,7 +176,6 @@ struct hclge_pg_info {
struct hclge_tc_info {
u8 tc_id;
u8 tc_sch_mode; /* 0: sp; 1: dwrr */
- u8 up;
u8 pgid;
u32 bw_limit;
};
@@ -197,6 +196,7 @@ struct hclge_tm_info {
u8 num_tc;
u8 num_pg; /* It must be 1 if vNET-Base schd */
u8 pg_dwrr[HCLGE_PG_NUM];
+ u8 prio_tc[HNAE3_MAX_USER_PRIO];
struct hclge_pg_info pg_info[HCLGE_PG_NUM];
struct hclge_tc_info tc_info[HNAE3_MAX_TC];
enum hclge_fc_mode fc_mode;
@@ -477,6 +477,7 @@ struct hclge_vport {
u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
/* User configured lookup table entries */
u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
+ u16 alloc_rss_size;
u16 qs_offset;
u16 bw_limit; /* VSI BW Limit (0 = disabled) */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 1c577d268f00..73a75d7cc551 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -128,9 +128,7 @@ static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
u8 tc;
- for (tc = 0; tc < hdev->tm_info.num_tc; tc++)
- if (hdev->tm_info.tc_info[tc].up == pri_id)
- break;
+ tc = hdev->tm_info.prio_tc[pri_id];
if (tc >= hdev->tm_info.num_tc)
return -EINVAL;
@@ -158,7 +156,7 @@ static int hclge_up_to_tc_map(struct hclge_dev *hdev)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);
- for (pri_id = 0; pri_id < hdev->tm_info.num_tc; pri_id++) {
+ for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
ret = hclge_fill_pri_array(hdev, pri, pri_id);
if (ret)
return ret;
@@ -280,11 +278,11 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
shap_cfg_cmd->pg_id = pg_id;
- hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
- hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
- hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
- hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
- hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);
+ hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
+ hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
+ hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
+ hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
+ hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -307,11 +305,11 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
shap_cfg_cmd->pri_id = pri_id;
- hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
- hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
- hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
- hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
- hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);
+ hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
+ hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
+ hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
+ hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
+ hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -397,6 +395,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
kinfo->num_tqps / kinfo->num_tc);
vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
vport->dwrr = 100; /* 100 percent as init */
+ vport->alloc_rss_size = kinfo->rss_size;
for (i = 0; i < kinfo->num_tc; i++) {
if (hdev->hw_tc_map & BIT(i)) {
@@ -404,16 +403,17 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
kinfo->tc_info[i].tqp_count = kinfo->rss_size;
kinfo->tc_info[i].tc = i;
- kinfo->tc_info[i].up = hdev->tm_info.tc_info[i].up;
} else {
/* Set to default queue if TC is disabled */
kinfo->tc_info[i].enable = false;
kinfo->tc_info[i].tqp_offset = 0;
kinfo->tc_info[i].tqp_count = 1;
kinfo->tc_info[i].tc = 0;
- kinfo->tc_info[i].up = 0;
}
}
+
+ memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
+ FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}
static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
@@ -435,12 +435,15 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
for (i = 0; i < hdev->tm_info.num_tc; i++) {
hdev->tm_info.tc_info[i].tc_id = i;
hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
- hdev->tm_info.tc_info[i].up = i;
hdev->tm_info.tc_info[i].pgid = 0;
hdev->tm_info.tc_info[i].bw_limit =
hdev->tm_info.pg_info[0].bw_limit;
}
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+ hdev->tm_info.prio_tc[i] =
+ (i >= hdev->tm_info.num_tc) ? 0 : i;
+
hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}
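Worked example (assuming HNAE3_MAX_USER_PRIO is 8, as defined in hnae3.h): with num_tc = 4 the loop above yields prio_tc = {0, 1, 2, 3, 0, 0, 0, 0}. Priorities within the enabled TC range map one-to-one, and the rest fall back to TC 0; this table replaces the per-TC up field that hclge_fill_pri_array() used to search.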
@@ -976,6 +979,10 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
if (ret)
return ret;
+ /* Only DCB-capable devices support the qset back pressure setting */
+ if (!hnae3_dev_dcb_supported(hdev))
+ return 0;
+
for (i = 0; i < hdev->tm_info.num_tc; i++) {
ret = hclge_tm_qs_bp_cfg(hdev, i);
if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 7e67337dfaf2..85158b0d73fe 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -94,10 +94,10 @@ struct hclge_bp_to_qs_map_cmd {
u32 rsvd1;
};
-#define hclge_tm_set_feild(dest, string, val) \
+#define hclge_tm_set_field(dest, string, val) \
hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \
(HCLGE_TM_SHAP_##string##_LSH), val)
-#define hclge_tm_get_feild(src, string) \
+#define hclge_tm_get_field(src, string) \
hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
(HCLGE_TM_SHAP_##string##_LSH))
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
index 1c3e29447891..35369e1c8036 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
@@ -41,11 +41,16 @@ static struct hnae3_client client;
static const struct pci_device_id hns3_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
/* required last entry */
{0, }
};
@@ -1348,6 +1353,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
ae_dev->pdev = pdev;
+ ae_dev->flag = ent->driver_data;
ae_dev->dev_type = HNAE3_DEV_KNIC;
pci_set_drvdata(pdev, ae_dev);
@@ -2705,10 +2711,11 @@ static void hns3_init_mac_addr(struct net_device *netdev)
eth_hw_addr_random(netdev);
dev_warn(priv->dev, "using random MAC address %pM\n",
netdev->dev_addr);
- /* Also copy this new MAC address into hdev */
- if (h->ae_algo->ops->set_mac_addr)
- h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
}
+
+ if (h->ae_algo->ops->set_mac_addr)
+ h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
+
}
static void hns3_nic_set_priv_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 2c74baa2398a..fff09dcf9e34 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -402,7 +402,7 @@ static int mal_poll(struct napi_struct *napi, int budget)
unsigned long flags;
MAL_DBG2(mal, "poll(%d)" NL, budget);
- again:
+
/* Process TX skbs */
list_for_each(l, &mal->poll_list) {
struct mal_commac *mc =
@@ -451,7 +451,6 @@ static int mal_poll(struct napi_struct *napi, int budget)
spin_lock_irqsave(&mal->lock, flags);
mal_disable_eob_irq(mal);
spin_unlock_irqrestore(&mal->lock, flags);
- goto again;
}
mc->ops->poll_tx(mc->dev);
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
index bbe24639aa5a..c8c6231b87f3 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
@@ -88,6 +88,8 @@ static void emac_set_msglevel(struct net_device *netdev, u32 data)
static int emac_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
+ case ETH_SS_PRIV_FLAGS:
+ return 1;
case ETH_SS_STATS:
return EMAC_STATS_LEN;
default:
@@ -100,6 +102,10 @@ static void emac_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
unsigned int i;
switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ strcpy(data, "single-pause-mode");
+ break;
+
case ETH_SS_STATS:
for (i = 0; i < EMAC_STATS_LEN; i++) {
strlcpy(data, emac_ethtool_stat_strings[i],
@@ -230,6 +236,27 @@ static int emac_get_regs_len(struct net_device *netdev)
return EMAC_MAX_REG_SIZE * sizeof(u32);
}
+#define EMAC_PRIV_ENABLE_SINGLE_PAUSE BIT(0)
+
+static int emac_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ adpt->single_pause_mode = !!(flags & EMAC_PRIV_ENABLE_SINGLE_PAUSE);
+
+ if (netif_running(netdev))
+ return emac_reinit_locked(adpt);
+
+ return 0;
+}
+
+static u32 emac_get_priv_flags(struct net_device *netdev)
+{
+ struct emac_adapter *adpt = netdev_priv(netdev);
+
+ return adpt->single_pause_mode ? EMAC_PRIV_ENABLE_SINGLE_PAUSE : 0;
+}
+
static const struct ethtool_ops emac_ethtool_ops = {
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
@@ -253,6 +280,9 @@ static const struct ethtool_ops emac_ethtool_ops = {
.get_regs_len = emac_get_regs_len,
.get_regs = emac_get_regs,
+
+ .set_priv_flags = emac_set_priv_flags,
+ .get_priv_flags = emac_get_priv_flags,
};
void emac_set_ethtool_ops(struct net_device *netdev)
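With the two callbacks wired into emac_ethtool_ops, the flag is reachable through the standard ethtool private-flags interface, for example "ethtool --show-priv-flags <dev>" and "ethtool --set-priv-flags <dev> single-pause-mode on"; the flag name is the string reported by emac_get_strings() above.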
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index bcd4708b3745..0ea3ca09c689 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -551,6 +551,28 @@ static void emac_mac_start(struct emac_adapter *adpt)
mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL |
DEBUG_MODE | SINGLE_PAUSE_MODE);
+ /* Enable single-pause-frame mode if requested.
+ *
+ * If enabled, the EMAC will send a single pause frame when the RX
+ * queue is full. This normally leads to packet loss because
+ * the pause frame disables the remote MAC only for 33ms (the quanta),
+ * and then the remote MAC continues sending packets even though
+ * the RX queue is still full.
+ *
+ * If disabled, the EMAC sends a pause frame every 31ms until the RX
+ * queue is no longer full. Normally, this is the preferred
+ * method of operation. However, when the system is hung (e.g.
+ * cores are halted), the EMAC interrupt handler is never called
+ * and so the RX queue fills up quickly and stays full. The resulting
+ * non-stop "flood" of pause frames sometimes has the effect of
+ * disabling nearby switches. In some cases, other nearby switches
+ * are also affected, shutting down the entire network.
+ *
+ * The user can enable or disable single-pause-frame mode
+ * via ethtool.
+ */
+ mac |= adpt->single_pause_mode ? SINGLE_PAUSE_MODE : 0;
+
writel_relaxed(csr1, adpt->csr + EMAC_EMAC_WRAPPER_CSR1);
writel_relaxed(mac, adpt->base + EMAC_MAC_CTRL);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 60850bfa3d32..759543512117 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -443,6 +443,9 @@ static void emac_init_adapter(struct emac_adapter *adpt)
/* default to automatic flow control */
adpt->automatic = true;
+
+ /* Disable single-pause-frame mode by default */
+ adpt->single_pause_mode = false;
}
/* Get the clock */
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.h b/drivers/net/ethernet/qualcomm/emac/emac.h
index 8ee4ec6aef2e..d7c9f44209d4 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac.h
@@ -363,6 +363,9 @@ struct emac_adapter {
bool tx_flow_control;
bool rx_flow_control;
+ /* True == use single-pause-frame mode. */
+ bool single_pause_mode;
+
/* Ring parameter */
u8 tpd_burst;
u8 rfd_burst;
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index ca22f2898664..d24b47b8e0b2 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2135,11 +2135,12 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
if (likely(RTL_R16(IntrStatus) & RxAckBits))
work_done += rtl8139_rx(dev, tp, budget);
- if (work_done < budget && napi_complete_done(napi, work_done)) {
+ if (work_done < budget) {
unsigned long flags;
spin_lock_irqsave(&tp->lock, flags);
- RTL_W16_F(IntrMask, rtl8139_intr_mask);
+ if (napi_complete_done(napi, work_done))
+ RTL_W16_F(IntrMask, rtl8139_intr_mask);
spin_unlock_irqrestore(&tp->lock, flags);
}
spin_unlock(&tp->rx_lock);
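The point of the reordering: napi_complete_done() and the IntrMask write now happen together under tp->lock, and interrupts are re-armed only if napi_complete_done() reports that the poll really completed, so an interrupt arriving in between can no longer observe a half-updated state.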
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index a366b3747eeb..8a280b48e3a9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -315,6 +315,7 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
{ .compatible = "allwinner,sun8i-h3-emac" },
{ .compatible = "allwinner,sun8i-v3s-emac" },
{ .compatible = "allwinner,sun50i-a64-emac" },
+ {},
};
/* If phy-handle property is passed from DT, use it as the PHY */
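The added empty entry terminates the of_device_id table: OF matching walks such tables until it hits an all-zero sentinel, so without one the walk runs past the end of the array.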
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d98cdfb1536b..5176be76ca7d 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -150,6 +150,8 @@ struct netvsc_device_info {
u32 num_chn;
u32 send_sections;
u32 recv_sections;
+ u32 send_section_size;
+ u32 recv_section_size;
};
enum rndis_device_state {
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index a5511b7326af..8d5077fb0492 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -76,9 +76,6 @@ static struct netvsc_device *alloc_net_device(void)
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
- net_device->recv_section_size = NETVSC_RECV_SECTION_SIZE;
- net_device->send_section_size = NETVSC_SEND_SECTION_SIZE;
-
init_completion(&net_device->channel_init_wait);
init_waitqueue_head(&net_device->subchan_open);
INIT_WORK(&net_device->subchan_work, rndis_set_subchannel);
@@ -262,7 +259,7 @@ static int netvsc_init_buf(struct hv_device *device,
int ret = 0;
/* Get receive buffer area. */
- buf_size = device_info->recv_sections * net_device->recv_section_size;
+ buf_size = device_info->recv_sections * device_info->recv_section_size;
buf_size = roundup(buf_size, PAGE_SIZE);
net_device->recv_buf = vzalloc(buf_size);
@@ -344,7 +341,7 @@ static int netvsc_init_buf(struct hv_device *device,
goto cleanup;
/* Now setup the send buffer. */
- buf_size = device_info->send_sections * net_device->send_section_size;
+ buf_size = device_info->send_sections * device_info->send_section_size;
buf_size = round_up(buf_size, PAGE_SIZE);
net_device->send_buf = vzalloc(buf_size);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index d4902ee5f260..a32ae02e1b6c 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -848,7 +848,9 @@ static int netvsc_set_channels(struct net_device *net,
device_info.num_chn = count;
device_info.ring_size = ring_size;
device_info.send_sections = nvdev->send_section_cnt;
+ device_info.send_section_size = nvdev->send_section_size;
device_info.recv_sections = nvdev->recv_section_cnt;
+ device_info.recv_section_size = nvdev->recv_section_size;
rndis_filter_device_remove(dev, nvdev);
@@ -963,7 +965,9 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
device_info.ring_size = ring_size;
device_info.num_chn = nvdev->num_chn;
device_info.send_sections = nvdev->send_section_cnt;
+ device_info.send_section_size = nvdev->send_section_size;
device_info.recv_sections = nvdev->recv_section_cnt;
+ device_info.recv_section_size = nvdev->recv_section_size;
rndis_filter_device_remove(hdev, nvdev);
@@ -1485,7 +1489,9 @@ static int netvsc_set_ringparam(struct net_device *ndev,
device_info.num_chn = nvdev->num_chn;
device_info.ring_size = ring_size;
device_info.send_sections = new_tx;
+ device_info.send_section_size = nvdev->send_section_size;
device_info.recv_sections = new_rx;
+ device_info.recv_section_size = nvdev->recv_section_size;
netif_device_detach(ndev);
was_opened = rndis_filter_opened(nvdev);
@@ -1934,7 +1940,9 @@ static int netvsc_probe(struct hv_device *dev,
device_info.ring_size = ring_size;
device_info.num_chn = VRSS_CHANNEL_DEFAULT;
device_info.send_sections = NETVSC_DEFAULT_TX;
+ device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
device_info.recv_sections = NETVSC_DEFAULT_RX;
+ device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;
nvdev = rndis_filter_device_add(dev, &device_info);
if (IS_ERR(nvdev)) {
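For reference, the sizing rule netvsc_init_buf() applies to both buffers, written out as a sketch (netvsc_buf_bytes() is illustrative only, not part of the patch). Carrying send/recv_section_size through netvsc_device_info keeps this product stable across the re-create paths in netvsc_set_channels(), netvsc_change_mtu() and netvsc_set_ringparam() above:

#include <linux/kernel.h>	/* round_up(), assuming PAGE_SIZE is in scope */

static size_t netvsc_buf_bytes(u32 sections, u32 section_size)
{
	/* total buffer = sections * section_size, padded to whole pages */
	return round_up((size_t)sections * section_size, PAGE_SIZE);
}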
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index a9d16a3af514..cd931cf9dcc2 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -160,15 +160,6 @@ config MDIO_XGENE
endif
-menuconfig PHYLIB
- tristate "PHY Device support and infrastructure"
- depends on NETDEVICES
- select MDIO_DEVICE
- help
- Ethernet controllers are usually attached to PHY
- devices. This option provides infrastructure for
- managing PHY devices.
-
config PHYLINK
tristate
depends on NETDEVICES
@@ -179,6 +170,15 @@ config PHYLINK
configuration links, PHYs, and Serdes links with MAC level
autonegotiation modes.
+menuconfig PHYLIB
+ tristate "PHY Device support and infrastructure"
+ depends on NETDEVICES
+ select MDIO_DEVICE
+ help
+ Ethernet controllers are usually attached to PHY
+ devices. This option provides infrastructure for
+ managing PHY devices.
+
if PHYLIB
config SWPHY
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index e842d2cd1ee7..2b1e67bc1e73 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -373,7 +373,8 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
cmd->base.port = PORT_BNC;
else
cmd->base.port = PORT_MII;
-
+ cmd->base.transceiver = phy_is_internal(phydev) ?
+ XCVR_INTERNAL : XCVR_EXTERNAL;
cmd->base.phy_address = phydev->mdio.addr;
cmd->base.autoneg = phydev->autoneg;
cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 8cf0c5901f95..67f25ac29025 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -879,7 +879,7 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
{
const char *drv_name = phydev->drv ? phydev->drv->name : "unbound";
char *irq_str;
- char irq_num[4];
+ char irq_num[8];
switch(phydev->irq) {
case PHY_POLL:
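A 4-byte buffer holds at most three digits plus the terminating NUL, so IRQ numbers of 1000 and above (and negative values formatted through the default case) were truncated by the snprintf() that fills irq_num; 8 bytes leave room for realistic IRQ numbers.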
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index d15dd3938ba8..2e5150b0b8d5 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -44,7 +44,7 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
priv->phy_drv->read_status(phydev);
val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG);
- val &= XILINX_GMII2RGMII_SPEED_MASK;
+ val &= ~XILINX_GMII2RGMII_SPEED_MASK;
if (phydev->speed == SPEED_1000)
val |= BMCR_SPEED1000;
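The original line kept only the speed bits (val &= MASK) and discarded the rest of the register that had just been read back; the fix clears the speed field (val &= ~MASK) so the remaining bits survive before the new BMCR speed bit is ORed in.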
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index b99a7fb09f8e..0161f77641fa 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1265,30 +1265,45 @@ static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data)
{
struct lan78xx_net *dev = netdev_priv(netdev);
+ int ret;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret)
+ return ret;
ee->magic = LAN78XX_EEPROM_MAGIC;
- return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
+ ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
+
+ usb_autopm_put_interface(dev->intf);
+
+ return ret;
}
static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data)
{
struct lan78xx_net *dev = netdev_priv(netdev);
+ int ret;
+
+ ret = usb_autopm_get_interface(dev->intf);
+ if (ret)
+ return ret;
- /* Allow entire eeprom update only */
- if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
- (ee->offset == 0) &&
- (ee->len == 512) &&
- (data[0] == EEPROM_INDICATOR))
- return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
+ /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
+ * to load data from EEPROM
+ */
+ if (ee->magic == LAN78XX_EEPROM_MAGIC)
+ ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
(ee->offset == 0) &&
(ee->len == 512) &&
(data[0] == OTP_INDICATOR_1))
- return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
+ ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
+ else
+ ret = -EINVAL;
+
+ usb_autopm_put_interface(dev->intf);
- return -EINVAL;
+ return ret;
}
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
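In both ioctl paths the usb_autopm_get_interface()/usb_autopm_put_interface() pair resumes an autosuspended device before the EEPROM/OTP register traffic and drops the PM reference afterwards, including on error, so the accesses can no longer hit a suspended device.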
@@ -2434,7 +2449,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
/* LAN7801 only has RGMII mode */
if (dev->chipid == ID_REV_CHIP_ID_7801_)
buf &= ~MAC_CR_GMII_EN_;
- buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
ret = lan78xx_write_reg(dev, MAC_CR, buf);
ret = lan78xx_read_reg(dev, MAC_TX, &buf);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 1427a386a033..3e4d1e7998da 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1417,6 +1417,15 @@ static int btt_claim_class(struct device *dev)
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_namespace_index *nsindex;
+ /*
+ * If any of the DIMMs do not support labels the only
+ * possible BTT format is v1.
+ */
+ if (!ndd) {
+ loop_bitmask = 0;
+ break;
+ }
+
nsindex = to_namespace_index(ndd, ndd->ns_current);
if (nsindex == NULL)
loop_bitmask |= 1;
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 4ddc6e8f9fe7..f9308c2f22e6 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -251,9 +251,8 @@ err:
return ret;
}
-static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test)
+static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq)
{
- u8 irq;
u8 msi_count;
struct pci_epf *epf = epf_test->epf;
struct pci_epc *epc = epf->epc;
@@ -262,7 +261,6 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test)
reg->status |= STATUS_IRQ_RAISED;
msi_count = pci_epc_get_msi(epc);
- irq = (reg->command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
if (irq > msi_count || msi_count <= 0)
pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
else
@@ -289,6 +287,8 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
reg->command = 0;
reg->status = 0;
+ irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
+
if (command & COMMAND_RAISE_LEGACY_IRQ) {
reg->status = STATUS_IRQ_RAISED;
pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
@@ -301,7 +301,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
reg->status |= STATUS_WRITE_FAIL;
else
reg->status |= STATUS_WRITE_SUCCESS;
- pci_epf_test_raise_irq(epf_test);
+ pci_epf_test_raise_irq(epf_test, irq);
goto reset_handler;
}
@@ -311,7 +311,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
reg->status |= STATUS_READ_SUCCESS;
else
reg->status |= STATUS_READ_FAIL;
- pci_epf_test_raise_irq(epf_test);
+ pci_epf_test_raise_irq(epf_test, irq);
goto reset_handler;
}
@@ -321,13 +321,12 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
reg->status |= STATUS_COPY_SUCCESS;
else
reg->status |= STATUS_COPY_FAIL;
- pci_epf_test_raise_irq(epf_test);
+ pci_epf_test_raise_irq(epf_test, irq);
goto reset_handler;
}
if (command & COMMAND_RAISE_MSI_IRQ) {
msi_count = pci_epc_get_msi(epc);
- irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
if (irq > msi_count || msi_count <= 0)
goto reset_handler;
reg->status = STATUS_IRQ_RAISED;
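Reading the MSI number once at the top of the handler, before reg->command is cleared, means every completion interrupt raised by pci_epf_test_raise_irq() uses the IRQ the host requested with this command rather than whatever the register happens to contain when the transfer finishes.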
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index 0a9b78705ee8..3303dd8d8eb5 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -235,6 +235,7 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
ret = armpmu_register(pmu);
if (ret) {
pr_warn("Failed to register PMU for CPU%d\n", cpu);
+ kfree(pmu->name);
return ret;
}
}
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index ea19b4ff87a2..29f35e29d480 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1644,7 +1644,9 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
dasd_schedule_device_bh(device);
if (device->block) {
dasd_schedule_block_bh(device->block);
- blk_mq_run_hw_queues(device->block->request_queue, true);
+ if (device->block->request_queue)
+ blk_mq_run_hw_queues(device->block->request_queue,
+ true);
}
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
@@ -3759,7 +3761,9 @@ int dasd_generic_path_operational(struct dasd_device *device)
dasd_schedule_device_bh(device);
if (device->block) {
dasd_schedule_block_bh(device->block);
- blk_mq_run_hw_queues(device->block->request_queue, true);
+ if (device->block->request_queue)
+ blk_mq_run_hw_queues(device->block->request_queue,
+ true);
}
if (!device->stopped)
@@ -4025,7 +4029,9 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
if (device->block) {
dasd_schedule_block_bh(device->block);
- blk_mq_run_hw_queues(device->block->request_queue, true);
+ if (device->block->request_queue)
+ blk_mq_run_hw_queues(device->block->request_queue,
+ true);
}
clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 2e7fd966c515..eb51893c74a4 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -249,7 +249,7 @@ static void scm_request_requeue(struct scm_request *scmrq)
static void scm_request_finish(struct scm_request *scmrq)
{
struct scm_blk_dev *bdev = scmrq->bdev;
- int *error;
+ blk_status_t *error;
int i;
for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
@@ -415,7 +415,7 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
static void scm_blk_request_done(struct request *req)
{
- int *error = blk_mq_rq_to_pdu(req);
+ blk_status_t *error = blk_mq_rq_to_pdu(req);
blk_mq_end_request(req, *error);
}
@@ -450,7 +450,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
atomic_set(&bdev->queued_reqs, 0);
bdev->tag_set.ops = &scm_mq_ops;
- bdev->tag_set.cmd_size = sizeof(int);
+ bdev->tag_set.cmd_size = sizeof(blk_status_t);
bdev->tag_set.nr_hw_queues = nr_requests;
bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
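blk_status_t is a u8, so on big-endian s390 storing it through an int-sized PDU and later reading it back as an int picks up the wrong bytes; typing and sizing the per-request PDU as blk_status_t keeps scm_blk_irq() and scm_blk_request_done() consistent. (The endianness rationale is inferred from the types involved, not stated in this hunk.)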
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 489b583f263d..e5c32f4b5287 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1225,10 +1225,16 @@ static int device_is_disconnected(struct ccw_device *cdev)
static int recovery_check(struct device *dev, void *data)
{
struct ccw_device *cdev = to_ccwdev(dev);
+ struct subchannel *sch;
int *redo = data;
spin_lock_irq(cdev->ccwlock);
switch (cdev->private->state) {
+ case DEV_STATE_ONLINE:
+ sch = to_subchannel(cdev->dev.parent);
+ if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
+ break;
+ /* fall through */
case DEV_STATE_DISCONNECTED:
CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
cdev->private->dev_id.ssid,
@@ -1260,7 +1266,7 @@ static void recovery_work_func(struct work_struct *unused)
}
spin_unlock_irq(&recovery_lock);
} else
- CIO_MSG_EVENT(4, "recovery: end\n");
+ CIO_MSG_EVENT(3, "recovery: end\n");
}
static DECLARE_WORK(recovery_work, recovery_work_func);
@@ -1274,11 +1280,11 @@ static void recovery_func(unsigned long data)
schedule_work(&recovery_work);
}
-static void ccw_device_schedule_recovery(void)
+void ccw_device_schedule_recovery(void)
{
unsigned long flags;
- CIO_MSG_EVENT(4, "recovery: schedule\n");
+ CIO_MSG_EVENT(3, "recovery: schedule\n");
spin_lock_irqsave(&recovery_lock, flags);
if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
recovery_phase = 0;
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index ec497af99dd8..69cb70f080a5 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -134,6 +134,7 @@ void ccw_device_set_disconnected(struct ccw_device *cdev);
void ccw_device_set_notoper(struct ccw_device *cdev);
void ccw_device_set_timeout(struct ccw_device *, int);
+void ccw_device_schedule_recovery(void);
/* Channel measurement facility related */
void retry_set_schib(struct ccw_device *cdev);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 12016e32e519..f98ea674c3d8 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -476,6 +476,17 @@ static void create_fake_irb(struct irb *irb, int type)
}
}
+static void ccw_device_handle_broken_paths(struct ccw_device *cdev)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm;
+
+ if (broken_paths && (cdev->private->path_broken_mask != broken_paths))
+ ccw_device_schedule_recovery();
+
+ cdev->private->path_broken_mask = broken_paths;
+}
+
void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
struct subchannel *sch;
@@ -508,6 +519,7 @@ callback:
memset(&cdev->private->irb, 0, sizeof(struct irb));
}
ccw_device_report_path_events(cdev);
+ ccw_device_handle_broken_paths(cdev);
break;
case -ETIME:
case -EUSERS:
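The mask in ccw_device_handle_broken_paths() singles out paths that are installed and operational (pam & opm) but missing from the verified mask vpm; for example pam & opm = 0xc0 with vpm = 0x80 gives broken_paths = 0x40. Recovery is scheduled only when that set changes, so a path that stays broken does not retrigger recovery on every path verification.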
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 220f49145b2f..9a1b56b2df3e 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -131,6 +131,8 @@ struct ccw_device_private {
not operable */
u8 path_gone_mask; /* mask of paths, that became unavailable */
u8 path_new_mask; /* mask of paths, that became available */
+ u8 path_broken_mask; /* mask of paths, which were found to be
+ unusable */
struct {
unsigned int fast:1; /* post with "channel end" */
unsigned int repall:1; /* report every interrupt status */
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 690816f3c6af..421fe869a11e 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2725,9 +2725,9 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
* Params : SCpnt - command causing reset
* Returns : one of SCSI_RESET_ macros
*/
-int acornscsi_host_reset(struct Scsi_Host *shpnt)
+int acornscsi_host_reset(struct scsi_cmnd *SCpnt)
{
- AS_Host *host = (AS_Host *)shpnt->hostdata;
+ AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
struct scsi_cmnd *SCptr;
host->stats.resets += 1;
@@ -2741,7 +2741,7 @@ int acornscsi_host_reset(struct Scsi_Host *shpnt)
printk(KERN_WARNING "acornscsi_reset: ");
print_sbic_status(asr, ssr, host->scsi.phase);
- for (devidx = 0; devidx < 9; devidx ++) {
+ for (devidx = 0; devidx < 9; devidx++)
acornscsi_dumplog(host, devidx);
}
#endif
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 3c6bc0081fcb..ba9d70f8a6a1 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3571,7 +3571,7 @@ fc_vport_sched_delete(struct work_struct *work)
static enum blk_eh_timer_return
fc_bsg_job_timeout(struct request *req)
{
- struct bsg_job *job = (void *) req->special;
+ struct bsg_job *job = blk_mq_rq_to_pdu(req);
struct Scsi_Host *shost = fc_bsg_to_shost(job);
struct fc_rport *rport = fc_bsg_to_rport(job);
struct fc_internal *i = to_fc_internal(shost->transportt);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 11c1738c2100..fb9f8b5f4673 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2915,8 +2915,6 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
sd_config_discard(sdkp, SD_LBP_WS16);
else if (sdkp->lbpws10)
sd_config_discard(sdkp, SD_LBP_WS10);
- else if (sdkp->lbpu && sdkp->max_unmap_blocks)
- sd_config_discard(sdkp, SD_LBP_UNMAP);
else
sd_config_discard(sdkp, SD_LBP_DISABLE);
}
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index cf0e71db9e51..0419c2298eab 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -828,6 +828,39 @@ static int max_sectors_bytes(struct request_queue *q)
return max_sectors << 9;
}
+static void
+sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
+{
+ Sg_request *srp;
+ int val;
+ unsigned int ms;
+
+ val = 0;
+ list_for_each_entry(srp, &sfp->rq_list, entry) {
+ if (val >= SG_MAX_QUEUE)
+ break;
+ rinfo[val].req_state = srp->done + 1;
+ rinfo[val].problem =
+ srp->header.masked_status &
+ srp->header.host_status &
+ srp->header.driver_status;
+ if (srp->done)
+ rinfo[val].duration =
+ srp->header.duration;
+ else {
+ ms = jiffies_to_msecs(jiffies);
+ rinfo[val].duration =
+ (ms > srp->header.duration) ?
+ (ms - srp->header.duration) : 0;
+ }
+ rinfo[val].orphan = srp->orphan;
+ rinfo[val].sg_io_owned = srp->sg_io_owned;
+ rinfo[val].pack_id = srp->header.pack_id;
+ rinfo[val].usr_ptr = srp->header.usr_ptr;
+ val++;
+ }
+}
+
static long
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
@@ -1012,38 +1045,13 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return -EFAULT;
else {
sg_req_info_t *rinfo;
- unsigned int ms;
- rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
- GFP_KERNEL);
+ rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
+ GFP_KERNEL);
if (!rinfo)
return -ENOMEM;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
- val = 0;
- list_for_each_entry(srp, &sfp->rq_list, entry) {
- if (val >= SG_MAX_QUEUE)
- break;
- memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
- rinfo[val].req_state = srp->done + 1;
- rinfo[val].problem =
- srp->header.masked_status &
- srp->header.host_status &
- srp->header.driver_status;
- if (srp->done)
- rinfo[val].duration =
- srp->header.duration;
- else {
- ms = jiffies_to_msecs(jiffies);
- rinfo[val].duration =
- (ms > srp->header.duration) ?
- (ms - srp->header.duration) : 0;
- }
- rinfo[val].orphan = srp->orphan;
- rinfo[val].sg_io_owned = srp->sg_io_owned;
- rinfo[val].pack_id = srp->header.pack_id;
- rinfo[val].usr_ptr = srp->header.usr_ptr;
- val++;
- }
+ sg_fill_request_table(sfp, rinfo);
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
result = __copy_to_user(p, rinfo,
SZ_SG_REQ_INFO * SG_MAX_QUEUE);
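The switch from kmalloc() with a per-entry memset() to kzalloc() zeroes the whole table up front; entries beyond the last queued request, which the old loop never touched, are no longer uninitialized kernel memory when __copy_to_user() ships the table to userspace.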
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 82a8866758ee..a1c17000129b 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -519,64 +519,6 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
return err;
}
-static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
- grant_ref_t *gnt_refs,
- unsigned int nr_grefs,
- void **vaddr)
-{
- struct xenbus_map_node *node;
- struct vm_struct *area;
- pte_t *ptes[XENBUS_MAX_RING_GRANTS];
- phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
- int err = GNTST_okay;
- int i;
- bool leaked;
-
- *vaddr = NULL;
-
- if (nr_grefs > XENBUS_MAX_RING_GRANTS)
- return -EINVAL;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
-
- area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
- if (!area) {
- kfree(node);
- return -ENOMEM;
- }
-
- for (i = 0; i < nr_grefs; i++)
- phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
-
- err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
- phys_addrs,
- GNTMAP_host_map | GNTMAP_contains_pte,
- &leaked);
- if (err)
- goto failed;
-
- node->nr_handles = nr_grefs;
- node->pv.area = area;
-
- spin_lock(&xenbus_valloc_lock);
- list_add(&node->next, &xenbus_valloc_pages);
- spin_unlock(&xenbus_valloc_lock);
-
- *vaddr = area->addr;
- return 0;
-
-failed:
- if (!leaked)
- free_vm_area(area);
- else
- pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
-
- kfree(node);
- return err;
-}
-
struct map_ring_valloc_hvm
{
unsigned int idx;
@@ -725,6 +667,65 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
+#ifdef CONFIG_XEN_PV
+static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
+ grant_ref_t *gnt_refs,
+ unsigned int nr_grefs,
+ void **vaddr)
+{
+ struct xenbus_map_node *node;
+ struct vm_struct *area;
+ pte_t *ptes[XENBUS_MAX_RING_GRANTS];
+ phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
+ int err = GNTST_okay;
+ int i;
+ bool leaked;
+
+ *vaddr = NULL;
+
+ if (nr_grefs > XENBUS_MAX_RING_GRANTS)
+ return -EINVAL;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
+ if (!area) {
+ kfree(node);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_grefs; i++)
+ phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
+
+ err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
+ phys_addrs,
+ GNTMAP_host_map | GNTMAP_contains_pte,
+ &leaked);
+ if (err)
+ goto failed;
+
+ node->nr_handles = nr_grefs;
+ node->pv.area = area;
+
+ spin_lock(&xenbus_valloc_lock);
+ list_add(&node->next, &xenbus_valloc_pages);
+ spin_unlock(&xenbus_valloc_lock);
+
+ *vaddr = area->addr;
+ return 0;
+
+failed:
+ if (!leaked)
+ free_vm_area(area);
+ else
+ pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
+
+ kfree(node);
+ return err;
+}
+
static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
struct xenbus_map_node *node;
@@ -788,6 +789,12 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
return err;
}
+static const struct xenbus_ring_ops ring_ops_pv = {
+ .map = xenbus_map_ring_valloc_pv,
+ .unmap = xenbus_unmap_ring_vfree_pv,
+};
+#endif
+
struct unmap_ring_vfree_hvm
{
unsigned int idx;
@@ -916,11 +923,6 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
-static const struct xenbus_ring_ops ring_ops_pv = {
- .map = xenbus_map_ring_valloc_pv,
- .unmap = xenbus_unmap_ring_vfree_pv,
-};
-
static const struct xenbus_ring_ops ring_ops_hvm = {
.map = xenbus_map_ring_valloc_hvm,
.unmap = xenbus_unmap_ring_vfree_hvm,
@@ -928,8 +930,10 @@ static const struct xenbus_ring_ops ring_ops_hvm = {
void __init xenbus_ring_ops_init(void)
{
+#ifdef CONFIG_XEN_PV
if (!xen_feature(XENFEAT_auto_translated_physmap))
ring_ops = &ring_ops_pv;
else
+#endif
ring_ops = &ring_ops_hvm;
}