author    Linus Torvalds <torvalds@linux-foundation.org>  2016-12-13 11:42:18 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-12-13 11:42:18 -0800
commit    098c30557a9a19827240aaadc137e4668157dc6b (patch)
tree      793d589e27197e754fb38c5ae08b763d6e5d0334
parent    72cca7baf4fba777b8ab770b902cf2e08941773f (diff)
parent    5d47ec02c37ea632398cb251c884e3a488dff794 (diff)
Merge tag 'driver-core-4.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core
Pull driver core updates from Greg KH:
 "Here's the new driver core patches for 4.10-rc1.

  Big thing here is the nice addition of "functional dependencies" to
  the driver core. The idea has been talked about for a very long time,
  great job to Rafael for stepping up and implementing it. It's been
  tested for longer than the 4.9-rc1 date, we held off on merging it
  earlier in order to feel more comfortable about it.

  Other than that, it's just a handful of small other patches, some
  good cleanups to the mess that is the firmware class code, and we
  have a test driver for the deferred probe logic.

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'driver-core-4.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core: (30 commits)
  firmware: Correct handling of fw_state_wait() return value
  driver core: Silence device links sphinx warning
  firmware: remove warning at documentation generation time
  drivers: base: dma-mapping: Fix typo in dmam_alloc_non_coherent comments
  driver core: test_async: fix up typo found by 0-day
  firmware: move fw_state_is_done() into UHM section
  firmware: do not use fw_lock for fw_state protection
  firmware: drop bit ops in favor of simple state machine
  firmware: refactor loading status
  firmware: fix usermode helper fallback loading
  driver core: firmware_class: convert to use class_groups
  driver core: devcoredump: convert to use class_groups
  driver core: class: add class_groups support
  kernfs: Declare two local data structures static
  driver-core: fix platform_no_drv_owner.cocci warnings
  drivers/base/memory.c: Remove unused 'first_page' variable
  driver core: add CLASS_ATTR_WO()
  drivers: base: cacheinfo: support DT overrides for cache properties
  drivers: base: cacheinfo: add pr_fmt logging
  drivers: base: cacheinfo: fix boot error message when acpi is enabled
  ...
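As a rough sketch of how the new "functional dependencies" API in this series is meant to be used from a consumer driver's probe path (the foo_probe() function and the foo_get_supplier() lookup below are hypothetical; only device_link_add() and the DL_FLAG_* flags come from this pull):

    static int foo_probe(struct platform_device *pdev)
    {
        /* hypothetical helper returning the supplier's struct device */
        struct device *supplier = foo_get_supplier(&pdev->dev);
        struct device_link *link;

        /*
         * Make the driver core treat the supplier as a functional
         * dependency: manage its runtime PM for this consumer
         * (DL_FLAG_PM_RUNTIME) and drop the link automatically when the
         * consumer unbinds (DL_FLAG_AUTOREMOVE).
         */
        link = device_link_add(&pdev->dev, supplier,
                               DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE);
        if (!link)
            return -EPROBE_DEFER;   /* e.g. supplier not registered yet */

        return 0;
    }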
-rw-r--r--  Documentation/ABI/testing/sysfs-devices-deferred_probe |  12
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c                   |   2
-rw-r--r--  drivers/base/Kconfig                                    |   2
-rw-r--r--  drivers/base/Makefile                                   |   2
-rw-r--r--  drivers/base/base.h                                     |  15
-rw-r--r--  drivers/base/cacheinfo.c                                | 138
-rw-r--r--  drivers/base/class.c                                    |  15
-rw-r--r--  drivers/base/core.c                                     | 578
-rw-r--r--  drivers/base/dd.c                                       |  79
-rw-r--r--  drivers/base/devcoredump.c                              |  10
-rw-r--r--  drivers/base/dma-mapping.c                              |   4
-rw-r--r--  drivers/base/firmware_class.c                           | 178
-rw-r--r--  drivers/base/memory.c                                   |   2
-rw-r--r--  drivers/base/power/main.c                               |  87
-rw-r--r--  drivers/base/power/power.h                              |  10
-rw-r--r--  drivers/base/power/runtime.c                            | 174
-rw-r--r--  drivers/base/test/Kconfig                               |   9
-rw-r--r--  drivers/base/test/Makefile                              |   1
-rw-r--r--  drivers/base/test/test_async_driver_probe.c             | 169
-rw-r--r--  fs/kernfs/inode.c                                       |   4
-rw-r--r--  include/linux/cacheinfo.h                               |   1
-rw-r--r--  include/linux/debugfs.h                                 |  44
-rw-r--r--  include/linux/device.h                                  |  91
-rw-r--r--  include/linux/pm.h                                      |   2
-rw-r--r--  include/linux/pm_runtime.h                              |  10
-rw-r--r--  lib/kobject_uevent.c                                    |   6
26 files changed, 1517 insertions(+), 128 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-devices-deferred_probe b/Documentation/ABI/testing/sysfs-devices-deferred_probe
new file mode 100644
index 000000000000..58553d7a321f
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-deferred_probe
@@ -0,0 +1,12 @@
+What: /sys/devices/.../deferred_probe
+Date: August 2016
+Contact: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Description:
+ The /sys/devices/.../deferred_probe attribute is
+ present for all devices. If a driver detects during
+ probing a device that a related device is not yet
+ ready, it may defer probing of the first device. The
+ kernel will retry probing the first device after any
+ other device is successfully probed. This attribute
+ reads as 1 if probing of this device is currently
+ deferred, or 0 otherwise.
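A minimal user-space check of this attribute might look like the sketch below; the device path is a placeholder, only the attribute name and its 0/1 semantics come from the description above:

    #include <stdio.h>

    int main(void)
    {
        /* placeholder device path; the attribute exists for every device */
        FILE *f = fopen("/sys/devices/platform/example.0/deferred_probe", "r");
        char buf[4] = "";

        if (!f)
            return 1;
        if (fgets(buf, sizeof(buf), f))
            printf("probe deferred: %s", buf);  /* "1" if deferred, "0" otherwise */
        fclose(f);
        return 0;
    }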
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index de6626c18e42..be6337156502 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -934,6 +934,8 @@ static int __populate_cache_leaves(unsigned int cpu)
ci_leaf_init(this_leaf++, &id4_regs);
__cache_cpumap_setup(cpu, idx, &id4_regs);
}
+ this_cpu_ci->cpu_map_populated = true;
+
return 0;
}
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index afa67f9ed726..d718ae4b907a 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -224,6 +224,8 @@ config DEBUG_TEST_DRIVER_REMOVE
unusable. You should say N here unless you are explicitly looking to
test this functionality.
+source "drivers/base/test/Kconfig"
+
config SYS_HYPERVISOR
bool
default n
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 2609ba20b396..f2816f6ff76a 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -24,5 +24,7 @@ obj-$(CONFIG_PINCTRL) += pinctrl.o
obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
obj-$(CONFIG_GENERIC_MSI_IRQ_DOMAIN) += platform-msi.o
+obj-y += test/
+
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/base.h b/drivers/base/base.h
index e05db388bd1c..ada9dce34e6d 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -107,6 +107,9 @@ extern void bus_remove_device(struct device *dev);
extern int bus_add_driver(struct device_driver *drv);
extern void bus_remove_driver(struct device_driver *drv);
+extern void device_release_driver_internal(struct device *dev,
+ struct device_driver *drv,
+ struct device *parent);
extern void driver_detach(struct device_driver *drv);
extern int driver_probe_device(struct device_driver *drv, struct device *dev);
@@ -138,6 +141,8 @@ extern void device_unblock_probing(void);
extern struct kset *devices_kset;
extern void devices_kset_move_last(struct device *dev);
+extern struct device_attribute dev_attr_deferred_probe;
+
#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
extern void module_add_driver(struct module *mod, struct device_driver *drv);
extern void module_remove_driver(struct device_driver *drv);
@@ -152,3 +157,13 @@ extern int devtmpfs_init(void);
#else
static inline int devtmpfs_init(void) { return 0; }
#endif
+
+/* Device links support */
+extern int device_links_read_lock(void);
+extern void device_links_read_unlock(int idx);
+extern int device_links_check_suppliers(struct device *dev);
+extern void device_links_driver_bound(struct device *dev);
+extern void device_links_driver_cleanup(struct device *dev);
+extern void device_links_no_driver(struct device *dev);
+extern bool device_links_busy(struct device *dev);
+extern void device_links_unbind_consumers(struct device *dev);
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 47983a2ba621..1e3903d0d994 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -16,6 +16,9 @@
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
@@ -85,7 +88,120 @@ static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
{
return sib_leaf->of_node == this_leaf->of_node;
}
+
+/* OF properties to query for a given cache type */
+struct cache_type_info {
+ const char *size_prop;
+ const char *line_size_props[2];
+ const char *nr_sets_prop;
+};
+
+static const struct cache_type_info cache_type_info[] = {
+ {
+ .size_prop = "cache-size",
+ .line_size_props = { "cache-line-size",
+ "cache-block-size", },
+ .nr_sets_prop = "cache-sets",
+ }, {
+ .size_prop = "i-cache-size",
+ .line_size_props = { "i-cache-line-size",
+ "i-cache-block-size", },
+ .nr_sets_prop = "i-cache-sets",
+ }, {
+ .size_prop = "d-cache-size",
+ .line_size_props = { "d-cache-line-size",
+ "d-cache-block-size", },
+ .nr_sets_prop = "d-cache-sets",
+ },
+};
+
+static inline int get_cacheinfo_idx(enum cache_type type)
+{
+ if (type == CACHE_TYPE_UNIFIED)
+ return 0;
+ return type;
+}
+
+static void cache_size(struct cacheinfo *this_leaf)
+{
+ const char *propname;
+ const __be32 *cache_size;
+ int ct_idx;
+
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
+ propname = cache_type_info[ct_idx].size_prop;
+
+ cache_size = of_get_property(this_leaf->of_node, propname, NULL);
+ if (cache_size)
+ this_leaf->size = of_read_number(cache_size, 1);
+}
+
+/* not cache_line_size() because that's a macro in include/linux/cache.h */
+static void cache_get_line_size(struct cacheinfo *this_leaf)
+{
+ const __be32 *line_size;
+ int i, lim, ct_idx;
+
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
+ lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);
+
+ for (i = 0; i < lim; i++) {
+ const char *propname;
+
+ propname = cache_type_info[ct_idx].line_size_props[i];
+ line_size = of_get_property(this_leaf->of_node, propname, NULL);
+ if (line_size)
+ break;
+ }
+
+ if (line_size)
+ this_leaf->coherency_line_size = of_read_number(line_size, 1);
+}
+
+static void cache_nr_sets(struct cacheinfo *this_leaf)
+{
+ const char *propname;
+ const __be32 *nr_sets;
+ int ct_idx;
+
+ ct_idx = get_cacheinfo_idx(this_leaf->type);
+ propname = cache_type_info[ct_idx].nr_sets_prop;
+
+ nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
+ if (nr_sets)
+ this_leaf->number_of_sets = of_read_number(nr_sets, 1);
+}
+
+static void cache_associativity(struct cacheinfo *this_leaf)
+{
+ unsigned int line_size = this_leaf->coherency_line_size;
+ unsigned int nr_sets = this_leaf->number_of_sets;
+ unsigned int size = this_leaf->size;
+
+ /*
+ * If the cache is fully associative, there is no need to
+ * check the other properties.
+ */
+ if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
+ this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
+}
+
+static void cache_of_override_properties(unsigned int cpu)
+{
+ int index;
+ struct cacheinfo *this_leaf;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+
+ for (index = 0; index < cache_leaves(cpu); index++) {
+ this_leaf = this_cpu_ci->info_list + index;
+ cache_size(this_leaf);
+ cache_get_line_size(this_leaf);
+ cache_nr_sets(this_leaf);
+ cache_associativity(this_leaf);
+ }
+}
#else
+static void cache_of_override_properties(unsigned int cpu) { }
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
struct cacheinfo *sib_leaf)
@@ -104,9 +220,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf, *sib_leaf;
unsigned int index;
- int ret;
+ int ret = 0;
- ret = cache_setup_of_node(cpu);
+ if (this_cpu_ci->cpu_map_populated)
+ return 0;
+
+ if (of_have_populated_dt())
+ ret = cache_setup_of_node(cpu);
+ else if (!acpi_disabled)
+ /* No cache property/hierarchy support yet in ACPI */
+ ret = -ENOTSUPP;
if (ret)
return ret;
@@ -161,6 +284,12 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
}
}
+static void cache_override_properties(unsigned int cpu)
+{
+ if (of_have_populated_dt())
+ return cache_of_override_properties(cpu);
+}
+
static void free_cache_attributes(unsigned int cpu)
{
if (!per_cpu_cacheinfo(cpu))
@@ -203,10 +332,11 @@ static int detect_cache_attributes(unsigned int cpu)
*/
ret = cache_shared_cpu_map_setup(cpu);
if (ret) {
- pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
- cpu);
+ pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
goto free_ci;
}
+
+ cache_override_properties(cpu);
return 0;
free_ci:
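To make the ways_of_associativity calculation in cache_associativity() above concrete: a leaf described in DT with cache-size = 32768, cache-sets = 128 and cache-line-size = 64 works out to (32768 / 128) / 64 = 4, i.e. a 4-way set-associative 32 KiB cache, while a fully associative cache (cache-sets = 1) is deliberately skipped and keeps its default value.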
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 71059e32bebc..a2b2896693d6 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -163,6 +163,18 @@ static void klist_class_dev_put(struct klist_node *n)
put_device(dev);
}
+static int class_add_groups(struct class *cls,
+ const struct attribute_group **groups)
+{
+ return sysfs_create_groups(&cls->p->subsys.kobj, groups);
+}
+
+static void class_remove_groups(struct class *cls,
+ const struct attribute_group **groups)
+{
+ return sysfs_remove_groups(&cls->p->subsys.kobj, groups);
+}
+
int __class_register(struct class *cls, struct lock_class_key *key)
{
struct subsys_private *cp;
@@ -203,6 +215,8 @@ int __class_register(struct class *cls, struct lock_class_key *key)
kfree(cp);
return error;
}
+ error = class_add_groups(class_get(cls), cls->class_groups);
+ class_put(cls);
error = add_class_attrs(class_get(cls));
class_put(cls);
return error;
@@ -213,6 +227,7 @@ void class_unregister(struct class *cls)
{
pr_debug("device class '%s': unregistering\n", cls->name);
remove_class_attrs(cls);
+ class_remove_groups(cls, cls->class_groups);
kset_unregister(&cls->p->subsys);
}
diff --git a/drivers/base/core.c b/drivers/base/core.c
index ce057a568673..020ea7f05520 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -44,6 +44,572 @@ static int __init sysfs_deprecated_setup(char *arg)
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
+/* Device links support. */
+
+#ifdef CONFIG_SRCU
+static DEFINE_MUTEX(device_links_lock);
+DEFINE_STATIC_SRCU(device_links_srcu);
+
+static inline void device_links_write_lock(void)
+{
+ mutex_lock(&device_links_lock);
+}
+
+static inline void device_links_write_unlock(void)
+{
+ mutex_unlock(&device_links_lock);
+}
+
+int device_links_read_lock(void)
+{
+ return srcu_read_lock(&device_links_srcu);
+}
+
+void device_links_read_unlock(int idx)
+{
+ srcu_read_unlock(&device_links_srcu, idx);
+}
+#else /* !CONFIG_SRCU */
+static DECLARE_RWSEM(device_links_lock);
+
+static inline void device_links_write_lock(void)
+{
+ down_write(&device_links_lock);
+}
+
+static inline void device_links_write_unlock(void)
+{
+ up_write(&device_links_lock);
+}
+
+int device_links_read_lock(void)
+{
+ down_read(&device_links_lock);
+ return 0;
+}
+
+void device_links_read_unlock(int not_used)
+{
+ up_read(&device_links_lock);
+}
+#endif /* !CONFIG_SRCU */
+
+/**
+ * device_is_dependent - Check if one device depends on another one
+ * @dev: Device to check dependencies for.
+ * @target: Device to check against.
+ *
+ * Check if @target depends on @dev or any device dependent on it (its child or
+ * its consumer etc). Return 1 if that is the case or 0 otherwise.
+ */
+static int device_is_dependent(struct device *dev, void *target)
+{
+ struct device_link *link;
+ int ret;
+
+ if (WARN_ON(dev == target))
+ return 1;
+
+ ret = device_for_each_child(dev, target, device_is_dependent);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (WARN_ON(link->consumer == target))
+ return 1;
+
+ ret = device_is_dependent(link->consumer, target);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int device_reorder_to_tail(struct device *dev, void *not_used)
+{
+ struct device_link *link;
+
+ /*
+ * Devices that have not been registered yet will be put to the ends
+ * of the lists during the registration, so skip them here.
+ */
+ if (device_is_registered(dev))
+ devices_kset_move_last(dev);
+
+ if (device_pm_initialized(dev))
+ device_pm_move_last(dev);
+
+ device_for_each_child(dev, NULL, device_reorder_to_tail);
+ list_for_each_entry(link, &dev->links.consumers, s_node)
+ device_reorder_to_tail(link->consumer, NULL);
+
+ return 0;
+}
+
+/**
+ * device_link_add - Create a link between two devices.
+ * @consumer: Consumer end of the link.
+ * @supplier: Supplier end of the link.
+ * @flags: Link flags.
+ *
+ * The caller is responsible for the proper synchronization of the link creation
+ * with runtime PM. First, setting the DL_FLAG_PM_RUNTIME flag will cause the
+ * runtime PM framework to take the link into account. Second, if the
+ * DL_FLAG_RPM_ACTIVE flag is set in addition to it, the supplier devices will
+ * be forced into the active metastate and reference-counted upon the creation
+ * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
+ * ignored.
+ *
+ * If the DL_FLAG_AUTOREMOVE is set, the link will be removed automatically
+ * when the consumer device driver unbinds from it. The combination of both
+ * DL_FLAG_AUTOREMOVE and DL_FLAG_STATELESS set is invalid and will cause NULL
+ * to be returned.
+ *
+ * A side effect of the link creation is re-ordering of dpm_list and the
+ * devices_kset list by moving the consumer device and all devices depending
+ * on it to the ends of these lists (that does not happen to devices that have
+ * not been registered when this function is called).
+ *
+ * The supplier device is required to be registered when this function is called
+ * and NULL will be returned if that is not the case. The consumer device need
+ * not be registered, however.
+ */
+struct device_link *device_link_add(struct device *consumer,
+ struct device *supplier, u32 flags)
+{
+ struct device_link *link;
+
+ if (!consumer || !supplier ||
+ ((flags & DL_FLAG_STATELESS) && (flags & DL_FLAG_AUTOREMOVE)))
+ return NULL;
+
+ device_links_write_lock();
+ device_pm_lock();
+
+ /*
+ * If the supplier has not been fully registered yet or there is a
+ * reverse dependency between the consumer and the supplier already in
+ * the graph, return NULL.
+ */
+ if (!device_pm_initialized(supplier)
+ || device_is_dependent(consumer, supplier)) {
+ link = NULL;
+ goto out;
+ }
+
+ list_for_each_entry(link, &supplier->links.consumers, s_node)
+ if (link->consumer == consumer)
+ goto out;
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ goto out;
+
+ if (flags & DL_FLAG_PM_RUNTIME) {
+ if (flags & DL_FLAG_RPM_ACTIVE) {
+ if (pm_runtime_get_sync(supplier) < 0) {
+ pm_runtime_put_noidle(supplier);
+ kfree(link);
+ link = NULL;
+ goto out;
+ }
+ link->rpm_active = true;
+ }
+ pm_runtime_new_link(consumer);
+ }
+ get_device(supplier);
+ link->supplier = supplier;
+ INIT_LIST_HEAD(&link->s_node);
+ get_device(consumer);
+ link->consumer = consumer;
+ INIT_LIST_HEAD(&link->c_node);
+ link->flags = flags;
+
+ /* Determine the initial link state. */
+ if (flags & DL_FLAG_STATELESS) {
+ link->status = DL_STATE_NONE;
+ } else {
+ switch (supplier->links.status) {
+ case DL_DEV_DRIVER_BOUND:
+ switch (consumer->links.status) {
+ case DL_DEV_PROBING:
+ /*
+ * Balance the decrementation of the supplier's
+ * runtime PM usage counter after consumer probe
+ * in driver_probe_device().
+ */
+ if (flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_get_sync(supplier);
+
+ link->status = DL_STATE_CONSUMER_PROBE;
+ break;
+ case DL_DEV_DRIVER_BOUND:
+ link->status = DL_STATE_ACTIVE;
+ break;
+ default:
+ link->status = DL_STATE_AVAILABLE;
+ break;
+ }
+ break;
+ case DL_DEV_UNBINDING:
+ link->status = DL_STATE_SUPPLIER_UNBIND;
+ break;
+ default:
+ link->status = DL_STATE_DORMANT;
+ break;
+ }
+ }
+
+ /*
+ * Move the consumer and all of the devices depending on it to the end
+ * of dpm_list and the devices_kset list.
+ *
+ * It is necessary to hold dpm_list locked throughout all that or else
+ * we may end up suspending with a wrong ordering of it.
+ */
+ device_reorder_to_tail(consumer, NULL);
+
+ list_add_tail_rcu(&link->s_node, &supplier->links.consumers);
+ list_add_tail_rcu(&link->c_node, &consumer->links.suppliers);
+
+ dev_info(consumer, "Linked as a consumer to %s\n", dev_name(supplier));
+
+ out:
+ device_pm_unlock();
+ device_links_write_unlock();
+ return link;
+}
+EXPORT_SYMBOL_GPL(device_link_add);
+
+static void device_link_free(struct device_link *link)
+{
+ put_device(link->consumer);
+ put_device(link->supplier);
+ kfree(link);
+}
+
+#ifdef CONFIG_SRCU
+static void __device_link_free_srcu(struct rcu_head *rhead)
+{
+ device_link_free(container_of(rhead, struct device_link, rcu_head));
+}
+
+static void __device_link_del(struct device_link *link)
+{
+ dev_info(link->consumer, "Dropping the link to %s\n",
+ dev_name(link->supplier));
+
+ if (link->flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_drop_link(link->consumer);
+
+ list_del_rcu(&link->s_node);
+ list_del_rcu(&link->c_node);
+ call_srcu(&device_links_srcu, &link->rcu_head, __device_link_free_srcu);
+}
+#else /* !CONFIG_SRCU */
+static void __device_link_del(struct device_link *link)
+{
+ dev_info(link->consumer, "Dropping the link to %s\n",
+ dev_name(link->supplier));
+
+ list_del(&link->s_node);
+ list_del(&link->c_node);
+ device_link_free(link);
+}
+#endif /* !CONFIG_SRCU */
+
+/**
+ * device_link_del - Delete a link between two devices.
+ * @link: Device link to delete.
+ *
+ * The caller must ensure proper synchronization of this function with runtime
+ * PM.
+ */
+void device_link_del(struct device_link *link)
+{
+ device_links_write_lock();
+ device_pm_lock();
+ __device_link_del(link);
+ device_pm_unlock();
+ device_links_write_unlock();
+}
+EXPORT_SYMBOL_GPL(device_link_del);
+
+static void device_links_missing_supplier(struct device *dev)
+{
+ struct device_link *link;
+
+ list_for_each_entry(link, &dev->links.suppliers, c_node)
+ if (link->status == DL_STATE_CONSUMER_PROBE)
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+}
+
+/**
+ * device_links_check_suppliers - Check presence of supplier drivers.
+ * @dev: Consumer device.
+ *
+ * Check links from this device to any suppliers. Walk the list of the device's
+ * links to suppliers and see if all of them are available. If not, simply
+ * return -EPROBE_DEFER.
+ *
+ * We need to guarantee that the supplier will not go away after the check has
+ * been positive here. It only can go away in __device_release_driver() and
+ * that function checks the device's links to consumers. This means we need to
+ * mark the link as "consumer probe in progress" to make the supplier removal
+ * wait for us to complete (or bad things may happen).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+int device_links_check_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int ret = 0;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.suppliers, c_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ if (link->status != DL_STATE_AVAILABLE) {
+ device_links_missing_supplier(dev);
+ ret = -EPROBE_DEFER;
+ break;
+ }
+ WRITE_ONCE(link->status, DL_STATE_CONSUMER_PROBE);
+ }
+ dev->links.status = DL_DEV_PROBING;
+
+ device_links_write_unlock();
+ return ret;
+}
+
+/**
+ * device_links_driver_bound - Update device links after probing its driver.
+ * @dev: Device to update the links for.
+ *
+ * The probe has been successful, so update links from this device to any
+ * consumers by changing their status to "available".
+ *
+ * Also change the status of @dev's links to suppliers to "active".
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_driver_bound(struct device *dev)
+{
+ struct device_link *link;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ WARN_ON(link->status != DL_STATE_DORMANT);
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+ }
+
+ list_for_each_entry(link, &dev->links.suppliers, c_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ WARN_ON(link->status != DL_STATE_CONSUMER_PROBE);
+ WRITE_ONCE(link->status, DL_STATE_ACTIVE);
+ }
+
+ dev->links.status = DL_DEV_DRIVER_BOUND;
+
+ device_links_write_unlock();
+}
+
+/**
+ * __device_links_no_driver - Update links of a device without a driver.
+ * @dev: Device without a driver.
+ *
+ * Delete all non-persistent links from this device to any suppliers.
+ *
+ * Persistent links stay around, but their status is changed to "available",
+ * unless they already are in the "supplier unbind in progress" state in which
+ * case they need not be updated.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+static void __device_links_no_driver(struct device *dev)
+{
+ struct device_link *link, *ln;
+
+ list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ if (link->flags & DL_FLAG_AUTOREMOVE)
+ __device_link_del(link);
+ else if (link->status != DL_STATE_SUPPLIER_UNBIND)
+ WRITE_ONCE(link->status, DL_STATE_AVAILABLE);
+ }
+
+ dev->links.status = DL_DEV_NO_DRIVER;
+}
+
+void device_links_no_driver(struct device *dev)
+{
+ device_links_write_lock();
+ __device_links_no_driver(dev);
+ device_links_write_unlock();
+}
+
+/**
+ * device_links_driver_cleanup - Update links after driver removal.
+ * @dev: Device whose driver has just gone away.
+ *
+ * Update links to consumers for @dev by changing their status to "dormant" and
+ * invoke %__device_links_no_driver() to update links to suppliers for it as
+ * appropriate.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_driver_cleanup(struct device *dev)
+{
+ struct device_link *link;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ WARN_ON(link->flags & DL_FLAG_AUTOREMOVE);
+ WARN_ON(link->status != DL_STATE_SUPPLIER_UNBIND);
+ WRITE_ONCE(link->status, DL_STATE_DORMANT);
+ }
+
+ __device_links_no_driver(dev);
+
+ device_links_write_unlock();
+}
+
+/**
+ * device_links_busy - Check if there are any busy links to consumers.
+ * @dev: Device to check.
+ *
+ * Check each consumer of the device and return 'true' if its link's status
+ * is one of "consumer probe" or "active" (meaning that the given consumer is
+ * probing right now or its driver is present). Otherwise, change the link
+ * state to "supplier unbind" to prevent the consumer from being probed
+ * successfully going forward.
+ *
+ * Return 'false' if there are no probing or active consumers.
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+bool device_links_busy(struct device *dev)
+{
+ struct device_link *link;
+ bool ret = false;
+
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ if (link->status == DL_STATE_CONSUMER_PROBE
+ || link->status == DL_STATE_ACTIVE) {
+ ret = true;
+ break;
+ }
+ WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
+ }
+
+ dev->links.status = DL_DEV_UNBINDING;
+
+ device_links_write_unlock();
+ return ret;
+}
+
+/**
+ * device_links_unbind_consumers - Force unbind consumers of the given device.
+ * @dev: Device to unbind the consumers of.
+ *
+ * Walk the list of links to consumers for @dev and if any of them is in the
+ * "consumer probe" state, wait for all device probes in progress to complete
+ * and start over.
+ *
+ * If that's not the case, change the status of the link to "supplier unbind"
+ * and check if the link was in the "active" state. If so, force the consumer
+ * driver to unbind and start over (the consumer will not re-probe as we have
+ * changed the state of the link already).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ */
+void device_links_unbind_consumers(struct device *dev)
+{
+ struct device_link *link;
+
+ start:
+ device_links_write_lock();
+
+ list_for_each_entry(link, &dev->links.consumers, s_node) {
+ enum device_link_state status;
+
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ status = link->status;
+ if (status == DL_STATE_CONSUMER_PROBE) {
+ device_links_write_unlock();
+
+ wait_for_device_probe();
+ goto start;
+ }
+ WRITE_ONCE(link->status, DL_STATE_SUPPLIER_UNBIND);
+ if (status == DL_STATE_ACTIVE) {
+ struct device *consumer = link->consumer;
+
+ get_device(consumer);
+
+ device_links_write_unlock();
+
+ device_release_driver_internal(consumer, NULL,
+ consumer->parent);
+ put_device(consumer);
+ goto start;
+ }
+ }
+
+ device_links_write_unlock();
+}
+
+/**
+ * device_links_purge - Delete existing links to other devices.
+ * @dev: Target device.
+ */
+static void device_links_purge(struct device *dev)
+{
+ struct device_link *link, *ln;
+
+ /*
+ * Delete all of the remaining links from this device to any other
+ * devices (either consumers or suppliers).
+ */
+ device_links_write_lock();
+
+ list_for_each_entry_safe_reverse(link, ln, &dev->links.suppliers, c_node) {
+ WARN_ON(link->status == DL_STATE_ACTIVE);
+ __device_link_del(link);
+ }
+
+ list_for_each_entry_safe_reverse(link, ln, &dev->links.consumers, s_node) {
+ WARN_ON(link->status != DL_STATE_DORMANT &&
+ link->status != DL_STATE_NONE);
+ __device_link_del(link);
+ }
+
+ device_links_write_unlock();
+}
+
+/* Device links support end. */
+
int (*platform_notify)(struct device *dev) = NULL;
int (*platform_notify_remove)(struct device *dev) = NULL;
static struct kobject *dev_kobj;
@@ -494,8 +1060,14 @@ static int device_add_attrs(struct device *dev)
goto err_remove_dev_groups;
}
+ error = device_create_file(dev, &dev_attr_deferred_probe);
+ if (error)
+ goto err_remove_online;
+
return 0;
+ err_remove_online:
+ device_remove_file(dev, &dev_attr_online);
err_remove_dev_groups:
device_remove_groups(dev, dev->groups);
err_remove_type_groups:
@@ -513,6 +1085,7 @@ static void device_remove_attrs(struct device *dev)
struct class *class = dev->class;
const struct device_type *type = dev->type;
+ device_remove_file(dev, &dev_attr_deferred_probe);
device_remove_file(dev, &dev_attr_online);
device_remove_groups(dev, dev->groups);
@@ -711,6 +1284,9 @@ void device_initialize(struct device *dev)
#ifdef CONFIG_GENERIC_MSI_IRQ
INIT_LIST_HEAD(&dev->msi_list);
#endif
+ INIT_LIST_HEAD(&dev->links.consumers);
+ INIT_LIST_HEAD(&dev->links.suppliers);
+ dev->links.status = DL_DEV_NO_DRIVER;
}
EXPORT_SYMBOL_GPL(device_initialize);
@@ -1258,6 +1834,8 @@ void device_del(struct device *dev)
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DEL_DEVICE, dev);
+
+ device_links_purge(dev);
dpm_sysfs_remove(dev);
if (parent)
klist_del(&dev->p->knode_parent);
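For completeness, a stateless link (the combination device_link_add() above rejects together with DL_FLAG_AUTOREMOVE) leaves lifetime management entirely to the caller; the consumer_dev/supplier_dev pointers and do_something() below are assumed to exist and are purely illustrative:

    struct device_link *link;

    link = device_link_add(consumer_dev, supplier_dev, DL_FLAG_STATELESS);
    if (link) {
        /*
         * The driver core ignores stateless links when tracking probe and
         * unbind state, but suspend/resume ordering still applies.
         */
        do_something(consumer_dev, supplier_dev);
        device_link_del(link);  /* the caller drops the link explicitly */
    }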
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index d76cd97a98b6..a8b258e5407b 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -53,6 +53,19 @@ static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
+static ssize_t deferred_probe_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ bool value;
+
+ mutex_lock(&deferred_probe_mutex);
+ value = !list_empty(&dev->p->deferred_probe);
+ mutex_unlock(&deferred_probe_mutex);
+
+ return sprintf(buf, "%d\n", value);
+}
+DEVICE_ATTR_RO(deferred_probe);
+
/*
* In some cases, like suspend to RAM or hibernation, It might be reasonable
* to prohibit probing of devices as it could be unsafe.
@@ -244,6 +257,7 @@ static void driver_bound(struct device *dev)
__func__, dev_name(dev));
klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
+ device_links_driver_bound(dev);
device_pm_check_callbacks(dev);
@@ -338,6 +352,10 @@ static int really_probe(struct device *dev, struct device_driver *drv)
return ret;
}
+ ret = device_links_check_suppliers(dev);
+ if (ret)
+ return ret;
+
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
drv->bus->name, __func__, drv->name, dev_name(dev));
@@ -416,6 +434,7 @@ probe_failed:
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
pinctrl_bind_failed:
+ device_links_no_driver(dev);
devres_release_all(dev);
driver_sysfs_remove(dev);
dev->driver = NULL;
@@ -508,6 +527,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
drv->bus->name, __func__, dev_name(dev), drv->name);
+ pm_runtime_get_suppliers(dev);
if (dev->parent)
pm_runtime_get_sync(dev->parent);
@@ -518,6 +538,7 @@ int driver_probe_device(struct device_driver *drv, struct device *dev)
if (dev->parent)
pm_runtime_put(dev->parent);
+ pm_runtime_put_suppliers(dev);
return ret;
}
@@ -772,7 +793,7 @@ EXPORT_SYMBOL_GPL(driver_attach);
* __device_release_driver() must be called with @dev lock held.
* When called for a USB interface, @dev->parent lock must be held as well.
*/
-static void __device_release_driver(struct device *dev)
+static void __device_release_driver(struct device *dev, struct device *parent)
{
struct device_driver *drv;
@@ -781,7 +802,27 @@ static void __device_release_driver(struct device *dev)
if (driver_allows_async_probing(drv))
async_synchronize_full();
+ while (device_links_busy(dev)) {
+ device_unlock(dev);
+ if (parent)
+ device_unlock(parent);
+
+ device_links_unbind_consumers(dev);
+ if (parent)
+ device_lock(parent);
+
+ device_lock(dev);
+ /*
+ * A concurrent invocation of the same function might
+ * have released the driver successfully while this one
+ * was waiting, so check for that.
+ */
+ if (dev->driver != drv)
+ return;
+ }
+
pm_runtime_get_sync(dev);
+ pm_runtime_clean_up_links(dev);
driver_sysfs_remove(dev);
@@ -796,6 +837,8 @@ static void __device_release_driver(struct device *dev)
dev->bus->remove(dev);
else if (drv->remove)
drv->remove(dev);
+
+ device_links_driver_cleanup(dev);
devres_release_all(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
@@ -812,12 +855,32 @@ static void __device_release_driver(struct device *dev)
}
}
+void device_release_driver_internal(struct device *dev,
+ struct device_driver *drv,
+ struct device *parent)
+{
+ if (parent)
+ device_lock(parent);
+
+ device_lock(dev);
+ if (!drv || drv == dev->driver)
+ __device_release_driver(dev, parent);
+
+ device_unlock(dev);
+ if (parent)
+ device_unlock(parent);
+}
+
/**
* device_release_driver - manually detach device from driver.
* @dev: device.
*
* Manually detach device from driver.
* When called for a USB interface, @dev->parent lock must be held.
+ *
+ * If this function is to be called with @dev->parent lock held, ensure that
+ * the device's consumers are unbound in advance or that their locks can be
+ * acquired under the @dev->parent lock.
*/
void device_release_driver(struct device *dev)
{
@@ -826,9 +889,7 @@ void device_release_driver(struct device *dev)
* within their ->remove callback for the same device, they
* will deadlock right here.
*/
- device_lock(dev);
- __device_release_driver(dev);
- device_unlock(dev);
+ device_release_driver_internal(dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(device_release_driver);
@@ -853,15 +914,7 @@ void driver_detach(struct device_driver *drv)
dev = dev_prv->device;
get_device(dev);
spin_unlock(&drv->p->klist_devices.k_lock);
-
- if (dev->parent) /* Needed for USB */
- device_lock(dev->parent);
- device_lock(dev);
- if (dev->driver == drv)
- __device_release_driver(dev);
- device_unlock(dev);
- if (dev->parent)
- device_unlock(dev->parent);
+ device_release_driver_internal(dev, drv, dev->parent);
put_device(dev);
}
}
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 240374fd1838..7be310f7db73 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -160,18 +160,20 @@ static ssize_t disabled_store(struct class *class, struct class_attribute *attr,
return count;
}
+static CLASS_ATTR_RW(disabled);
-static struct class_attribute devcd_class_attrs[] = {
- __ATTR_RW(disabled),
- __ATTR_NULL
+static struct attribute *devcd_class_attrs[] = {
+ &class_attr_disabled.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(devcd_class);
static struct class devcd_class = {
.name = "devcoredump",
.owner = THIS_MODULE,
.dev_release = devcd_dev_release,
.dev_groups = devcd_dev_groups,
- .class_attrs = devcd_class_attrs,
+ .class_groups = devcd_class_groups,
};
static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 8f8b68c80986..efd71cf4fdea 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -108,13 +108,13 @@ void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
EXPORT_SYMBOL(dmam_free_coherent);
/**
- * dmam_alloc_non_coherent - Managed dma_alloc_non_coherent()
+ * dmam_alloc_non_coherent - Managed dma_alloc_noncoherent()
* @dev: Device to allocate non_coherent memory for
* @size: Size of allocation
* @dma_handle: Out argument for allocated DMA handle
* @gfp: Allocation flags
*
- * Managed dma_alloc_non_coherent(). Memory allocated using this
+ * Managed dma_alloc_noncoherent(). Memory allocated using this
* function will be automatically released on driver detach.
*
* RETURNS:
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 22d1760a4278..4497d263209f 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -30,6 +30,7 @@
#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/security.h>
+#include <linux/swait.h>
#include <generated/utsrelease.h>
@@ -91,10 +92,11 @@ static inline bool fw_is_builtin_firmware(const struct firmware *fw)
}
#endif
-enum {
+enum fw_status {
+ FW_STATUS_UNKNOWN,
FW_STATUS_LOADING,
FW_STATUS_DONE,
- FW_STATUS_ABORT,
+ FW_STATUS_ABORTED,
};
static int loading_timeout = 60; /* In seconds */
@@ -104,6 +106,82 @@ static inline long firmware_loading_timeout(void)
return loading_timeout > 0 ? loading_timeout * HZ : MAX_JIFFY_OFFSET;
}
+/*
+ * Concurrent request_firmware() calls for the same firmware need to be
+ * serialized. struct fw_state is a simple state machine which holds the
+ * state of the firmware loading.
+ */
+struct fw_state {
+ struct swait_queue_head wq;
+ enum fw_status status;
+};
+
+static void fw_state_init(struct fw_state *fw_st)
+{
+ init_swait_queue_head(&fw_st->wq);
+ fw_st->status = FW_STATUS_UNKNOWN;
+}
+
+static inline bool __fw_state_is_done(enum fw_status status)
+{
+ return status == FW_STATUS_DONE || status == FW_STATUS_ABORTED;
+}
+
+static int __fw_state_wait_common(struct fw_state *fw_st, long timeout)
+{
+ long ret;
+
+ ret = swait_event_interruptible_timeout(fw_st->wq,
+ __fw_state_is_done(READ_ONCE(fw_st->status)),
+ timeout);
+ if (ret != 0 && fw_st->status == FW_STATUS_ABORTED)
+ return -ENOENT;
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return ret < 0 ? ret : 0;
+}
+
+static void __fw_state_set(struct fw_state *fw_st,
+ enum fw_status status)
+{
+ WRITE_ONCE(fw_st->status, status);
+
+ if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED)
+ swake_up(&fw_st->wq);
+}
+
+#define fw_state_start(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_LOADING)
+#define fw_state_done(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_DONE)
+#define fw_state_wait(fw_st) \
+ __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT)
+
+#ifndef CONFIG_FW_LOADER_USER_HELPER
+
+#define fw_state_is_aborted(fw_st) false
+
+#else /* CONFIG_FW_LOADER_USER_HELPER */
+
+static int __fw_state_check(struct fw_state *fw_st, enum fw_status status)
+{
+ return fw_st->status == status;
+}
+
+#define fw_state_aborted(fw_st) \
+ __fw_state_set(fw_st, FW_STATUS_ABORTED)
+#define fw_state_is_done(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_DONE)
+#define fw_state_is_loading(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_LOADING)
+#define fw_state_is_aborted(fw_st) \
+ __fw_state_check(fw_st, FW_STATUS_ABORTED)
+#define fw_state_wait_timeout(fw_st, timeout) \
+ __fw_state_wait_common(fw_st, timeout)
+
+#endif /* CONFIG_FW_LOADER_USER_HELPER */
+
/* firmware behavior options */
#define FW_OPT_UEVENT (1U << 0)
#define FW_OPT_NOWAIT (1U << 1)
@@ -145,9 +223,8 @@ struct firmware_cache {
struct firmware_buf {
struct kref ref;
struct list_head list;
- struct completion completion;
struct firmware_cache *fwc;
- unsigned long status;
+ struct fw_state fw_st;
void *data;
size_t size;
size_t allocated_size;
@@ -205,7 +282,7 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
buf->fwc = fwc;
buf->data = dbuf;
buf->allocated_size = size;
- init_completion(&buf->completion);
+ fw_state_init(&buf->fw_st);
#ifdef CONFIG_FW_LOADER_USER_HELPER
INIT_LIST_HEAD(&buf->pending_list);
#endif
@@ -305,15 +382,6 @@ static const char * const fw_path[] = {
module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
-static void fw_finish_direct_load(struct device *device,
- struct firmware_buf *buf)
-{
- mutex_lock(&fw_lock);
- set_bit(FW_STATUS_DONE, &buf->status);
- complete_all(&buf->completion);
- mutex_unlock(&fw_lock);
-}
-
static int
fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
{
@@ -360,7 +428,7 @@ fw_get_filesystem_firmware(struct device *device, struct firmware_buf *buf)
}
dev_dbg(device, "direct-loading %s\n", buf->fw_id);
buf->size = size;
- fw_finish_direct_load(device, buf);
+ fw_state_done(&buf->fw_st);
break;
}
__putname(path);
@@ -478,12 +546,11 @@ static void __fw_load_abort(struct firmware_buf *buf)
* There is a small window in which user can write to 'loading'
* between loading done and disappearance of 'loading'
*/
- if (test_bit(FW_STATUS_DONE, &buf->status))
+ if (fw_state_is_done(&buf->fw_st))
return;
list_del_init(&buf->pending_list);
- set_bit(FW_STATUS_ABORT, &buf->status);
- complete_all(&buf->completion);
+ fw_state_aborted(&buf->fw_st);
}
static void fw_load_abort(struct firmware_priv *fw_priv)
@@ -496,9 +563,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
fw_priv->buf = NULL;
}
-#define is_fw_load_aborted(buf) \
- test_bit(FW_STATUS_ABORT, &(buf)->status)
-
static LIST_HEAD(pending_fw_head);
/* reboot notifier for avoid deadlock with usermode_lock */
@@ -546,11 +610,13 @@ static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
return count;
}
+static CLASS_ATTR_RW(timeout);
-static struct class_attribute firmware_class_attrs[] = {
- __ATTR_RW(timeout),
- __ATTR_NULL
+static struct attribute *firmware_class_attrs[] = {
+ &class_attr_timeout.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(firmware_class);
static void fw_dev_release(struct device *dev)
{
@@ -585,7 +651,7 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
static struct class firmware_class = {
.name = "firmware",
- .class_attrs = firmware_class_attrs,
+ .class_groups = firmware_class_groups,
.dev_uevent = firmware_uevent,
.dev_release = fw_dev_release,
};
@@ -598,7 +664,7 @@ static ssize_t firmware_loading_show(struct device *dev,
mutex_lock(&fw_lock);
if (fw_priv->buf)
- loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+ loading = fw_state_is_loading(&fw_priv->buf->fw_st);
mutex_unlock(&fw_lock);
return sprintf(buf, "%d\n", loading);
@@ -653,23 +719,20 @@ static ssize_t firmware_loading_store(struct device *dev,
switch (loading) {
case 1:
/* discarding any previous partial load */
- if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
+ if (!fw_state_is_done(&fw_buf->fw_st)) {
for (i = 0; i < fw_buf->nr_pages; i++)
__free_page(fw_buf->pages[i]);
vfree(fw_buf->pages);
fw_buf->pages = NULL;
fw_buf->page_array_size = 0;
fw_buf->nr_pages = 0;
- set_bit(FW_STATUS_LOADING, &fw_buf->status);
+ fw_state_start(&fw_buf->fw_st);
}
break;
case 0:
- if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
+ if (fw_state_is_loading(&fw_buf->fw_st)) {
int rc;
- set_bit(FW_STATUS_DONE, &fw_buf->status);
- clear_bit(FW_STATUS_LOADING, &fw_buf->status);
-
/*
* Several loading requests may be pending on
* one same firmware buf, so let all requests
@@ -691,10 +754,11 @@ static ssize_t firmware_loading_store(struct device *dev,
*/
list_del_init(&fw_buf->pending_list);
if (rc) {
- set_bit(FW_STATUS_ABORT, &fw_buf->status);
+ fw_state_aborted(&fw_buf->fw_st);
written = rc;
+ } else {
+ fw_state_done(&fw_buf->fw_st);
}
- complete_all(&fw_buf->completion);
break;
}
/* fallthrough */
@@ -755,7 +819,7 @@ static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
mutex_lock(&fw_lock);
buf = fw_priv->buf;
- if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
+ if (!buf || fw_state_is_done(&buf->fw_st)) {
ret_count = -ENODEV;
goto out;
}
@@ -842,7 +906,7 @@ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
mutex_lock(&fw_lock);
buf = fw_priv->buf;
- if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
+ if (!buf || fw_state_is_done(&buf->fw_st)) {
retval = -ENODEV;
goto out;
}
@@ -955,17 +1019,14 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
timeout = MAX_JIFFY_OFFSET;
}
- retval = wait_for_completion_interruptible_timeout(&buf->completion,
- timeout);
- if (retval == -ERESTARTSYS || !retval) {
+ retval = fw_state_wait_timeout(&buf->fw_st, timeout);
+ if (retval < 0) {
mutex_lock(&fw_lock);
fw_load_abort(fw_priv);
mutex_unlock(&fw_lock);
- } else if (retval > 0) {
- retval = 0;
}
- if (is_fw_load_aborted(buf))
+ if (fw_state_is_aborted(&buf->fw_st))
retval = -EAGAIN;
else if (buf->is_paged_buf && !buf->data)
retval = -ENOMEM;
@@ -1015,35 +1076,12 @@ fw_load_from_user_helper(struct firmware *firmware, const char *name,
return -ENOENT;
}
-/* No abort during direct loading */
-#define is_fw_load_aborted(buf) false
-
#ifdef CONFIG_PM_SLEEP
static inline void kill_requests_without_uevent(void) { }
#endif
#endif /* CONFIG_FW_LOADER_USER_HELPER */
-
-/* wait until the shared firmware_buf becomes ready (or error) */
-static int sync_cached_firmware_buf(struct firmware_buf *buf)
-{
- int ret = 0;
-
- mutex_lock(&fw_lock);
- while (!test_bit(FW_STATUS_DONE, &buf->status)) {
- if (is_fw_load_aborted(buf)) {
- ret = -ENOENT;
- break;
- }
- mutex_unlock(&fw_lock);
- ret = wait_for_completion_interruptible(&buf->completion);
- mutex_lock(&fw_lock);
- }
- mutex_unlock(&fw_lock);
- return ret;
-}
-
/* prepare firmware and firmware_buf structs;
* return 0 if a firmware is already assigned, 1 if need to load one,
* or a negative error code
@@ -1077,7 +1115,7 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
firmware->priv = buf;
if (ret > 0) {
- ret = sync_cached_firmware_buf(buf);
+ ret = fw_state_wait(&buf->fw_st);
if (!ret) {
fw_set_page_data(buf, firmware);
return 0; /* assigned */
@@ -1095,7 +1133,7 @@ static int assign_firmware_buf(struct firmware *fw, struct device *device,
struct firmware_buf *buf = fw->priv;
mutex_lock(&fw_lock);
- if (!buf->size || is_fw_load_aborted(buf)) {
+ if (!buf->size || fw_state_is_aborted(&buf->fw_st)) {
mutex_unlock(&fw_lock);
return -ENOENT;
}
@@ -1345,9 +1383,9 @@ static void request_firmware_work_func(struct work_struct *work)
*
* Asynchronous variant of request_firmware() for user contexts:
* - sleep for as small periods as possible since it may
- * increase kernel boot time of built-in device drivers
- * requesting firmware in their ->probe() methods, if
- * @gfp is GFP_KERNEL.
+ * increase kernel boot time of built-in device drivers
+ * requesting firmware in their ->probe() methods, if
+ * @gfp is GFP_KERNEL.
*
* - can't sleep at all if @gfp is GFP_ATOMIC.
**/
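The fw_state helpers introduced above replace the old completion-plus-bit-ops combination with a single state machine; their intended life cycle inside firmware_class.c is roughly the following (simplified sketch, error handling and the surrounding firmware_buf plumbing omitted):

    struct fw_state st;
    int ret;

    fw_state_init(&st);        /* status = FW_STATUS_UNKNOWN */
    fw_state_start(&st);       /* a load begins: FW_STATUS_LOADING */

    /* completing context (direct load or usermode helper): */
    fw_state_done(&st);        /* or fw_state_aborted(&st); wakes all waiters */

    /* waiting context (a second request for the same firmware): */
    ret = fw_state_wait(&st);  /* 0 once done, -ENOENT if the load was aborted */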
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 62c63c0c5c22..bb69e58c29f3 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -226,11 +226,9 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
{
unsigned long start_pfn;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
- struct page *first_page;
int ret;
start_pfn = section_nr_to_pfn(phys_index);
- first_page = pfn_to_page(start_pfn);
switch (action) {
case MEM_ONLINE:
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index eb474c882ebe..48c6294e9c34 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -131,6 +131,7 @@ void device_pm_add(struct device *dev)
dev_warn(dev, "parent %s should not be sleeping\n",
dev_name(dev->parent));
list_add_tail(&dev->power.entry, &dpm_list);
+ dev->power.in_dpm_list = true;
mutex_unlock(&dpm_list_mtx);
}
@@ -145,6 +146,7 @@ void device_pm_remove(struct device *dev)
complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
list_del_init(&dev->power.entry);
+ dev->power.in_dpm_list = false;
mutex_unlock(&dpm_list_mtx);
device_wakeup_disable(dev);
pm_runtime_remove(dev);
@@ -244,6 +246,62 @@ static void dpm_wait_for_children(struct device *dev, bool async)
device_for_each_child(dev, &async, dpm_wait_fn);
}
+static void dpm_wait_for_suppliers(struct device *dev, bool async)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ /*
+ * If the supplier goes away right after we've checked the link to it,
+ * we'll wait for its completion to change the state, but that's fine,
+ * because the only things that will block as a result are the SRCU
+ * callbacks freeing the link objects for the links in the list we're
+ * walking.
+ */
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_wait(link->supplier, async);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_wait_for_superior(struct device *dev, bool async)
+{
+ dpm_wait(dev->parent, async);
+ dpm_wait_for_suppliers(dev, async);
+}
+
+static void dpm_wait_for_consumers(struct device *dev, bool async)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ /*
+ * The status of a device link can only be changed from "dormant" by a
+ * probe, but that cannot happen during system suspend/resume. In
+ * theory it can change to "dormant" at that time, but then it is
+ * reasonable to wait for the target device anyway (eg. if it goes
+ * away, it's better to wait for it to go away completely and then
+ * continue instead of trying to continue in parallel with its
+ * unregistration).
+ */
+ list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
+ if (READ_ONCE(link->status) != DL_STATE_DORMANT)
+ dpm_wait(link->consumer, async);
+
+ device_links_read_unlock(idx);
+}
+
+static void dpm_wait_for_subordinate(struct device *dev, bool async)
+{
+ dpm_wait_for_children(dev, async);
+ dpm_wait_for_consumers(dev, async);
+}
+
/**
* pm_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
@@ -488,7 +546,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
if (!dev->power.is_noirq_suspended)
goto Out;
- dpm_wait(dev->parent, async);
+ dpm_wait_for_superior(dev, async);
if (dev->pm_domain) {
info = "noirq power domain ";
@@ -618,7 +676,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
if (!dev->power.is_late_suspended)
goto Out;
- dpm_wait(dev->parent, async);
+ dpm_wait_for_superior(dev, async);
if (dev->pm_domain) {
info = "early power domain ";
@@ -750,7 +808,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
goto Complete;
}
- dpm_wait(dev->parent, async);
+ dpm_wait_for_superior(dev, async);
dpm_watchdog_set(&wd, dev);
device_lock(dev);
@@ -1027,7 +1085,7 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
- dpm_wait_for_children(dev, async);
+ dpm_wait_for_subordinate(dev, async);
if (async_error)
goto Complete;
@@ -1174,7 +1232,7 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
__pm_runtime_disable(dev, false);
- dpm_wait_for_children(dev, async);
+ dpm_wait_for_subordinate(dev, async);
if (async_error)
goto Complete;
@@ -1342,6 +1400,22 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
return error;
}
+static void dpm_clear_suppliers_direct_complete(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ spin_lock_irq(&link->supplier->power.lock);
+ link->supplier->power.direct_complete = false;
+ spin_unlock_irq(&link->supplier->power.lock);
+ }
+
+ device_links_read_unlock(idx);
+}
+
/**
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
@@ -1358,7 +1432,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
TRACE_DEVICE(dev);
TRACE_SUSPEND(0);
- dpm_wait_for_children(dev, async);
+ dpm_wait_for_subordinate(dev, async);
if (async_error)
goto Complete;
@@ -1454,6 +1528,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
spin_unlock_irq(&parent->power.lock);
}
+ dpm_clear_suppliers_direct_complete(dev);
}
device_unlock(dev);
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index a84332aefc2d..a46e97e515c5 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -144,6 +144,11 @@ extern void device_pm_move_after(struct device *, struct device *);
extern void device_pm_move_last(struct device *);
extern void device_pm_check_callbacks(struct device *dev);
+static inline bool device_pm_initialized(struct device *dev)
+{
+ return dev->power.in_dpm_list;
+}
+
#else /* !CONFIG_PM_SLEEP */
static inline void device_pm_sleep_init(struct device *dev) {}
@@ -163,6 +168,11 @@ static inline void device_pm_move_last(struct device *dev) {}
static inline void device_pm_check_callbacks(struct device *dev) {}
+static inline bool device_pm_initialized(struct device *dev)
+{
+ return device_is_registered(dev);
+}
+
#endif /* !CONFIG_PM_SLEEP */
static inline void device_pm_init(struct device *dev)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 26856d050037..872eac4cb1df 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -12,6 +12,8 @@
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/rpm.h>
+
+#include "../base.h"
#include "power.h"
typedef int (*pm_callback_t)(struct device *);
@@ -259,6 +261,42 @@ static int rpm_check_suspend_allowed(struct device *dev)
return retval;
}
+static int rpm_get_suppliers(struct device *dev)
+{
+ struct device_link *link;
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ int retval;
+
+ if (!(link->flags & DL_FLAG_PM_RUNTIME))
+ continue;
+
+ if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
+ link->rpm_active)
+ continue;
+
+ retval = pm_runtime_get_sync(link->supplier);
+ if (retval < 0) {
+ pm_runtime_put_noidle(link->supplier);
+ return retval;
+ }
+ link->rpm_active = true;
+ }
+ return 0;
+}
+
+static void rpm_put_suppliers(struct device *dev)
+{
+ struct device_link *link;
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (link->rpm_active &&
+ READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
+ pm_runtime_put(link->supplier);
+ link->rpm_active = false;
+ }
+}
+
/**
* __rpm_callback - Run a given runtime PM callback for a given device.
* @cb: Runtime PM callback to run.
@@ -267,19 +305,57 @@ static int rpm_check_suspend_allowed(struct device *dev)
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
- int retval;
+ int retval, idx;
+ bool use_links = dev->power.links_count > 0;
- if (dev->power.irq_safe)
+ if (dev->power.irq_safe) {
spin_unlock(&dev->power.lock);
- else
+ } else {
spin_unlock_irq(&dev->power.lock);
+ /*
+ * Resume suppliers if necessary.
+ *
+ * The device's runtime PM status cannot change until this
+ * routine returns, so it is safe to read the status outside of
+ * the lock.
+ */
+ if (use_links && dev->power.runtime_status == RPM_RESUMING) {
+ idx = device_links_read_lock();
+
+ retval = rpm_get_suppliers(dev);
+ if (retval)
+ goto fail;
+
+ device_links_read_unlock(idx);
+ }
+ }
+
retval = cb(dev);
- if (dev->power.irq_safe)
+ if (dev->power.irq_safe) {
spin_lock(&dev->power.lock);
- else
+ } else {
+ /*
+ * If the device is suspending and the callback has returned
+ * success, drop the usage counters of the suppliers that have
+ * been reference counted on its resume.
+ *
+ * Do that if resume fails too.
+ */
+ if (use_links
+ && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
+ || (dev->power.runtime_status == RPM_RESUMING && retval))) {
+ idx = device_links_read_lock();
+
+ fail:
+ rpm_put_suppliers(dev);
+
+ device_links_read_unlock(idx);
+ }
+
spin_lock_irq(&dev->power.lock);
+ }
return retval;
}
@@ -1458,6 +1534,94 @@ void pm_runtime_remove(struct device *dev)
}
/**
+ * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
+ * @dev: Device whose driver is going to be removed.
+ *
+ * Check links from this device to any consumers and if any of them have active
+ * runtime PM references to the device, drop the usage counter of the device
+ * (once per link).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ *
+ * Since the device is guaranteed to be runtime-active at the point this is
+ * called, nothing else needs to be done here.
+ *
+ * Moreover, this is called after device_links_busy() has returned 'false', so
+ * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and
+ * therefore rpm_active can't be manipulated concurrently.
+ */
+void pm_runtime_clean_up_links(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+ if (link->rpm_active) {
+ pm_runtime_put_noidle(dev);
+ link->rpm_active = false;
+ }
+ }
+
+ device_links_read_unlock(idx);
+}
+
+/**
+ * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
+ * @dev: Consumer device.
+ */
+void pm_runtime_get_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (link->flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_get_sync(link->supplier);
+
+ device_links_read_unlock(idx);
+}
+
+/**
+ * pm_runtime_put_suppliers - Drop references to supplier devices.
+ * @dev: Consumer device.
+ */
+void pm_runtime_put_suppliers(struct device *dev)
+{
+ struct device_link *link;
+ int idx;
+
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+ if (link->flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_put(link->supplier);
+
+ device_links_read_unlock(idx);
+}
+
+void pm_runtime_new_link(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ dev->power.links_count++;
+ spin_unlock_irq(&dev->power.lock);
+}
+
+void pm_runtime_drop_link(struct device *dev)
+{
+ spin_lock_irq(&dev->power.lock);
+ WARN_ON(dev->power.links_count == 0);
+ dev->power.links_count--;
+ spin_unlock_irq(&dev->power.lock);
+}
+
+/**
* pm_runtime_force_suspend - Force a device into suspend state if needed.
* @dev: Device to suspend.
*
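
With the rpm_get_suppliers()/rpm_put_suppliers() additions above, runtime-resuming a consumer device now also runtime-resumes every supplier it is linked to with DL_FLAG_PM_RUNTIME, and those references are dropped again once the consumer suspends. A minimal consumer-side sketch of that behaviour, assuming a hypothetical platform driver (the my_consumer_* names are illustrative, not part of this patch set):

/* Hypothetical consumer driver fragment (illustrative names). */
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int my_consumer_start(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);

	/*
	 * Resuming the consumer makes __rpm_callback() call
	 * rpm_get_suppliers(), which runtime-resumes every supplier
	 * linked with DL_FLAG_PM_RUNTIME before the consumer's own
	 * resume callback runs.
	 */
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* ... hardware access while consumer and suppliers are active ... */

	/*
	 * When the consumer is eventually runtime-suspended,
	 * rpm_put_suppliers() drops the supplier references again.
	 */
	pm_runtime_put(&pdev->dev);
	return 0;
}

The supplier handling happens entirely inside __rpm_callback(), so the consumer driver only sees its usual pm_runtime_get_sync()/pm_runtime_put() calls.
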
diff --git a/drivers/base/test/Kconfig b/drivers/base/test/Kconfig
new file mode 100644
index 000000000000..9aa0d45a60db
--- /dev/null
+++ b/drivers/base/test/Kconfig
@@ -0,0 +1,9 @@
+config TEST_ASYNC_DRIVER_PROBE
+ tristate "Build kernel module to test asynchronous driver probing"
+ depends on m
+ help
+ Enabling this option produces a kernel module that allows
+ testing asynchronous driver probing by the device core.
+ The module name will be test_async_driver_probe.ko.
+
+ If unsure say N.
diff --git a/drivers/base/test/Makefile b/drivers/base/test/Makefile
new file mode 100644
index 000000000000..90477c5fd9f9
--- /dev/null
+++ b/drivers/base/test/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_TEST_ASYNC_DRIVER_PROBE) += test_async_driver_probe.o
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
new file mode 100644
index 000000000000..304d5c2bd5e9
--- /dev/null
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/time.h>
+
+#define TEST_PROBE_DELAY (5 * 1000) /* 5 sec */
+#define TEST_PROBE_THRESHOLD (TEST_PROBE_DELAY / 2)
+
+static int test_probe(struct platform_device *pdev)
+{
+ dev_info(&pdev->dev, "sleeping for %d msecs in probe\n",
+ TEST_PROBE_DELAY);
+ msleep(TEST_PROBE_DELAY);
+ dev_info(&pdev->dev, "done sleeping\n");
+
+ return 0;
+}
+
+static struct platform_driver async_driver = {
+ .driver = {
+ .name = "test_async_driver",
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .probe = test_probe,
+};
+
+static struct platform_driver sync_driver = {
+ .driver = {
+ .name = "test_sync_driver",
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
+ },
+ .probe = test_probe,
+};
+
+static struct platform_device *async_dev_1, *async_dev_2;
+static struct platform_device *sync_dev_1;
+
+static int __init test_async_probe_init(void)
+{
+ ktime_t calltime, delta;
+ unsigned long long duration;
+ int error;
+
+ pr_info("registering first asynchronous device...\n");
+
+ async_dev_1 = platform_device_register_simple("test_async_driver", 1,
+ NULL, 0);
+ if (IS_ERR(async_dev_1)) {
+ error = PTR_ERR(async_dev_1);
+ pr_err("failed to create async_dev_1: %d", error);
+ return error;
+ }
+
+ pr_info("registering asynchronous driver...\n");
+ calltime = ktime_get();
+ error = platform_driver_register(&async_driver);
+ if (error) {
+ pr_err("Failed to register async_driver: %d\n", error);
+ goto err_unregister_async_dev_1;
+ }
+
+ delta = ktime_sub(ktime_get(), calltime);
+ duration = (unsigned long long) ktime_to_ms(delta);
+ pr_info("registration took %lld msecs\n", duration);
+ if (duration > TEST_PROBE_THRESHOLD) {
+ pr_err("test failed: probe took too long\n");
+ error = -ETIMEDOUT;
+ goto err_unregister_async_driver;
+ }
+
+ pr_info("registering second asynchronous device...\n");
+ calltime = ktime_get();
+ async_dev_2 = platform_device_register_simple("test_async_driver", 2,
+ NULL, 0);
+ if (IS_ERR(async_dev_2)) {
+ error = PTR_ERR(async_dev_2);
+ pr_err("failed to create async_dev_2: %d", error);
+ goto err_unregister_async_driver;
+ }
+
+ delta = ktime_sub(ktime_get(), calltime);
+ duration = (unsigned long long) ktime_to_ms(delta);
+ pr_info("registration took %lld msecs\n", duration);
+ if (duration > TEST_PROBE_THRESHOLD) {
+ pr_err("test failed: probe took too long\n");
+ error = -ETIMEDOUT;
+ goto err_unregister_async_dev_2;
+ }
+
+ pr_info("registering synchronous driver...\n");
+
+ error = platform_driver_register(&sync_driver);
+ if (error) {
+ pr_err("Failed to register async_driver: %d\n", error);
+ goto err_unregister_async_dev_2;
+ }
+
+ pr_info("registering synchronous device...\n");
+ calltime = ktime_get();
+ sync_dev_1 = platform_device_register_simple("test_sync_driver", 1,
+ NULL, 0);
+ if (IS_ERR(sync_dev_1)) {
+ error = PTR_ERR(sync_dev_1);
+ pr_err("failed to create sync_dev_1: %d", error);
+ goto err_unregister_sync_driver;
+ }
+
+ delta = ktime_sub(ktime_get(), calltime);
+ duration = (unsigned long long) ktime_to_ms(delta);
+ pr_info("registration took %lld msecs\n", duration);
+ if (duration < TEST_PROBE_THRESHOLD) {
+ pr_err("test failed: probe was too quick\n");
+ error = -ETIMEDOUT;
+ goto err_unregister_sync_dev_1;
+ }
+
+ pr_info("completed successfully");
+
+ return 0;
+
+err_unregister_sync_dev_1:
+ platform_device_unregister(sync_dev_1);
+
+err_unregister_sync_driver:
+ platform_driver_unregister(&sync_driver);
+
+err_unregister_async_dev_2:
+ platform_device_unregister(async_dev_2);
+
+err_unregister_async_driver:
+ platform_driver_unregister(&async_driver);
+
+err_unregister_async_dev_1:
+ platform_device_unregister(async_dev_1);
+
+ return error;
+}
+module_init(test_async_probe_init);
+
+static void __exit test_async_probe_exit(void)
+{
+ platform_driver_unregister(&async_driver);
+ platform_driver_unregister(&sync_driver);
+ platform_device_unregister(async_dev_1);
+ platform_device_unregister(async_dev_2);
+ platform_device_unregister(sync_dev_1);
+}
+module_exit(test_async_probe_exit);
+
+MODULE_DESCRIPTION("Test module for asynchronous driver probing");
+MODULE_AUTHOR("Dmitry Torokhov <dtor@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index a1982118f92f..ac9e108ce1ea 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -335,7 +335,7 @@ static int kernfs_xattr_set(const struct xattr_handler *handler,
return simple_xattr_set(&attrs->xattrs, name, value, size, flags);
}
-const struct xattr_handler kernfs_trusted_xattr_handler = {
+static const struct xattr_handler kernfs_trusted_xattr_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.get = kernfs_xattr_get,
.set = kernfs_xattr_set,
@@ -372,7 +372,7 @@ static int kernfs_security_xattr_set(const struct xattr_handler *handler,
return error;
}
-const struct xattr_handler kernfs_security_xattr_handler = {
+static const struct xattr_handler kernfs_security_xattr_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.get = kernfs_xattr_get,
.set = kernfs_security_xattr_set,
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index 2189935075b4..a951fd10aaaa 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -71,6 +71,7 @@ struct cpu_cacheinfo {
struct cacheinfo *info_list;
unsigned int num_levels;
unsigned int num_leaves;
+ bool cpu_map_populated;
};
/*
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index bf1907d96097..014cc564d1c4 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -63,6 +63,21 @@ debugfs_real_fops(const struct file *filp)
return filp->f_path.dentry->d_fsdata;
}
+#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
+static int __fops ## _open(struct inode *inode, struct file *file) \
+{ \
+ __simple_attr_check_format(__fmt, 0ull); \
+ return simple_attr_open(inode, file, __get, __set, __fmt); \
+} \
+static const struct file_operations __fops = { \
+ .owner = THIS_MODULE, \
+ .open = __fops ## _open, \
+ .release = simple_attr_release, \
+ .read = debugfs_attr_read, \
+ .write = debugfs_attr_write, \
+ .llseek = generic_file_llseek, \
+}
+
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_create_file(const char *name, umode_t mode,
@@ -100,21 +115,6 @@ ssize_t debugfs_attr_read(struct file *file, char __user *buf,
ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
-#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
-static int __fops ## _open(struct inode *inode, struct file *file) \
-{ \
- __simple_attr_check_format(__fmt, 0ull); \
- return simple_attr_open(inode, file, __get, __set, __fmt); \
-} \
-static const struct file_operations __fops = { \
- .owner = THIS_MODULE, \
- .open = __fops ## _open, \
- .release = simple_attr_release, \
- .read = debugfs_attr_read, \
- .write = debugfs_attr_write, \
- .llseek = generic_file_llseek, \
-}
-
struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, const char *new_name);
@@ -234,8 +234,18 @@ static inline void debugfs_use_file_finish(int srcu_idx)
__releases(&debugfs_srcu)
{ }
-#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
- static const struct file_operations __fops = { 0 }
+static inline ssize_t debugfs_attr_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return -ENODEV;
+}
+
+static inline ssize_t debugfs_attr_write(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return -ENODEV;
+}
static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, char *new_name)
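
DEFINE_DEBUGFS_ATTRIBUTE() is only relocated here so that it is visible regardless of CONFIG_DEBUG_FS, while debugfs_attr_read()/debugfs_attr_write() gain -ENODEV stubs for the disabled case. A minimal usage sketch, assuming a hypothetical u64 counter in some driver (my_counter and the get/set helpers are illustrative names):

#include <linux/debugfs.h>

static u64 my_counter;

static int my_counter_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int my_counter_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(my_counter_fops, my_counter_get, my_counter_set,
			 "%llu\n");

static void my_driver_debugfs_init(void)
{
	/* Pairs with the *_unsafe creation helper these attributes target. */
	debugfs_create_file_unsafe("my_counter", 0644, NULL, &my_counter,
				   &my_counter_fops);
}
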
diff --git a/include/linux/device.h b/include/linux/device.h
index 94926d3ad6c6..491b4c0ca633 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -362,6 +362,7 @@ int subsys_virtual_register(struct bus_type *subsys,
* @name: Name of the class.
* @owner: The module owner.
* @class_attrs: Default attributes of this class.
+ * @class_groups: Default attribute groups of this class.
* @dev_groups: Default attributes of the devices that belong to the class.
* @dev_kobj: The kobject that represents this class and links it into the hierarchy.
* @dev_uevent: Called when a device is added, removed from this class, or a
@@ -390,6 +391,7 @@ struct class {
struct module *owner;
struct class_attribute *class_attrs;
+ const struct attribute_group **class_groups;
const struct attribute_group **dev_groups;
struct kobject *dev_kobj;
@@ -465,6 +467,8 @@ struct class_attribute {
struct class_attribute class_attr_##_name = __ATTR_RW(_name)
#define CLASS_ATTR_RO(_name) \
struct class_attribute class_attr_##_name = __ATTR_RO(_name)
+#define CLASS_ATTR_WO(_name) \
+ struct class_attribute class_attr_##_name = __ATTR_WO(_name)
extern int __must_check class_create_file_ns(struct class *class,
const struct class_attribute *attr,
@@ -727,6 +731,87 @@ struct device_dma_parameters {
};
/**
+ * enum device_link_state - Device link states.
+ * @DL_STATE_NONE: The presence of the drivers is not being tracked.
+ * @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
+ * @DL_STATE_AVAILABLE: The supplier driver is present, but the consumer is not.
+ * @DL_STATE_CONSUMER_PROBE: The consumer is probing (supplier driver present).
+ * @DL_STATE_ACTIVE: Both the supplier and consumer drivers are present.
+ * @DL_STATE_SUPPLIER_UNBIND: The supplier driver is unbinding.
+ */
+enum device_link_state {
+ DL_STATE_NONE = -1,
+ DL_STATE_DORMANT = 0,
+ DL_STATE_AVAILABLE,
+ DL_STATE_CONSUMER_PROBE,
+ DL_STATE_ACTIVE,
+ DL_STATE_SUPPLIER_UNBIND,
+};
+
+/*
+ * Device link flags.
+ *
+ * STATELESS: The core won't track the presence of supplier/consumer drivers.
+ * AUTOREMOVE: Remove this link automatically on consumer driver unbind.
+ * PM_RUNTIME: If set, the runtime PM framework will use this link.
+ * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
+ */
+#define DL_FLAG_STATELESS BIT(0)
+#define DL_FLAG_AUTOREMOVE BIT(1)
+#define DL_FLAG_PM_RUNTIME BIT(2)
+#define DL_FLAG_RPM_ACTIVE BIT(3)
+
+/**
+ * struct device_link - Device link representation.
+ * @supplier: The device on the supplier end of the link.
+ * @s_node: Hook to the supplier device's list of links to consumers.
+ * @consumer: The device on the consumer end of the link.
+ * @c_node: Hook to the consumer device's list of links to suppliers.
+ * @status: The state of the link (with respect to the presence of drivers).
+ * @flags: Link flags.
+ * @rpm_active: Whether or not the consumer device is runtime-PM-active.
+ * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks.
+ */
+struct device_link {
+ struct device *supplier;
+ struct list_head s_node;
+ struct device *consumer;
+ struct list_head c_node;
+ enum device_link_state status;
+ u32 flags;
+ bool rpm_active;
+#ifdef CONFIG_SRCU
+ struct rcu_head rcu_head;
+#endif
+};
+
+/**
+ * enum dl_dev_state - Device driver presence tracking information.
+ * @DL_DEV_NO_DRIVER: There is no driver attached to the device.
+ * @DL_DEV_PROBING: A driver is probing.
+ * @DL_DEV_DRIVER_BOUND: The driver has been bound to the device.
+ * @DL_DEV_UNBINDING: The driver is unbinding from the device.
+ */
+enum dl_dev_state {
+ DL_DEV_NO_DRIVER = 0,
+ DL_DEV_PROBING,
+ DL_DEV_DRIVER_BOUND,
+ DL_DEV_UNBINDING,
+};
+
+/**
+ * struct dev_links_info - Device data related to device links.
+ * @suppliers: List of links to supplier devices.
+ * @consumers: List of links to consumer devices.
+ * @status: Driver status information.
+ */
+struct dev_links_info {
+ struct list_head suppliers;
+ struct list_head consumers;
+ enum dl_dev_state status;
+};
+
+/**
* struct device - The basic device structure
* @parent: The device's "parent" device, the device to which it is attached.
* In most cases, a parent device is some sort of bus or host
@@ -751,6 +836,7 @@ struct device_dma_parameters {
* on. This shrinks the "Board Support Packages" (BSPs) and
* minimizes board-specific #ifdefs in drivers.
* @driver_data: Private pointer for driver specific info.
+ * @links: Links to suppliers and consumers of this device.
* @power: For device power management.
* See Documentation/power/admin-guide/devices.rst for details.
* @pm_domain: Provide callbacks that are executed during system suspend,
@@ -818,6 +904,7 @@ struct device {
core doesn't touch it */
void *driver_data; /* Driver data, set and get with
dev_set/get_drvdata */
+ struct dev_links_info links;
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
@@ -1135,6 +1222,10 @@ extern void device_shutdown(void);
/* debugging and troubleshooting/diagnostic helpers. */
extern const char *dev_driver_string(const struct device *dev);
+/* Device links interface. */
+struct device_link *device_link_add(struct device *consumer,
+ struct device *supplier, u32 flags);
+void device_link_del(struct device_link *link);
#ifdef CONFIG_PRINTK
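
The enum, flag, and structure documentation above describes the link state machine; the two declarations at the end are the whole public interface. A minimal sketch of how a consumer driver might create a link during probe (my_bind_supplier() is a hypothetical helper, and both device pointers are assumed to come from subsystem-specific lookup code):

#include <linux/device.h>

static int my_bind_supplier(struct device *consumer, struct device *supplier)
{
	struct device_link *link;

	/*
	 * DL_FLAG_PM_RUNTIME: runtime PM treats the supplier as a
	 * dependency of the consumer.  DL_FLAG_AUTOREMOVE: the core drops
	 * the link when the consumer driver unbinds, so no explicit
	 * device_link_del() is needed on the normal teardown path.
	 */
	link = device_link_add(consumer, supplier,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE);
	if (!link)
		return -EINVAL;

	return 0;
}

Stateless links (DL_FLAG_STATELESS) bypass the driver-presence tracking, so callers using them would normally delete the link themselves with device_link_del().
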
diff --git a/include/linux/pm.h b/include/linux/pm.h
index efa67b2dfee9..f926af41e122 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -559,6 +559,7 @@ struct dev_pm_info {
pm_message_t power_state;
unsigned int can_wakeup:1;
unsigned int async_suspend:1;
+ bool in_dpm_list:1; /* Owned by the PM core */
bool is_prepared:1; /* Owned by the PM core */
bool is_suspended:1; /* Ditto */
bool is_noirq_suspended:1;
@@ -596,6 +597,7 @@ struct dev_pm_info {
unsigned int use_autosuspend:1;
unsigned int timer_autosuspends:1;
unsigned int memalloc_noio:1;
+ unsigned int links_count;
enum rpm_request request;
enum rpm_status runtime_status;
int runtime_error;
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 4957fc185ea9..ca4823e675e2 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -55,6 +55,11 @@ extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
+extern void pm_runtime_clean_up_links(struct device *dev);
+extern void pm_runtime_get_suppliers(struct device *dev);
+extern void pm_runtime_put_suppliers(struct device *dev);
+extern void pm_runtime_new_link(struct device *dev);
+extern void pm_runtime_drop_link(struct device *dev);
static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
{
@@ -179,6 +184,11 @@ static inline unsigned long pm_runtime_autosuspend_expiration(
struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
bool enable){}
+static inline void pm_runtime_clean_up_links(struct device *dev) {}
+static inline void pm_runtime_get_suppliers(struct device *dev) {}
+static inline void pm_runtime_put_suppliers(struct device *dev) {}
+static inline void pm_runtime_new_link(struct device *dev) {}
+static inline void pm_runtime_drop_link(struct device *dev) {}
#endif /* !CONFIG_PM */
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index f6c2c1e7779c..9a2b811966eb 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -56,7 +56,7 @@ static const char *kobject_actions[] = {
* kobject_action_type - translate action string to numeric type
*
* @buf: buffer containing the action string, newline is ignored
- * @len: length of buffer
+ * @count: length of buffer
* @type: pointer to the location to store the action type
*
* Returns 0 if the action string was recognized.
@@ -154,8 +154,8 @@ static void cleanup_uevent_env(struct subprocess_info *info)
/**
* kobject_uevent_env - send an uevent with environmental data
*
- * @action: action that is happening
* @kobj: struct kobject that the action is happening to
+ * @action: action that is happening
* @envp_ext: pointer to environmental data
*
* Returns 0 if kobject_uevent_env() is completed with success or the
@@ -363,8 +363,8 @@ EXPORT_SYMBOL_GPL(kobject_uevent_env);
/**
* kobject_uevent - notify userspace by sending an uevent
*
- * @action: action that is happening
* @kobj: struct kobject that the action is happening to
+ * @action: action that is happening
*
* Returns 0 if kobject_uevent() is completed with success or the
* corresponding error when it fails.