Diffstat (limited to 'fs/kernfs/file.c')
 -rw-r--r--  fs/kernfs/file.c  205
 1 file changed, 134 insertions(+), 71 deletions(-)
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index e3abfa843879..b3ec34386b43 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -18,21 +18,8 @@
#include "kernfs-internal.h"
-/*
- * There's one kernfs_open_file for each open file and one kernfs_open_node
- * for each kernfs_node with one or more open files.
- *
- * kernfs_node->attr.open points to kernfs_open_node. attr.open is
- * protected by kernfs_open_node_lock.
- *
- * filp->private_data points to seq_file whose ->private points to
- * kernfs_open_file. kernfs_open_files are chained at
- * kernfs_open_node->files, which is protected by kernfs_open_file_mutex.
- */
-static DEFINE_SPINLOCK(kernfs_open_node_lock);
-static DEFINE_MUTEX(kernfs_open_file_mutex);
-
struct kernfs_open_node {
+ struct rcu_head rcu_head;
atomic_t event;
wait_queue_head_t poll;
struct list_head files; /* goes through kernfs_open_file.list */
@@ -51,6 +38,70 @@ struct kernfs_open_node {
static DEFINE_SPINLOCK(kernfs_notify_lock);
static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;
+static inline struct mutex *kernfs_open_file_mutex_ptr(struct kernfs_node *kn)
+{
+ int idx = hash_ptr(kn, NR_KERNFS_LOCK_BITS);
+
+ return &kernfs_locks->open_file_mutex[idx];
+}
+
+static inline struct mutex *kernfs_open_file_mutex_lock(struct kernfs_node *kn)
+{
+ struct mutex *lock;
+
+ lock = kernfs_open_file_mutex_ptr(kn);
+
+ mutex_lock(lock);
+
+ return lock;
+}
+
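
The two helpers above are the heart of the lock split: instead of one global kernfs_open_file_mutex, each kernfs_node hashes to one mutex in a shared array, so opens and releases on unrelated nodes no longer contend. Below is a minimal, hedged userspace sketch of the same lock-striping pattern using pthreads; NR_LOCK_BITS, hash_ptr_bits() and the golden-ratio constant are illustrative stand-ins for the kernel's NR_KERNFS_LOCK_BITS and hash_ptr(), not kernfs API:

/* Lock striping: map an object's address to one of 2^NR_LOCK_BITS mutexes. */
#include <pthread.h>
#include <stdint.h>

#define NR_LOCK_BITS	6			/* 64 lock stripes */
#define NR_LOCKS	(1U << NR_LOCK_BITS)

/* GCC range-designated initializer; one static mutex per stripe. */
static pthread_mutex_t open_file_mutex[NR_LOCKS] = {
	[0 ... NR_LOCKS - 1] = PTHREAD_MUTEX_INITIALIZER,
};

/* Multiplicative (Fibonacci) hash of a pointer down to @bits bits. */
static unsigned int hash_ptr_bits(const void *p, unsigned int bits)
{
	uint64_t v = (uint64_t)(uintptr_t)p * 0x61c8864680b583ebULL;

	return (unsigned int)(v >> (64 - bits));
}

static pthread_mutex_t *open_file_mutex_ptr(const void *node)
{
	return &open_file_mutex[hash_ptr_bits(node, NR_LOCK_BITS)];
}

/* Lock the stripe covering @node; hand the caller the lock to unlock later. */
static pthread_mutex_t *open_file_mutex_lock(const void *node)
{
	pthread_mutex_t *lock = open_file_mutex_ptr(node);

	pthread_mutex_lock(lock);
	return lock;
}

Returning the mutex pointer, as the kernel helper also does, matters because the caller must unlock the same stripe it locked even if the node itself is torn down in between.
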
+/**
+ * kernfs_deref_open_node - Get kernfs_open_node corresponding to @kn.
+ *
+ * @of: associated kernfs_open_file instance.
+ * @kn: target kernfs_node.
+ *
+ * Fetch and return ->attr.open of @kn if @of->list is non-empty.
+ * If @of->list is non-empty we can safely assume that @of is on the
+ * @kn->attr.open->files list, and this guarantees that @kn->attr.open
+ * will not vanish, i.e. dereferencing it outside an RCU read-side
+ * critical section is safe here.
+ *
+ * The caller needs to make sure that @of->list is not empty.
+ */
+static struct kernfs_open_node *
+kernfs_deref_open_node(struct kernfs_open_file *of, struct kernfs_node *kn)
+{
+ struct kernfs_open_node *on;
+
+ on = rcu_dereference_check(kn->attr.open, !list_empty(&of->list));
+
+ return on;
+}
+
+/**
+ * kernfs_deref_open_node_protected - Get kernfs_open_node corresponding to @kn
+ *
+ * @kn: target kernfs_node.
+ *
+ * Fetch and return ->attr.open of @kn when caller holds the
+ * kernfs_open_file_mutex_ptr(kn).
+ *
+ * Update of ->attr.open happens under kernfs_open_file_mutex_ptr(kn). So when
+ * the caller guarantees that this mutex is held, other updaters can't
+ * change ->attr.open, which means that we can safely dereference
+ * ->attr.open outside an RCU read-side critical section.
+ *
+ * The caller needs to make sure that kernfs_open_file_mutex_ptr(kn) is held.
+ */
+static struct kernfs_open_node *
+kernfs_deref_open_node_protected(struct kernfs_node *kn)
+{
+ return rcu_dereference_protected(kn->attr.open,
+ lockdep_is_held(kernfs_open_file_mutex_ptr(kn)));
+}
+
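
Together these two helpers encode the only two legal ways to read an RCU-managed pointer: either supply a condition proving the object cannot be freed out from under you (here, @of sitting on the ->files list), or hold the lock that serializes all updaters. A hedged, generic kernel-style sketch of the same pair for an arbitrary RCU pointer follows; struct obj, struct foo and their fields are illustrative, not kernfs API (the sketch is extended at the publish, retire and notify hunks below):

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	struct rcu_head rcu_head;	/* needed by kfree_rcu() in the retire sketch */
	atomic_t event;
};

struct obj {
	struct foo __rcu *ptr;		/* RCU-managed, may be NULL */
	struct mutex lock;		/* serializes all updates to ->ptr */
};

/* Reader: legal under rcu_read_lock(), or when @cond proves liveness. */
static struct foo *obj_deref(struct obj *o, bool cond)
{
	return rcu_dereference_check(o->ptr, cond);
}

/* Updater: legal because holding ->lock excludes every other updater. */
static struct foo *obj_deref_locked(struct obj *o)
{
	return rcu_dereference_protected(o->ptr, lockdep_is_held(&o->lock));
}

The difference is in what each variant asserts about the caller's context: rcu_dereference_protected() may even skip the READ_ONCE() that the read-side variants need, precisely because updates are locked out while it runs.
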
static struct kernfs_open_file *kernfs_of(struct file *file)
{
return ((struct seq_file *)file->private_data)->private;
@@ -156,8 +207,12 @@ static void kernfs_seq_stop(struct seq_file *sf, void *v)
static int kernfs_seq_show(struct seq_file *sf, void *v)
{
struct kernfs_open_file *of = sf->private;
+ struct kernfs_open_node *on = kernfs_deref_open_node(of, of->kn);
- of->event = atomic_read(&of->kn->attr.open->event);
+ if (!on)
+ return -EINVAL;
+
+ of->event = atomic_read(&on->event);
return of->kn->attr.ops->seq_show(sf, v);
}
@@ -180,6 +235,7 @@ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
ssize_t len = min_t(size_t, iov_iter_count(iter), PAGE_SIZE);
const struct kernfs_ops *ops;
+ struct kernfs_open_node *on;
char *buf;
buf = of->prealloc_buf;
@@ -201,7 +257,15 @@ static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
goto out_free;
}
- of->event = atomic_read(&of->kn->attr.open->event);
+ on = kernfs_deref_open_node(of, of->kn);
+ if (!on) {
+ len = -EINVAL;
+ mutex_unlock(&of->mutex);
+ goto out_free;
+ }
+
+ of->event = atomic_read(&on->event);
+
ops = kernfs_ops(of->kn);
if (ops->read)
len = ops->read(of, buf, len, iocb->ki_pos);
@@ -243,7 +307,7 @@ static ssize_t kernfs_fop_read_iter(struct kiocb *iocb, struct iov_iter *iter)
* There is no easy way for us to know if userspace is only doing a partial
* write, so we don't support them. We expect the entire buffer to come on
* the first write. Hint: if you're writing a value, first read the file,
- * modify only the the value you're changing, then write entire buffer
+ * modify only the value you're changing, then write the entire buffer
* back.
*/
static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
@@ -484,7 +548,6 @@ static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
* It is not possible to successfully wrap close.
* So error if someone is trying to use close.
*/
- rc = -EINVAL;
if (vma->vm_ops && vma->vm_ops->close)
goto out_put;
@@ -518,37 +581,31 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
struct kernfs_open_file *of)
{
struct kernfs_open_node *on, *new_on = NULL;
+ struct mutex *mutex = NULL;
- retry:
- mutex_lock(&kernfs_open_file_mutex);
- spin_lock_irq(&kernfs_open_node_lock);
-
- if (!kn->attr.open && new_on) {
- kn->attr.open = new_on;
- new_on = NULL;
- }
-
- on = kn->attr.open;
- if (on)
- list_add_tail(&of->list, &on->files);
-
- spin_unlock_irq(&kernfs_open_node_lock);
- mutex_unlock(&kernfs_open_file_mutex);
+ mutex = kernfs_open_file_mutex_lock(kn);
+ on = kernfs_deref_open_node_protected(kn);
if (on) {
- kfree(new_on);
+ list_add_tail(&of->list, &on->files);
+ mutex_unlock(mutex);
return 0;
+ } else {
+ /* not there, initialize a new one */
+ new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
+ if (!new_on) {
+ mutex_unlock(mutex);
+ return -ENOMEM;
+ }
+ atomic_set(&new_on->event, 1);
+ init_waitqueue_head(&new_on->poll);
+ INIT_LIST_HEAD(&new_on->files);
+ list_add_tail(&of->list, &new_on->files);
+ rcu_assign_pointer(kn->attr.open, new_on);
}
+ mutex_unlock(mutex);
- /* not there, initialize a new one and retry */
- new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
- if (!new_on)
- return -ENOMEM;
-
- atomic_set(&new_on->event, 1);
- init_waitqueue_head(&new_on->poll);
- INIT_LIST_HEAD(&new_on->files);
- goto retry;
+ return 0;
}
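
Compared to the deleted version, the alloc-and-retry loop is gone: the old code could not allocate under the irq-disabling kernfs_open_node_lock, so it allocated first and retried the locking dance, whereas a mutex can be held across a GFP_KERNEL allocation, letting one critical section cover check, allocate, initialize and publish. Continuing the generic obj sketch from above (illustrative, not kernfs API):

/* Publish side: one sleepable lock covers check + alloc + publish. */
static int obj_publish(struct obj *o)
{
	struct foo *new;

	mutex_lock(&o->lock);
	if (!obj_deref_locked(o)) {
		new = kzalloc(sizeof(*new), GFP_KERNEL);  /* may sleep under a mutex */
		if (!new) {
			mutex_unlock(&o->lock);
			return -ENOMEM;
		}
		atomic_set(&new->event, 1);
		/*
		 * rcu_assign_pointer() is a store-release: every
		 * initialization above is ordered before the pointer
		 * becomes visible, so readers never see a half-built foo.
		 */
		rcu_assign_pointer(o->ptr, new);
	}
	mutex_unlock(&o->lock);
	return 0;
}

The old retry loop was not a bug, just the price of publishing under a spinlock; removing the spinlock removes the loop.
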
/**
@@ -567,24 +624,26 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
static void kernfs_unlink_open_file(struct kernfs_node *kn,
struct kernfs_open_file *of)
{
- struct kernfs_open_node *on = kn->attr.open;
- unsigned long flags;
+ struct kernfs_open_node *on;
+ struct mutex *mutex = NULL;
- mutex_lock(&kernfs_open_file_mutex);
- spin_lock_irqsave(&kernfs_open_node_lock, flags);
+ mutex = kernfs_open_file_mutex_lock(kn);
+
+ on = kernfs_deref_open_node_protected(kn);
+ if (!on) {
+ mutex_unlock(mutex);
+ return;
+ }
if (of)
list_del(&of->list);
- if (list_empty(&on->files))
- kn->attr.open = NULL;
- else
- on = NULL;
-
- spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
- mutex_unlock(&kernfs_open_file_mutex);
+ if (list_empty(&on->files)) {
+ rcu_assign_pointer(kn->attr.open, NULL);
+ kfree_rcu(on, rcu_head);
+ }
- kfree(on);
+ mutex_unlock(mutex);
}
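
The matching teardown above is why struct kernfs_open_node grew an rcu_head: clearing ->attr.open only stops new lookups, while a concurrent kernfs_notify() may already hold the old pointer under rcu_read_lock(), so the memory must outlive a grace period. Continuing the generic obj sketch (illustrative, not kernfs API):

/* Retire side: unpublish under the updater lock, free after a grace period. */
static void obj_retire(struct obj *o)
{
	struct foo *old;

	mutex_lock(&o->lock);
	old = obj_deref_locked(o);
	rcu_assign_pointer(o->ptr, NULL);	/* no new readers can find it */
	mutex_unlock(&o->lock);

	/*
	 * kfree_rcu() defers the free until all readers that might still
	 * hold @old have left their rcu_read_lock() sections; unlike
	 * synchronize_rcu() it does not block here.
	 */
	if (old)
		kfree_rcu(old, rcu_head);
}

kfree_rcu(old, rcu_head) needs no callback because the second argument names the rcu_head member inside struct foo, exactly as the patch names the new field in kernfs_open_node.
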
static int kernfs_fop_open(struct inode *inode, struct file *file)
@@ -722,11 +781,11 @@ static void kernfs_release_file(struct kernfs_node *kn,
/*
* @of is guaranteed to have no other file operations in flight and
* we just want to synchronize release and drain paths.
- * @kernfs_open_file_mutex is enough. @of->mutex can't be used
+ * @kernfs_open_file_mutex_ptr(kn) is enough. @of->mutex can't be used
* here because drain path may be called from places which can
* cause circular dependency.
*/
- lockdep_assert_held(&kernfs_open_file_mutex);
+ lockdep_assert_held(kernfs_open_file_mutex_ptr(kn));
if (!of->released) {
/*
@@ -743,11 +802,12 @@ static int kernfs_fop_release(struct inode *inode, struct file *filp)
{
struct kernfs_node *kn = inode->i_private;
struct kernfs_open_file *of = kernfs_of(filp);
+ struct mutex *mutex = NULL;
if (kn->flags & KERNFS_HAS_RELEASE) {
- mutex_lock(&kernfs_open_file_mutex);
+ mutex = kernfs_open_file_mutex_lock(kn);
kernfs_release_file(kn, of);
- mutex_unlock(&kernfs_open_file_mutex);
+ mutex_unlock(mutex);
}
kernfs_unlink_open_file(kn, of);
@@ -762,6 +822,7 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
{
struct kernfs_open_node *on;
struct kernfs_open_file *of;
+ struct mutex *mutex = NULL;
if (!(kn->flags & (KERNFS_HAS_MMAP | KERNFS_HAS_RELEASE)))
return;
@@ -771,20 +832,19 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
* ->attr.open at this point of time. This check allows early bail out
* if ->attr.open is already NULL. kernfs_unlink_open_file makes
* ->attr.open NULL only while holding kernfs_open_file_mutex so below
- * check under kernfs_open_file_mutex will ensure bailing out if
+ * check under kernfs_open_file_mutex_ptr(kn) will ensure bailing out if
* ->attr.open became NULL while waiting for the mutex.
*/
- if (!kn->attr.open)
+ if (!rcu_access_pointer(kn->attr.open))
return;
- mutex_lock(&kernfs_open_file_mutex);
- if (!kn->attr.open) {
- mutex_unlock(&kernfs_open_file_mutex);
+ mutex = kernfs_open_file_mutex_lock(kn);
+ on = kernfs_deref_open_node_protected(kn);
+ if (!on) {
+ mutex_unlock(mutex);
return;
}
- on = kn->attr.open;
-
list_for_each_entry(of, &on->files, list) {
struct inode *inode = file_inode(of->file);
@@ -795,7 +855,7 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
kernfs_release_file(kn, of);
}
- mutex_unlock(&kernfs_open_file_mutex);
+ mutex_unlock(mutex);
}
/*
@@ -815,7 +875,10 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
__poll_t kernfs_generic_poll(struct kernfs_open_file *of, poll_table *wait)
{
struct kernfs_node *kn = kernfs_dentry_node(of->file->f_path.dentry);
- struct kernfs_open_node *on = kn->attr.open;
+ struct kernfs_open_node *on = kernfs_deref_open_node(of, kn);
+
+ if (!on)
+ return EPOLLERR;
poll_wait(of->file, &on->poll, wait);
@@ -922,13 +985,13 @@ void kernfs_notify(struct kernfs_node *kn)
return;
/* kick poll immediately */
- spin_lock_irqsave(&kernfs_open_node_lock, flags);
- on = kn->attr.open;
+ rcu_read_lock();
+ on = rcu_dereference(kn->attr.open);
if (on) {
atomic_inc(&on->event);
wake_up_interruptible(&on->poll);
}
- spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
+ rcu_read_unlock();
/* schedule work to kick fsnotify */
spin_lock_irqsave(&kernfs_notify_lock, flags);
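
This last hunk is the payoff: kernfs_notify() can run on wakeup-latency-sensitive paths, and it now touches no lock shared with open/release at all; the grace period arranged by kfree_rcu() above is what keeps the lockless dereference safe. Closing out the generic obj sketch (illustrative, not kernfs API):

/* Read side: no shared lock; obj_retire()'s grace period keeps @p valid. */
static void obj_notify(struct obj *o)
{
	struct foo *p;

	rcu_read_lock();
	p = rcu_dereference(o->ptr);
	if (p)
		atomic_inc(&p->event);	/* e.g. bump the event counter */
	rcu_read_unlock();
}

Between rcu_read_lock() and rcu_read_unlock() the object cannot be freed; it can still be unpublished concurrently, which is fine because @p itself stays valid for the duration of the critical section.
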