author    Mark Brown <broonie@kernel.org>    2018-09-20 08:35:34 -0700
committer Mark Brown <broonie@kernel.org>    2018-09-20 08:35:34 -0700
commit    f4150b38acaa292e9efa1e0d82a3d8bdfdbec658
tree      672578a0f65a3315ad6e5354fecbf916fad108db /mm
parent    ff05cdd619ab422b5380f56d19603bc51aca97a5
parent    d9560919689d588beccf719452086b5cdf6d6c22
Merge tag 'v4.4.157' into linux-linaro-lsk-v4.4
This is the 4.4.157 stable release
Diffstat (limited to 'mm')
-rw-r--r--    mm/debug.c          4
-rw-r--r--    mm/fadvise.c        8
-rw-r--r--    mm/huge_memory.c    2
-rw-r--r--    mm/vmacache.c      38
4 files changed, 9 insertions, 43 deletions
diff --git a/mm/debug.c b/mm/debug.c
index 668aa35191ca..689b6e911cae 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -168,7 +168,7 @@ EXPORT_SYMBOL(dump_vma);
void dump_mm(const struct mm_struct *mm)
{
- pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
+ pr_emerg("mm %p mmap %p seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
"get_unmapped_area %p\n"
#endif
@@ -198,7 +198,7 @@ void dump_mm(const struct mm_struct *mm)
#endif
"%s", /* This is here to hold the comma */
- mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
+ mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
mm->get_unmapped_area,
#endif
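
Note on the hunk above: it is fallout from the vmacache fix later in this merge, which widens mm->vmacache_seqnum from 32 to 64 bits, so the old %d specifier would print garbage. A minimal userspace sketch of the portable-printing idiom, with uint64_t standing in for the kernel's u64 (not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t seqnum = 1ULL << 40;

        /*
         * A 64-bit type may be "unsigned long" on 64-bit builds and
         * "unsigned long long" on 32-bit ones, so a single format
         * string that works everywhere needs an explicit cast plus
         * %llu, as the hunk above does for mm->vmacache_seqnum.
         */
        printf("seqnum %llu\n", (unsigned long long)seqnum);
        return 0;
    }
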
diff --git a/mm/fadvise.c b/mm/fadvise.c
index b8a5bc66b0c0..001877e32f0c 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -68,8 +68,12 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
goto out;
}
- /* Careful about overflows. Len == 0 means "as much as possible" */
- endbyte = offset + len;
+ /*
+ * Careful about overflows. Len == 0 means "as much as possible". Use
+ * unsigned math because signed overflows are undefined and UBSan
+ * complains.
+ */
+ endbyte = (u64)offset + (u64)len;
if (!len || endbyte < len)
endbyte = -1;
else
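
Why the casts in this hunk matter: offset and len are loff_t (signed 64-bit), and signed overflow is undefined behavior in C, so the compiler may assume a large offset + len never happens (and UBSan reports it). Unsigned arithmetic is defined to wrap modulo 2^64, which is what makes the subsequent endbyte < len wraparound test meaningful. A standalone sketch of the same idiom, with int64_t/uint64_t standing in for loff_t/u64:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int64_t offset = INT64_MAX - 10;   /* near the maximum file offset */
        int64_t len    = 100;

        /* Unsigned addition wraps instead of invoking undefined behavior. */
        int64_t endbyte = (uint64_t)offset + (uint64_t)len;

        /*
         * The wrapped sum converts back to a negative int64_t (an
         * implementation-defined but well-behaved conversion on the
         * compilers the kernel supports), so it compares less than
         * len and gets clamped to "as much as possible".
         */
        if (!len || endbyte < len)
            endbyte = -1;
        else
            endbyte--;   /* endbyte is inclusive in fadvise */

        printf("endbyte = %lld\n", (long long)endbyte);
        return 0;
    }
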
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0127b788272f..c4ea57ee2fd1 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1393,12 +1393,12 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* Migration could have started since the pmd_trans_migrating check */
if (!page_locked) {
+ page_nid = -1;
if (!get_page_unless_zero(page))
goto out_unlock;
spin_unlock(ptl);
wait_on_page_locked(page);
put_page(page);
- page_nid = -1;
goto out;
}
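
The hunk above is purely about ordering. page_nid is consulted at the function's shared exit label to decide whether to account a NUMA fault, and the get_page_unless_zero() failure path (the page was freed under us) jumps to that label too; resetting page_nid before the refcount check, rather than only on the wait path, keeps a freed page from being accounted. A compressed sketch of the pattern, with hypothetical stand-in helpers (get_ref and account_fault are illustrations, not kernel APIs):

    #include <stdio.h>

    /* Hypothetical stand-ins; they only model the control flow. */
    static int get_ref(void) { return 0; /* pretend the page was freed */ }
    static void account_fault(int nid) { printf("fault on node %d\n", nid); }

    static void handle_fault(int locked)
    {
        int page_nid = 7;   /* node we would account on success */

        if (!locked) {
            /*
             * Reset page_nid BEFORE the early exit: every path
             * reaching 'out' uses it to decide whether to call
             * account_fault().
             */
            page_nid = -1;
            if (!get_ref())
                goto out;
            /* ... wait for migration, drop the reference ... */
        }
    out:
        if (page_nid != -1)
            account_fault(page_nid);
    }

    int main(void)
    {
        handle_fault(0);   /* prints nothing: freed page, no accounting */
        return 0;
    }
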
diff --git a/mm/vmacache.c b/mm/vmacache.c
index fd09dc9c6812..9c8ff3d4eda9 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -6,44 +6,6 @@
#include <linux/vmacache.h>
/*
- * Flush vma caches for threads that share a given mm.
- *
- * The operation is safe because the caller holds the mmap_sem
- * exclusively and other threads accessing the vma cache will
- * have mmap_sem held at least for read, so no extra locking
- * is required to maintain the vma cache.
- */
-void vmacache_flush_all(struct mm_struct *mm)
-{
- struct task_struct *g, *p;
-
- count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
-
- /*
- * Single threaded tasks need not iterate the entire
- * list of process. We can avoid the flushing as well
- * since the mm's seqnum was increased and don't have
- * to worry about other threads' seqnum. Current's
- * flush will occur upon the next lookup.
- */
- if (atomic_read(&mm->mm_users) == 1)
- return;
-
- rcu_read_lock();
- for_each_process_thread(g, p) {
- /*
- * Only flush the vmacache pointers as the
- * mm seqnum is already set and curr's will
- * be set upon invalidation when the next
- * lookup is done.
- */
- if (mm == p->mm)
- vmacache_flush(p);
- }
- rcu_read_unlock();
-}
-
-/*
* This task may be accessing a foreign mm via (for example)
* get_user_pages()->find_vma(). The vmacache is task-local and this
* task's vmacache pertains to a different mm (ie, its own). There is
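
This deletion tracks the upstream fix "mm: get rid of vmacache_flush_all() entirely" (the CVE-2018-17182 fix): the mm_users == 1 shortcut in the removed function could skip flushing threads that still held stale vmacache entries when the 32-bit sequence number wrapped, leading to a use-after-free. Rather than walking every thread on overflow, the fix widens vmacache_seqnum to 64 bits (hence the dump_mm %llu hunk at the top of this diff), so the counter never wraps in practice and lazy per-task invalidation suffices. A minimal userspace model of that lazy scheme, with simplified structs rather than the kernel's real types:

    #include <stdint.h>
    #include <string.h>

    #define VMACACHE_SIZE 4

    struct mm {
        uint64_t seqnum;           /* bumped on every mapping change */
    };

    struct task {
        uint64_t vmacache_seqnum;  /* mm->seqnum at the last cache fill */
        void *vmacache[VMACACHE_SIZE];
    };

    /* Invalidation is O(1): bump the counter, touch no other thread. */
    static void vmacache_invalidate(struct mm *mm)
    {
        mm->seqnum++;
    }

    /* Lookups flush lazily when the task's snapshot is stale. */
    static void *vmacache_find(struct task *t, struct mm *mm, int idx)
    {
        if (t->vmacache_seqnum != mm->seqnum) {
            memset(t->vmacache, 0, sizeof(t->vmacache));
            t->vmacache_seqnum = mm->seqnum;
            return NULL;           /* miss: fall back to the vma tree */
        }
        return t->vmacache[idx];
    }

Even at a billion invalidations per second, a 64-bit counter takes centuries to wrap, so the overflow flush (and its subtle shortcut bug) can simply be removed.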