Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c  56
1 files changed, 56 insertions, 0 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
index 1bceb49aa214..488dda209431 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1324,12 +1324,41 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	pte_t pteval;
 	spinlock_t *ptl;
 	int ret = SWAP_AGAIN;
+	unsigned long sh_address;
+	bool pmd_sharing_possible = false;
+	unsigned long spmd_start, spmd_end;
 	enum ttu_flags flags = (enum ttu_flags)arg;
 
 	/* munlock has nothing to gain from examining un-locked vmas */
 	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
 		goto out;
 
+	/*
+	 * Only use the range_start/end mmu notifiers if huge pmd sharing
+	 * is possible. In the normal case, mmu_notifier_invalidate_page
+	 * is sufficient as we only unmap a page. However, if we unshare
+	 * a pmd, we will unmap a PUD_SIZE range.
+	 */
+	if (PageHuge(page)) {
+		spmd_start = address;
+		spmd_end = spmd_start + vma_mmu_pagesize(vma);
+
+		/*
+		 * Check if pmd sharing is possible. If possible, we could
+		 * unmap a PUD_SIZE range. spmd_start/spmd_end will be
+		 * modified if sharing is possible.
+		 */
+		adjust_range_if_pmd_sharing_possible(vma, &spmd_start,
+						     &spmd_end);
+		if (spmd_end - spmd_start != vma_mmu_pagesize(vma)) {
+			sh_address = address;
+
+			pmd_sharing_possible = true;
+			mmu_notifier_invalidate_range_start(vma->vm_mm,
+						spmd_start, spmd_end);
+		}
+	}
+
 	pte = page_check_address(page, mm, address, &ptl, 0);
 	if (!pte)
 		goto out;
@@ -1356,6 +1385,30 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		}
 	}
 
+	/*
+	 * Call huge_pmd_unshare to potentially unshare a huge pmd. Pass
+	 * sh_address as it will be modified if unsharing is successful.
+	 */
+	if (PageHuge(page) && huge_pmd_unshare(mm, &sh_address, pte)) {
+		/*
+		 * huge_pmd_unshare unmapped an entire PMD page. There is
+		 * no way of knowing exactly which PMDs may be cached for
+		 * this mm, so flush them all. spmd_start/spmd_end cover
+		 * this PUD_SIZE range.
+		 */
+		flush_cache_range(vma, spmd_start, spmd_end);
+		flush_tlb_range(vma, spmd_start, spmd_end);
+
+		/*
+		 * The ref count of the PMD page was dropped which is part
+		 * of the way map counting is done for shared PMDs. When
+		 * there is no other sharing, huge_pmd_unshare returns false
+		 * and we will unmap the actual page and drop map count
+		 * to zero.
+		 */
+		goto out_unmap;
+	}
+
 	/* Nuke the page table entry. */
 	flush_cache_page(vma, address, page_to_pfn(page));
 	if (should_defer_flush(mm, flags)) {
@@ -1450,6 +1503,9 @@ out_unmap:
 	if (ret != SWAP_FAIL && ret != SWAP_MLOCK && !(flags & TTU_MUNLOCK))
 		mmu_notifier_invalidate_page(mm, address);
 out:
+	if (pmd_sharing_possible)
+		mmu_notifier_invalidate_range_end(vma->vm_mm,
+						  spmd_start, spmd_end);
 	return ret;
 }
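
For reference, the range arithmetic the first hunk relies on can be modeled by the small userspace sketch below. This is not kernel code: the PMD_SIZE/PUD_SIZE values are the common x86_64 ones, and huge_page_size_of_vma()/vma_covers() are stand-ins invented here for vma_mmu_pagesize() and the in-VMA check. The real adjust_range_if_pmd_sharing_possible() also requires a shared (VM_MAYSHARE) mapping and can widen across more than one PUD; only the "widen to a PUD_SIZE range, then compare against the huge page size" logic mirrors the patch.

/*
 * Userspace sketch of the spmd_start/spmd_end handling in the patch.
 * All names and constants here are illustrative assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

#define PMD_SIZE  (2UL << 20)   /* 2 MB huge page on x86_64 */
#define PUD_SIZE  (1UL << 30)   /* 1 GB: the span mapped by one PMD page */
#define PUD_MASK  (~(PUD_SIZE - 1))

/* Stand-in for vma_mmu_pagesize(vma) on a hugetlb VMA. */
static unsigned long huge_page_size_of_vma(void) { return PMD_SIZE; }

/* Hypothetical check: does the VMA cover the whole PUD-aligned range? */
static bool vma_covers(unsigned long vm_start, unsigned long vm_end,
		       unsigned long start, unsigned long end)
{
	return vm_start <= start && vm_end >= end;
}

/*
 * Simplified model of adjust_range_if_pmd_sharing_possible(): widen
 * [start, end) to the PUD_SIZE range containing the page when the VMA
 * is large enough that its PMD page could be shared.
 */
static void adjust_range_if_sharing_possible(unsigned long vm_start,
					     unsigned long vm_end,
					     unsigned long *start,
					     unsigned long *end)
{
	unsigned long a_start = *start & PUD_MASK;
	unsigned long a_end   = a_start + PUD_SIZE;

	if (vma_covers(vm_start, vm_end, a_start, a_end)) {
		*start = a_start;
		*end   = a_end;
	}
}

int main(void)
{
	/* A 1 GB, PUD-aligned hugetlb VMA, so its PMD page may be shared. */
	unsigned long vm_start = 0x40000000UL, vm_end = 0x80000000UL;
	unsigned long address  = 0x40a00000UL;	/* page being unmapped */

	unsigned long spmd_start = address;
	unsigned long spmd_end   = spmd_start + huge_page_size_of_vma();

	adjust_range_if_sharing_possible(vm_start, vm_end,
					 &spmd_start, &spmd_end);

	if (spmd_end - spmd_start != huge_page_size_of_vma())
		printf("sharing possible: cover %#lx-%#lx (PUD_SIZE)\n",
		       spmd_start, spmd_end);
	else
		printf("only one huge page at %#lx is affected\n", address);
	return 0;
}

With the example addresses this prints the widened 1 GB window, which is exactly the range the patch passes to mmu_notifier_invalidate_range_start/end and to flush_cache_range()/flush_tlb_range() when a shared PMD is unhooked.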
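
A second sketch, under the same caveats, reduces the control flow added by the later hunks to stubs: start the range notifier only when sharing is possible, flush the whole widened range and skip the per-page teardown when the PMD is actually unshared, and always close the notifier at the common exit. notifier_start(), notifier_end(), flush_range() and try_unshare() are invented stand-ins, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

static void notifier_start(unsigned long s, unsigned long e)
{ printf("invalidate_range_start %#lx-%#lx\n", s, e); }

static void notifier_end(unsigned long s, unsigned long e)
{ printf("invalidate_range_end   %#lx-%#lx\n", s, e); }

static void flush_range(unsigned long s, unsigned long e)
{ printf("flush caches/TLB for   %#lx-%#lx\n", s, e); }

/* Pretend the PMD page was shared and has now been unhooked. */
static bool try_unshare(void) { return true; }

static void unmap_one_huge_page(unsigned long spmd_start,
				unsigned long spmd_end,
				bool sharing_possible)
{
	if (sharing_possible)
		notifier_start(spmd_start, spmd_end); /* may cover PUD_SIZE */

	if (sharing_possible && try_unshare()) {
		/* A whole PMD page of mappings went away: flush it all. */
		flush_range(spmd_start, spmd_end);
		goto out;			/* skip the single-page path */
	}

	printf("single-page unmap path\n");	/* normal per-page teardown */

out:
	if (sharing_possible)
		notifier_end(spmd_start, spmd_end); /* always pairs with _start */
}

int main(void)
{
	unmap_one_huge_page(0x40000000UL, 0x80000000UL, true);	 /* shared PMD */
	unmap_one_huge_page(0x40a00000UL, 0x40c00000UL, false); /* plain page */
	return 0;
}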