author	Jan Kara <jack@suse.cz>	2016-12-14 15:07:24 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-14 16:04:09 -0800
commit	b1aa812b21084285e9f6098639be9cd5bf9e05d7 (patch)
tree	4ae7e8f05e0d88f4a22bdd32b1b23722a779d4bc /mm
parent	9118c0cbd44262d0015568266f314e645ed6b9ce (diff)
mm: move handling of COW faults into DAX code
Move final handling of COW faults from generic code into DAX fault handler. That way generic code doesn't have to be aware of peculiarities of DAX locking so remove that knowledge and make locking functions private to fs/dax.c.

Link: http://lkml.kernel.org/r/1479460644-25076-11-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
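For context, the fs/dax.c side of this series (not visible in the mm-only diffstat below) is what makes the simplification possible: on a write fault against a private mapping the DAX fault handler now fills vmf->cow_page itself, calls finish_fault(), drops its own mapping-entry lock, and reports VM_FAULT_DONE_COW, so do_cow_fault() no longer needs to know whether a DAX entry was locked. The fragment below is only a rough sketch of that shape; the helper name dax_finish_cow_sketch() and the kaddr source argument are illustrative assumptions, not the actual fs/dax.c code.

/*
 * Illustrative sketch only, not the real fs/dax.c change.  'kaddr' stands
 * in for however the fault handler reaches the source data; NULL means the
 * faulting block is a hole and the COW page just gets zeroed.
 */
static int dax_finish_cow_sketch(struct vm_fault *vmf, void *kaddr)
{
	int ret;

	/* Copy (or zero) the source data into the pre-allocated COW page. */
	if (kaddr)
		copy_user_page(page_address(vmf->cow_page), kaddr,
			       vmf->address, vmf->cow_page);
	else
		clear_user_highpage(vmf->cow_page, vmf->address);
	__SetPageUptodate(vmf->cow_page);

	/* Install the now-populated COW page into the page tables. */
	ret = finish_fault(vmf);
	if (!ret)
		ret = VM_FAULT_DONE_COW;

	/*
	 * The caller unlocks its own DAX mapping entry before returning,
	 * so generic code never needs a VM_FAULT_DAX_LOCKED flag.
	 */
	return ret;
}

With VM_FAULT_DONE_COW as the only signal, do_cow_fault() can treat every fault source the same way, which is exactly the special casing the hunks below remove.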
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c | 13
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 22f7f6e38515..ca3b95fa5fd1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2845,7 +2845,7 @@ static int __do_fault(struct vm_fault *vmf)
 
 	ret = vma->vm_ops->fault(vma, vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
-			    VM_FAULT_DAX_LOCKED | VM_FAULT_DONE_COW)))
+			    VM_FAULT_DONE_COW)))
 		return ret;
 
 	if (unlikely(PageHWPoison(vmf->page))) {
@@ -3276,17 +3276,12 @@ static int do_cow_fault(struct vm_fault *vmf)
 	if (ret & VM_FAULT_DONE_COW)
 		return ret;
 
-	if (!(ret & VM_FAULT_DAX_LOCKED))
-		copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
+	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
 	__SetPageUptodate(vmf->cow_page);
 
 	ret |= finish_fault(vmf);
-	if (!(ret & VM_FAULT_DAX_LOCKED)) {
-		unlock_page(vmf->page);
-		put_page(vmf->page);
-	} else {
-		dax_unlock_mapping_entry(vma->vm_file->f_mapping, vmf->pgoff);
-	}
+	unlock_page(vmf->page);
+	put_page(vmf->page);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		goto uncharge_out;
 	return ret;