// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/msync.c
 *
 * Copyright (C) 1994-1999 Linus Torvalds
 */

/*
 * The msync() system call.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/syscalls.h>
#include <linux/sched.h>

/*
 * MS_SYNC syncs the entire file - including mappings.
 *
 * MS_ASYNC does not start I/O (it used to, up to 2.5.67).
 * Nor does it mark the relevant pages dirty (it used to, up to 2.6.17).
 * Now it doesn't do anything, since dirty pages are properly tracked.
 *
 * The application may now run fsync() to
 * write out the dirty pages and wait on the writeout and check the result.
 * Or the application may run fadvise(FADV_DONTNEED) against the fd to start
 * async writeout immediately.
 * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
 * applications.
 */
SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
{
	unsigned long end;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int unmapped_error = 0;
	int error = -EINVAL;

	start = untagged_addr(start);

	if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC))
		goto out;
	if (offset_in_page(start))
		goto out;
	if ((flags & MS_ASYNC) && (flags & MS_SYNC))
		goto out;
	error = -ENOMEM;
	len = (len + ~PAGE_MASK) & PAGE_MASK;
	end = start + len;
	if (end < start)
		goto out;
	error = 0;
	if (end == start)
		goto out;
	/*
	 * If the interval [start,end) covers some unmapped address ranges,
	 * just ignore them, but return -ENOMEM at the end. Besides, if the
	 * flag is MS_ASYNC (w/o MS_INVALIDATE) the result would be -ENOMEM
	 * anyway and there is nothing left to do, so return immediately.
	 */
	mmap_read_lock(mm);
	vma = find_vma(mm, start);
	for (;;) {
		struct file *file;
		loff_t fstart, fend;

		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out_unlock;
		/* Here start < vma->vm_end. */
		if (start < vma->vm_start) {
			if (flags == MS_ASYNC)
				goto out_unlock;
			start = vma->vm_start;
			if (start >= end)
				goto out_unlock;
			unmapped_error = -ENOMEM;
		}
		/* Here vma->vm_start <= start < vma->vm_end. */
		if ((flags & MS_INVALIDATE) &&
				(vma->vm_flags & VM_LOCKED)) {
			error = -EBUSY;
			goto out_unlock;
		}
		file = vma->vm_file;
		fstart = (start - vma->vm_start) +
			 ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
		fend = fstart + (min(end, vma->vm_end) - start) - 1;
		start = vma->vm_end;
		if ((flags & MS_SYNC) && file &&
				(vma->vm_flags & VM_SHARED)) {
			get_file(file);
			mmap_read_unlock(mm);
			error = vfs_fsync_range(file, fstart, fend, 1);
			fput(file);
			if (error || start >= end)
				goto out;
			mmap_read_lock(mm);
			vma = find_vma(mm, start);
		} else {
			if (start >= end) {
				error = 0;
				goto out_unlock;
			}
			vma = find_vma(mm, vma->vm_end);
		}
	}
out_unlock:
	mmap_read_unlock(mm);
out:
	return error ? : unmapped_error;
}
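
A minimal userspace sketch (not part of mm/msync.c) illustrating the semantics described in the header comment: MS_ASYNC is effectively a no-op because dirty pages are already tracked, so an application wanting durability either calls msync(MS_SYNC) on a shared mapping or calls fsync() on the file descriptor. The file name "data.bin" and the written contents here are illustrative assumptions, not taken from the kernel source.

/* Userspace example: write through a MAP_SHARED mapping, then sync. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t len = 4096;
	int fd = open("data.bin", O_RDWR | O_CREAT, 0644);

	if (fd < 0 || ftruncate(fd, len) < 0) {
		perror("open/ftruncate");
		return EXIT_FAILURE;
	}

	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/* Dirty the shared mapping. */
	memcpy(p, "hello msync\n", 12);

	/* Synchronous writeback of the mapped range; waits for completion. */
	if (msync(p, len, MS_SYNC) < 0)
		perror("msync(MS_SYNC)");

	/* Equivalent durability via the fd, as the kernel comment suggests. */
	if (fsync(fd) < 0)
		perror("fsync");

	munmap(p, len);
	close(fd);
	return EXIT_SUCCESS;
}

Note that msync() requires the start address to be page-aligned; addresses returned by mmap() already satisfy this, which is why the example passes the mapping base directly.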