// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/mm/process_vm_access.c
 *
 * Copyright (C) 2010-2011 Christopher Yeoh <cyeoh@au1.ibm.com>, IBM Corp.
 */

#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

/**
 * process_vm_rw_pages - read/write pages from task specified
 * @pages: array of pointers to pages we want to copy
 * @offset: offset in page to start copying from/to
 * @len: number of bytes to copy
 * @iter: where to copy to/from locally
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code otherwise
 */
static int process_vm_rw_pages(struct page **pages,
			       unsigned offset,
			       size_t len,
			       struct iov_iter *iter,
			       int vm_write)
{
	/* Do the copy for each page */
	while (len && iov_iter_count(iter)) {
		struct page *page = *pages++;
		size_t copy = PAGE_SIZE - offset;
		size_t copied;

		if (copy > len)
			copy = len;

		if (vm_write)
			copied = copy_page_from_iter(page, offset, copy, iter);
		else
			copied = copy_page_to_iter(page, offset, copy, iter);

		len -= copied;
		if (copied < copy && iov_iter_count(iter))
			return -EFAULT;
		offset = 0;
	}
	return 0;
}
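
/*
 * Editorial illustration (assuming 4 KiB pages, which this file does
 * not guarantee): copying len = 6000 bytes starting at offset = 100
 * takes 4096 - 100 = 3996 bytes from the first page and the remaining
 * 2004 from the start of the second, since the loop above resets
 * offset to 0 after the first iteration.
 */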

/* Maximum number of pages kmalloc'd to hold struct page's during copy */
#define PVM_MAX_KMALLOC_PAGES (PAGE_SIZE * 2)
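
/*
 * Editorial arithmetic (assuming 4 KiB pages and 8-byte pointers,
 * neither of which is guaranteed here): two pages of pointer storage
 * hold 2 * 4096 / 8 = 1024 struct page pointers, so each pass of the
 * pinning loop below covers at most 1024 * 4 KiB = 4 MiB of the
 * remote address space.
 */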

/**
 * process_vm_rw_single_vec - read/write pages from task specified
 * @addr: start memory address of target process
 * @len: size of area to copy to/from
 * @iter: where to copy to/from locally
 * @process_pages: struct pages area that can store at least
 *	nr_pages_to_copy struct page pointers
 * @mm: mm for task
 * @task: task to read/write from
 * @vm_write: 0 means copy from, 1 means copy to
 * Returns 0 on success, error code on failure
 */
static int process_vm_rw_single_vec(unsigned long addr,
				    unsigned long len,
				    struct iov_iter *iter,
				    struct page **process_pages,
				    struct mm_struct *mm,
				    struct task_struct *task,
				    int vm_write)
{
	unsigned long pa = addr & PAGE_MASK;
	unsigned long start_offset = addr - pa;
	unsigned long nr_pages;
	ssize_t rc = 0;
	unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
		/ sizeof(struct page *);
	unsigned int flags = 0;

	/* Work out address and page range required */
	if (len == 0)
		return 0;
	nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
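	/*
	 * For example (editorial illustration, assuming 4 KiB pages):
	 * addr = 0x1ffc and len = 16 straddle a page boundary, and the
	 * line above yields (0x200b >> 12) - (0x1ffc >> 12) + 1 = 2.
	 */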

	if (vm_write)
		flags |= FOLL_WRITE;

	while (!rc && nr_pages && iov_iter_count(iter)) {
		int pinned_pages = min(nr_pages, max_pages_per_loop);
		int locked = 1;
		size_t bytes;

		/*
		 * Get the pages we're interested in.  We must
		 * access remotely because task/mm might not be
		 * current/current->mm
		 */
		mmap_read_lock(mm);
		pinned_pages = pin_user_pages_remote(mm, pa, pinned_pages,
						     flags, process_pages,
						     &locked);
		if (locked)
			mmap_read_unlock(mm);
		if (pinned_pages <= 0)
			return -EFAULT;

		bytes = pinned_pages * PAGE_SIZE - start_offset;
		if (bytes > len)
			bytes = len;

		rc = process_vm_rw_pages(process_pages,
					 start_offset, bytes, iter,
					 vm_write);
		len -= bytes;
		start_offset = 0;
		nr_pages -= pinned_pages;
		pa += pinned_pages * PAGE_SIZE;

		/* If vm_write is set, the pages need to be made dirty: */
		unpin_user_pages_dirty_lock(process_pages, pinned_pages,
					    vm_write);
	}

	return rc;
}

/* Maximum number of entries for process pages array
   which lives on stack */
#define PVM_MAX_PP_ARRAY_COUNT 16
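
/*
 * Editorial note: with 8-byte pointers (an assumption, not something
 * this file guarantees), 16 entries keep the fast-path array at a
 * modest 128 bytes of kernel stack; larger requests fall back to the
 * kmalloc'd array bounded by PVM_MAX_KMALLOC_PAGES above.
 */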

/**
 * process_vm_rw_core - core of reading/writing pages from task specified
 * @pid: PID of process to read/write from/to
 * @iter: where to copy to/from locally
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 *
 * Returns the number of bytes read/written or error code.  May
 * return fewer bytes than expected if an error occurs during the copying
 * process.
 */
static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
				  const struct iovec *rvec,
				  unsigned long riovcnt,
				  unsigned long flags, int vm_write)
{
	struct task_struct *task;
	struct page *pp_stack[PVM_MAX_PP_ARRAY_COUNT];
	struct page **process_pages = pp_stack;
	struct mm_struct *mm;
	unsigned long i;
	ssize_t rc = 0;
	unsigned long nr_pages = 0;
	unsigned long nr_pages_iov;
	ssize_t iov_len;
	size_t total_len = iov_iter_count(iter);

	/*
	 * Work out how many pages of struct pages we're going to need
	 * when eventually calling pin_user_pages_remote()
	 */
	for (i = 0; i < riovcnt; i++) {
		iov_len = rvec[i].iov_len;
		if (iov_len > 0) {
			nr_pages_iov = ((unsigned long)rvec[i].iov_base
					+ iov_len)
				/ PAGE_SIZE - (unsigned long)rvec[i].iov_base
				/ PAGE_SIZE + 1;
			nr_pages = max(nr_pages, nr_pages_iov);
		}
	}

	if (nr_pages == 0)
		return 0;

	if (nr_pages > PVM_MAX_PP_ARRAY_COUNT) {
		/* For reliability don't try to kmalloc more than
		   2 pages worth */
		process_pages = kmalloc(min_t(size_t, PVM_MAX_KMALLOC_PAGES,
					      sizeof(struct page *) * nr_pages),
					GFP_KERNEL);

		if (!process_pages)
			return -ENOMEM;
	}

	/* Get process information */
	task = find_get_task_by_vpid(pid);
	if (!task) {
		rc = -ESRCH;
		goto free_proc_pages;
	}

	mm = mm_access(task, PTRACE_MODE_ATTACH_REALCREDS);
	if (!mm || IS_ERR(mm)) {
		rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
		/*
		 * Explicitly map EACCES to EPERM as EPERM is a more
		 * appropriate error code for process_vm_readv/writev
		 */
		if (rc == -EACCES)
			rc = -EPERM;
		goto put_task_struct;
	}

	for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
		rc = process_vm_rw_single_vec(
			(unsigned long)rvec[i].iov_base, rvec[i].iov_len,
			iter, process_pages, mm, task, vm_write);

	/* copied = space before - space after */
	total_len -= iov_iter_count(iter);

	/* If we have managed to copy any data at all then
	   we return the number of bytes copied. Otherwise
	   we return the error code */
	if (total_len)
		rc = total_len;

	mmput(mm);

put_task_struct:
	put_task_struct(task);

free_proc_pages:
	if (process_pages != pp_stack)
		kfree(process_pages);
	return rc;
}

/**
 * process_vm_rw - check iovecs before calling core routine
 * @pid: PID of process to read/write from/to
 * @lvec: iovec array specifying where to copy to/from locally
 * @liovcnt: size of lvec array
 * @rvec: iovec array specifying where to copy to/from in the other process
 * @riovcnt: size of rvec array
 * @flags: currently unused
 * @vm_write: 0 if reading from other process, 1 if writing to other process
 *
 * Returns the number of bytes read/written or error code.  May
 * return fewer bytes than expected if an error occurs during the copying
 * process.
 */
static ssize_t process_vm_rw(pid_t pid,
			     const struct iovec __user *lvec,
			     unsigned long liovcnt,
			     const struct iovec __user *rvec,
			     unsigned long riovcnt,
			     unsigned long flags, int vm_write)
{
	struct iovec iovstack_l[UIO_FASTIOV];
	struct iovec iovstack_r[UIO_FASTIOV];
	struct iovec *iov_l = iovstack_l;
	struct iovec *iov_r;
	struct iov_iter iter;
	ssize_t rc;
	int dir = vm_write ? ITER_SOURCE : ITER_DEST;

	if (flags != 0)
		return -EINVAL;

	/* Check iovecs */
	rc = import_iovec(dir, lvec, liovcnt, UIO_FASTIOV, &iov_l, &iter);
	if (rc < 0)
		return rc;
	if (!iov_iter_count(&iter))
		goto free_iov_l;
	iov_r = iovec_from_user(rvec, riovcnt, UIO_FASTIOV, iovstack_r,
				in_compat_syscall());
	if (IS_ERR(iov_r)) {
		rc = PTR_ERR(iov_r);
		goto free_iov_l;
	}
	rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
	if (iov_r != iovstack_r)
		kfree(iov_r);
free_iov_l:
	kfree(iov_l);
	return rc;
}

SYSCALL_DEFINE6(process_vm_readv, pid_t, pid, const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 0);
}

SYSCALL_DEFINE6(process_vm_writev, pid_t, pid,
		const struct iovec __user *, lvec,
		unsigned long, liovcnt, const struct iovec __user *, rvec,
		unsigned long, riovcnt, unsigned long, flags)
{
	return process_vm_rw(pid, lvec, liovcnt, rvec, riovcnt, flags, 1);
}
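
/*
 * Editorial sketch of userspace usage (not part of this file; assumes
 * the glibc wrappers for these syscalls and a target the caller is
 * permitted to ptrace, per PTRACE_MODE_ATTACH_REALCREDS above; the
 * read_remote() helper name is hypothetical):
 *
 *	#include <sys/uio.h>
 *
 *	ssize_t read_remote(pid_t pid, void *remote_addr,
 *			    void *buf, size_t len)
 *	{
 *		struct iovec local  = { .iov_base = buf,         .iov_len = len };
 *		struct iovec remote = { .iov_base = remote_addr, .iov_len = len };
 *
 *		// May return a short count if the copy faults part-way.
 *		return process_vm_readv(pid, &local, 1, &remote, 1, 0);
 *	}
 *
 * process_vm_writev() takes the same arguments with the copy direction
 * reversed.
 */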