// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/resume_user_mode.h>
#include <asm/current.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
#include <registers.h>
#include <linux/time-internal.h>
#include <linux/elfcore.h>

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
        /* FIXME: Need to look up userspace_pid by cpu */
        return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
        int i;

        for (i = 0; i < ncpus; i++) {
                if (cpu_tasks[i].pid == pid)
                        return i;
        }
        return -1;
}

void free_stack(unsigned long stack, int order)
{
        free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
        unsigned long page;
        gfp_t flags = GFP_KERNEL;

        if (atomic)
                flags = GFP_ATOMIC;
        page = __get_free_pages(flags, order);

        return page;
}

static inline void set_current(struct task_struct *task)
{
        cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
                { external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

void *__switch_to(struct task_struct *from, struct task_struct *to)
{
        to->thread.prev_sched = from;
        set_current(to);

        switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
        arch_switch_to(current);

        return current->thread.prev_sched;
}
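/*
 * A note on the mechanism: switch_threads() lives on the os-Linux side
 * and is built on setjmp/longjmp rather than on any hardware context
 * switch.  A minimal sketch of the idiom, assuming the UML_SETJMP /
 * UML_LONGJMP wrappers (not a verbatim copy of the implementation):
 *
 *      void switch_threads(jmp_buf *me, jmp_buf *you)
 *      {
 *              if (UML_SETJMP(me) == 0)
 *                      UML_LONGJMP(you, 1);
 *      }
 *
 * The call returns only when a later __switch_to() longjmps back into
 * "from", which is why prev_sched is re-read from current afterwards.
 */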

void interrupt_end(void)
{
        struct pt_regs *regs = &current->thread.regs;

        if (need_resched())
                schedule();
        if (test_thread_flag(TIF_SIGPENDING) ||
            test_thread_flag(TIF_NOTIFY_SIGNAL))
                do_signal(regs);
        if (test_thread_flag(TIF_NOTIFY_RESUME))
                resume_user_mode_work(regs);
}
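/*
 * interrupt_end() is UML's equivalent of the generic exit-to-user-mode
 * work loop: reschedule if needed, deliver any pending signals, then
 * run TIF_NOTIFY_RESUME work before the task drops back to userspace.
 */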

int get_current_pid(void)
{
        return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
        int (*fn)(void *), n;
        void *arg;

        if (current->thread.prev_sched != NULL)
                schedule_tail(current->thread.prev_sched);
        current->thread.prev_sched = NULL;

        fn = current->thread.request.u.thread.proc;
        arg = current->thread.request.u.thread.arg;

        /*
         * callback returns only if the kernel thread execs a process
         */
        n = fn(arg);
        userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}
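/*
 * The "magic" is set up by new_thread() (called from copy_thread()
 * below): it writes the handler's address and the top of the new kernel
 * stack into the jmp_buf, so the first switch_threads() to this task
 * longjmps straight into the handler.  Roughly, a sketch of the
 * os-Linux helper, with JB_IP/JB_SP naming the saved registers:
 *
 *      void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
 *      {
 *              (*buf)[0].JB_IP = (unsigned long) handler;
 *              (*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
 *                                sizeof(void *);
 *      }
 */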

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
        force_flush_all();

        schedule_tail(current->thread.prev_sched);

        /*
         * XXX: if interrupt_end() calls schedule, this call to
         * arch_switch_to isn't needed. We could want to apply this to
         * improve performance. -bb
         */
        arch_switch_to(current);

        current->thread.prev_sched = NULL;

        userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}

int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
        unsigned long clone_flags = args->flags;
        unsigned long sp = args->stack;
        unsigned long tls = args->tls;
        void (*handler)(void);
        int ret = 0;

        p->thread = (struct thread_struct) INIT_THREAD;

        if (!args->fn) {
                memcpy(&p->thread.regs.regs, current_pt_regs(),
                       sizeof(p->thread.regs.regs));
                PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
                if (sp != 0)
                        REGS_SP(p->thread.regs.regs.gp) = sp;

                handler = fork_handler;

                arch_copy_thread(&current->thread.arch, &p->thread.arch);
        } else {
                get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
                p->thread.request.u.thread.proc = args->fn;
                p->thread.request.u.thread.arg = args->fn_arg;
                handler = new_thread_handler;
        }

        new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

        if (!args->fn) {
                clear_flushed_tls(p);

                /*
                 * Set a new TLS for the child thread?
                 */
                if (clone_flags & CLONE_SETTLS)
                        ret = arch_set_tls(p, tls);
        }

        return ret;
}
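/*
 * Two cases above: a userspace fork/clone (args->fn == NULL) copies the
 * parent's registers, patches in the child's zero return value and new
 * stack pointer, and resumes via fork_handler; a kernel thread instead
 * starts from a safe register set and enters new_thread_handler, which
 * calls args->fn(args->fn_arg).  The latter path is where kernel_thread()
 * and kthread creation end up.
 */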

void initial_thread_cb(void (*proc)(void *), void *arg)
{
        int save_kmalloc_ok = kmalloc_ok;

        kmalloc_ok = 0;
        initial_thread_cb_skas(proc, arg);
        kmalloc_ok = save_kmalloc_ok;
}

void um_idle_sleep(void)
{
        if (time_travel_mode != TT_MODE_OFF)
                time_travel_sleep();
        else
                os_idle_sleep();
}

void arch_cpu_idle(void)
{
        cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
        um_idle_sleep();
}

int __cant_sleep(void)
{
        return in_atomic() || irqs_disabled() || in_interrupt();
        /* Is in_interrupt() really needed? */
}

int user_context(unsigned long sp)
{
        unsigned long stack;

        stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
        return stack != (unsigned long) current_thread_info();
}
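/*
 * Worked example of the mask above, assuming 4 KiB pages and
 * CONFIG_KERNEL_STACK_ORDER = 2 (a 16 KiB kernel stack):
 * PAGE_MASK << 2 == ~0x3fffUL, so any sp inside the stack rounds down
 * to the stack base, e.g. 0x60423a10 & ~0x3fff == 0x60420000.  Since
 * thread_info sits at the base of the kernel stack, a mismatch with
 * current_thread_info() means sp is not on the current kernel stack,
 * i.e. we were in user context.
 */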

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
        exitcall_t *call;

        call = &__uml_exitcall_end;
        while (--call >= &__uml_exitcall_begin)
                (*call)();
}
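/*
 * Exitcalls are function pointers the linker collects into the
 * [__uml_exitcall_begin, __uml_exitcall_end) section; they run here in
 * reverse registration order (LIFO, like atexit() handlers).  They are
 * registered via the __uml_exitcall() macro from the UML shared init
 * headers, used roughly like this (my_cleanup is a made-up example
 * name, and this is a sketch of the usage, not the macro itself):
 *
 *      static void my_cleanup(void) { ... }
 *      __uml_exitcall(my_cleanup);
 */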

char *uml_strdup(const char *string)
{
        return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
        return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
        return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
        return clear_user(buf, size);
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
        if (value > sysemu_supported)
                return;
        atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
        return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%d\n", get_using_sysemu());
        return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *pos)
{
        char tmp[2];

        if (copy_from_user(tmp, buf, 1))
                return -EFAULT;

        if (tmp[0] >= '0' && tmp[0] <= '2')
                set_using_sysemu(tmp[0] - '0');
        /* We use the first char, but pretend to write everything */
        return count;
}
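/*
 * Usage from a shell inside the running UML instance, e.g.:
 *
 *      $ cat /proc/sysemu
 *      $ echo 2 > /proc/sysemu
 *
 * The accepted levels are 0-2 (roughly: 0 disables sysemu and falls
 * back to PTRACE_SYSCALL, 1 uses PTRACE_SYSEMU, 2 adds the singlestep
 * variant, where the host supports them).  Writes above
 * sysemu_supported are silently ignored by set_using_sysemu().
 */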

static const struct proc_ops sysemu_proc_ops = {
        .proc_open      = sysemu_proc_open,
        .proc_read      = seq_read,
        .proc_lseek     = seq_lseek,
        .proc_release   = single_release,
        .proc_write     = sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
        struct proc_dir_entry *ent;

        if (!sysemu_supported)
                return 0;

        ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_ops);

        if (ent == NULL) {
                printk(KERN_WARNING "Failed to register /proc/sysemu\n");
                return 0;
        }

        return 0;
}

late_initcall(make_proc_sysemu);

int singlestepping(void *t)
{
        struct task_struct *task = t ? t : current;

        if (!test_thread_flag(TIF_SINGLESTEP))
                return 0;

        if (task->thread.singlestep_syscall)
                return 1;

        return 2;
}
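/*
 * Return value key: 0 - TIF_SINGLESTEP is clear, the task is not being
 * singlestepped; 1 - singlestepping with thread.singlestep_syscall set
 * (the step landed in a syscall); 2 - ordinary singlestepping.  Callers
 * in the UML signal/ptrace paths use the distinction to pick the right
 * resume behaviour.
 */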

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_u32_below(8192);
        return sp & ~0xf;
}
#endif
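/*
 * Example of the arithmetic: with sp == 0x7ffd1000 and a random draw of
 * 0x1388 (5000), sp becomes 0x7ffcfc78, and the final "& ~0xf" masks it
 * to 0x7ffcfc70 - the stack top is shifted down by up to 8 KiB and then
 * 16-byte aligned.
 */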

unsigned long __get_wchan(struct task_struct *p)
{
        unsigned long stack_page, sp, ip;
        bool seen_sched = false;

        stack_page = (unsigned long) task_stack_page(p);
        /* Bail if the process has no kernel stack for some reason */
        if (stack_page == 0)
                return 0;

        sp = p->thread.switch_buf->JB_SP;
        /*
         * Bail if the stack pointer is below the bottom of the kernel
         * stack for some reason
         */
        if (sp < stack_page)
                return 0;

        while (sp < stack_page + THREAD_SIZE) {
                ip = *((unsigned long *) sp);
                if (in_sched_functions(ip))
                        /* Ignore everything until we're above the scheduler */
                        seen_sched = true;
                else if (kernel_text_address(ip) && seen_sched)
                        return ip;

                sp += sizeof(unsigned long);
        }

        return 0;
}
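/*
 * This is a conservative scan, not a real unwind: every word on the
 * sleeping task's stack is treated as a candidate return address.  The
 * first kernel-text address found above the scheduler frames is
 * reported as the wait channel, which can occasionally misfire on stack
 * data that merely looks like a text address.
 */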

int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
        int cpu = current_thread_info()->cpu;

        return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
}